services:
  llama_server_rocm:
    image: ollama/ollama:rocm
    restart: always
    ports:
      - "11434:11434"
    volumes:
      - ./container_data:/root/.ollama
    devices:
      - /dev/kfd
      - /dev/dri

  llama_server_nvidia:
    image: ollama/ollama:latest
    restart: always
    ports:
      - "11434:11434"
    volumes:
      - ./container_data:/root/.ollama:z
    privileged: true  # had to be done so nvidia can find the GPUs...
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: 1  # alternatively, use `count: all` for all GPUs
              capabilities: [gpu]
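
# Minimal usage sketch (not part of the compose file): both services expose the
# Ollama HTTP API on the mapped port 11434, so after starting one of them you can
# query it from the host. The model name "llama3" below is an assumption -- pull
# whichever model you want first (e.g. `ollama pull llama3` inside the container).

import json
import urllib.request

OLLAMA_URL = "http://localhost:11434/api/generate"  # port published in the compose file

payload = {
    "model": "llama3",            # assumption: replace with a model you have pulled
    "prompt": "Why is the sky blue?",
    "stream": False,              # ask for a single JSON object instead of a token stream
}

req = urllib.request.Request(
    OLLAMA_URL,
    data=json.dumps(payload).encode("utf-8"),
    headers={"Content-Type": "application/json"},
)

# Print the generated text from the non-streaming response.
with urllib.request.urlopen(req) as resp:
    print(json.loads(resp.read())["response"])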