# Docker Compose definitions for running Ollama on either AMD (ROCm) or
# NVIDIA GPUs. Both services share ./container_data for model storage.
#
# NOTE: both services publish host port 11434, so they cannot run at the
# same time — start only the one matching your GPU vendor, e.g.:
#   docker compose up -d llama_server_nvidia
services:
  llama_server_rocm:
    image: ollama/ollama:rocm
    restart: always
    ports:
      - "11434:11434"
    volumes:
      # :z relabels the volume for SELinux shared access
      - ./container_data:/root/.ollama:z
    devices:
      # ROCm needs direct access to the kernel fusion driver and DRI nodes
      - /dev/kfd
      - /dev/dri

  llama_server_nvidia:
    image: ollama/ollama:latest
    ports:
      - "11434:11434"
    volumes:
      - ./container_data:/root/.ollama:z
    # Had to be done so nvidia can find gpus...
    # NOTE(review): privileged grants the container full host-device access;
    # with a working nvidia-container-toolkit install the `deploy` GPU
    # reservation below should suffice — confirm before removing.
    privileged: true
    restart: always
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: 1 # alternatively, use `count: all` for all GPUs
              capabilities: [gpu]