services:
  webui:
    # Open WebUI image with Ollama bundled in the same container
    image: ghcr.io/open-webui/open-webui:ollama
    restart: unless-stopped
    # environment:
    #   - OLLAMA_BASE_URL=http://localhost:11434
    # network_mode: "host"
    ports:
      - "4000:8080"   # host port 4000 -> container port 8080 (the web UI)
    volumes:
      - ./openwebui-data:/app/backend/data   # Open WebUI state (users, chats, settings)
      - ./ollama-data:/root/.ollama          # downloaded Ollama models
    # extra_hosts:
    #   - "host.docker.internal:host-gateway"
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              # count: 1  # reserve a single GPU instead of all of them
              count: all
              capabilities: [gpu]
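
# A typical invocation (a sketch, assuming this file is saved as docker-compose.yml
# and the NVIDIA Container Toolkit is installed so the GPU reservation can be honored):
#
#   docker compose up -d
#
# The UI is then reachable at http://localhost:4000, which the `ports` mapping above
# forwards to Open WebUI's port 8080 inside the container.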