diff --git a/ollama-openwebui/.gitignore b/ollama-openwebui/.gitignore new file mode 100644 index 0000000..982e835 --- /dev/null +++ b/ollama-openwebui/.gitignore @@ -0,0 +1,2 @@ +ollama-data/ +openwebui-data/ diff --git a/ollama-openwebui/README.md b/ollama-openwebui/README.md new file mode 100644 index 0000000..388454f --- /dev/null +++ b/ollama-openwebui/README.md @@ -0,0 +1,18 @@ +# ollama / openwebui stack w/ gpu support + +## models + +https://ollama.com/library + +## setup + +https://hub.docker.com/r/ollama/ollama + +```bash +curl -s -L https://nvidia.github.io/libnvidia-container/stable/rpm/nvidia-container-toolkit.repo | \ + sudo tee /etc/yum.repos.d/nvidia-container-toolkit.repo +sudo dnf update +sudo dnf install nvidia-container-toolkit +sudo nvidia-ctk runtime configure --runtime=docker +sudo systemctl restart docker +``` diff --git a/ollama-openwebui/docker-compose.yml b/ollama-openwebui/docker-compose.yml new file mode 100644 index 0000000..d164d6e --- /dev/null +++ b/ollama-openwebui/docker-compose.yml @@ -0,0 +1,22 @@ +services: + webui: + image: ghcr.io/open-webui/open-webui:ollama + restart: unless-stopped + # environment: + # - OLLAMA_BASE_URL=http://localhost:11434 + # network_mode: "host" + ports: + - "4000:8080" + volumes: + - ./openwebui-data:/app/backend/data + - ./ollama-data:/root/.ollama + # extra_hosts: + # - "host.docker.internal:host-gateway" + deploy: + resources: + reservations: + devices: + - driver: nvidia + # count: 1 # alternatively, use `count: all` for all GPUs + count: all + capabilities: [gpu]