---
# Docker Compose stack: Ollama model server plus the Open WebUI front end.
# Tags and the WebUI host port are overridable via environment variables
# (OLLAMA_DOCKER_TAG, WEBUI_DOCKER_TAG, OPEN_WEBUI_PORT), defaulting as shown.
services:
  ollama:
    container_name: ollama
    image: ollama/ollama:${OLLAMA_DOCKER_TAG-latest}
    pull_policy: always
    tty: true
    restart: unless-stopped
    volumes:
      # Persist downloaded models across container recreation.
      - ollama:/root/.ollama
    ports:
      # Quoted: bare digits:digits is the YAML 1.1 sexagesimal trap.
      - '11434:11434'
    environment:
      # Serve one model at a time to bound memory use.
      - OLLAMA_MAX_LOADED_MODELS=1
    # Uncomment to pass the host GPU render device through to the container:
    # devices:
    #   - /dev/dri:/dev/dri

  open-webui:
    container_name: open-webui
    # Built locally; the published tag is used as the image name for the build.
    build:
      context: .
      args:
        OLLAMA_BASE_URL: '/ollama'
      dockerfile: Dockerfile
    image: ghcr.io/open-webui/open-webui:${WEBUI_DOCKER_TAG-latest}
    depends_on:
      - ollama
    volumes:
      # Persist WebUI state (users, chats, RAG index) across recreation.
      - open-webui:/app/backend/data
    ports:
      # Host port defaults to 3001; container always listens on 8080.
      - '${OPEN_WEBUI_PORT-3001}:8080'
    environment:
      - WEBUI_CONCURRENCY=1
      - LOG_LEVEL=debug
      # Reaches the sibling service over the Compose network by service name.
      - 'OLLAMA_BASE_URL=http://ollama:11434'
      # Empty secret key: sessions are invalidated on every restart.
      - 'WEBUI_SECRET_KEY='
      - 'RAG_EMBEDDING_ENGINE=ollama'
      - 'AUDIO_STT_ENGINE=openai'
    # NOTE(review): deploy.resources limits are honored by Swarm or by
    # `docker compose --compatibility`; plain `docker compose up` may
    # ignore them — confirm against the deployment target.
    deploy:
      resources:
        limits:
          cpus: '2'
          memory: 4G
    extra_hosts:
      # Let the container reach services bound on the Docker host itself.
      - 'host.docker.internal:host-gateway'
    restart: unless-stopped

volumes:
  ollama: {}
  open-webui: {}