# thedocks/ollama-intel-arc/docker-compose.yml
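#
# Two-service stack: Ollama served with Intel Arc GPU acceleration via
# Intel's IPEX-LLM image, fronted by Open WebUI.
# Usage: `docker compose up -d`, then open http://localhost:4040 for
# Open WebUI; the Ollama API listens on http://localhost:11434.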
services:
  ollama-intel-arc:
    image: intelanalytics/ipex-llm-inference-cpp-xpu:latest
    container_name: ollama-intel-arc
    restart: unless-stopped
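    # Pass the host's Intel GPU through: /dev/dri holds the DRM render
    # nodes the Arc driver exposes.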
    devices:
      - /dev/dri:/dev/dri
    volumes:
      - ollama-volume:/root/.ollama
    ports:
      - "11434:11434"
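    # DEVICE=Arc selects the Arc backend in the IPEX-LLM image's scripts;
    # OLLAMA_NUM_GPU=999 asks Ollama to offload all model layers to the
    # GPU; ZES_ENABLE_SYSMAN=1 enables Level Zero sysman so the runtime
    # can query the device.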
    environment:
      - no_proxy=localhost,127.0.0.1
      - OLLAMA_HOST=0.0.0.0
      - DEVICE=Arc
      - OLLAMA_INTEL_GPU=true
      - OLLAMA_NUM_GPU=999
      - ZES_ENABLE_SYSMAN=1
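    # init-ollama sets up the bundled ollama binary under /llm/ollama;
    # exec replaces the shell so ollama receives container stop signals.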
    command: sh -c 'mkdir -p /llm/ollama && cd /llm/ollama && init-ollama && exec ./ollama serve'

  open-webui:
    image: ghcr.io/open-webui/open-webui:latest
    container_name: open-webui
    volumes:
      - open-webui-volume:/app/backend/data
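    # depends_on only orders container startup; it does not wait for the
    # Ollama API to be ready. A minimal sketch of a readiness gate, if
    # one is wanted (assumes curl is available inside the IPEX-LLM image):
    #
    #   healthcheck:            # on the ollama-intel-arc service
    #     test: ["CMD", "curl", "-sf", "http://localhost:11434/api/version"]
    #     interval: 10s
    #     retries: 12
    #   depends_on:             # here, replacing the short form
    #     ollama-intel-arc:
    #       condition: service_healthy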
    depends_on:
      - ollama-intel-arc
    ports:
      - "4040:8080"
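    # WEBUI_AUTH=False disables login entirely: fine on a trusted LAN,
    # risky if port 4040 is reachable from elsewhere. The AUTOMATIC1111_*
    # settings point image generation at an SD.Next instance named
    # sdnext-ipex, which is assumed to run on the same Docker network
    # (it is not defined in this file).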
    environment:
      - WEBUI_AUTH=False
      - ENABLE_OPENAI_API=False
      - ENABLE_OLLAMA_API=True
      - ENABLE_IMAGE_GENERATION=True
      - IMAGE_GENERATION_ENGINE=automatic1111
      - IMAGE_GENERATION_MODEL=dreamshaper_8
      - IMAGE_SIZE=400x400
      - IMAGE_STEPS=8
      - AUTOMATIC1111_BASE_URL=http://sdnext-ipex:7860/
      - AUTOMATIC1111_CFG_SCALE=2
      - AUTOMATIC1111_SAMPLER=DPM++ SDE
      - AUTOMATIC1111_SCHEDULER=Karras
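    # host-gateway maps host.docker.internal to the Docker host, letting
    # the container reach services running directly on the host.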
    extra_hosts:
      - host.docker.internal:host-gateway
    restart: unless-stopped
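
# Named volumes persist pulled models and Open WebUI state across
# container rebuilds.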
volumes:
  ollama-volume: {}
  open-webui-volume: {}