{
  "$schema": "https://opencode.ai/config.json",
  "plugin": [],
  "model": "halo/qwen3-coder-next:latest",
  "small_model": "gemma3:4b",
  "provider": {
    "halo": {
      "npm": "@ai-sdk/openai-compatible",
      "name": "(local)",
      "options": {
        "baseURL": "http://host.docker.internal:11434/v1"
      },
      "models": {
        "hf.co/unsloth/GLM-4.7-Flash-GGUF:Q6_K": {
          "name": "glm4.7"
        },
        "hf.co/LiquidAI/LFM2-24B-A2B-GGUF:Q8_0": {
          "name": "lfm2"
        },
        "qwen3-coder-next:latest": {
          "name": "qwen3"
        },
        "gemma3:4b": {
          "name": "Gemma *",
          "temperature": false,
          "reasoning": false,
          "attachment": true,
          "tool_call": false,
          "modalities": {
            "input": ["text", "image"],
            "output": ["text"]
          },
          "limit": {
            "context": 100000,
            "output": 8192
          },
          "options": {
            "num_ctx": 16000
          },
          "headers": {},
          "family": "gemma",
          "release_date": "2025-03-13",
          "variants": {}
        }
      }
    }
  },
  "server": {
    "port": 4096,
    "hostname": "0.0.0.0",
    "mdns": false,
    "cors": ["https://example.com"]
  }
}