ai-code/opencode.cfg/opencode.json
2026-03-21 22:07:13 +00:00

104 lines
No EOL
2.7 KiB
JSON

{
  "$schema": "https://opencode.ai/config.json",
  "plugin": [],
  "model": "halo/qwen3-coder-next:latest",
  "small_model": "gemma3:4b",
  "provider": {
    "halo": {
      "npm": "@ai-sdk/openai-compatible",
      "name": "(local)",
      "options": {
        "baseURL": "http://host.docker.internal:11434/v1"
      },
      "models": {
        "hf.co/unsloth/GLM-4.7-Flash-GGUF:Q6_K": {
          "name": "glm4.7",
          "family": "glm",
          "reasoning": true,
          "tool_call": true,
          "limit": {
            "context": 131072,
            "output": 40000
          },
          "options": {
            "num_ctx": 16000,
            "temperature": 0.7,
            "top_p": 1.0,
            "max_tokens": 20000
          }
        },
        "hf.co/LiquidAI/LFM2-24B-A2B-GGUF:Q8_0": {
          "name": "lfm2",
          "modalities": {
            "input": [
              "text"
            ],
            "output": [
              "text"
            ]
          }
        },
        "qwen3-coder-next:latest": {
          "name": "qwen3",
          "family": "qwen3next",
          "reasoning": false,
          "tool_call": true,
          "limit": {
            "context": 128000,
            "output": 8192
          },
          "options": {
            "num_ctx": 16000,
            "temperature": 1.0,
            "top_p": 0.95,
            "top_k": 40,
            "max_tokens": 8192
          },
          "modalities": {
            "input": [
              "text"
            ],
            "output": [
              "text"
            ]
          }
        },
        "gemma3:4b": {
          "name": "Gemma 3:4b",
          "family": "gemma",
          "temperature": false,
          "reasoning": false,
          "attachment": true,
          "tool_call": false,
          "modalities": {
            "input": [
              "text",
              "image"
            ],
            "output": [
              "text"
            ]
          },
          "limit": {
            "context": 100000,
            "output": 8192
          },
          "options": {
            "num_ctx": 16000
          },
          "headers": {},
          "release_date": "13 March 2025",
          "variants": {}
        }
      }
    }
  },
  "server": {
    "port": 4096,
    "hostname": "0.0.0.0",
    "mdns": false,
    "cors": [
      "https://example.com"
    ]
  }
}