ai-code/opencode.cfg/opencode.json
2026-03-17 23:14:37 +00:00

75 lines
No EOL
1.7 KiB
JSON

{
"plugin": [],
"theme": "system",
"model": "halo/qwen3-coder-next:latest",
"small_model": "halo/gemma3:4b",
"provider": {
"halo": {
"npm": "@ai-sdk/openai-compatible",
"name": "(local)",
"options": {
"baseURL": "http://host.docker.internal:11434/v1"
},
"models": {
"hf.co/unsloth/GLM-4.7-Flash-GGUF:Q6_K": {
"name": "glm4.7"
},
"hf.co/LiquidAI/LFM2-24B-A2B-GGUF:Q8_0": {
"name": "lfm2"
},
"qwen3-coder-next:latest": {
"name": "qwen3"
},
"gemma3:4b": {
"name": "gemma3",
"capabilities": {
"temperature": false,
"reasoning": false,
"attachment": false,
"toolcall": true,
"input": {
"text": true,
"audio": false,
"image": true,
"video": false,
"pdf": false
},
"output": {
"text": true,
"audio": false,
"image": false,
"video": false,
"pdf": false
},
"interleaved": false
},
"cost": {
"input": 0,
"output": 0,
"cache": {
"read": 0,
"write": 0
}
},
"options": {},
"limit": {
"context": 0,
"output": 0
},
"headers": {},
"family": "",
"release_date": "",
"variants": {}
}
}
}
},
"server": {
"port": 4096,
"hostname": "0.0.0.0",
"mdns": true,
"cors": [
"https://example.com"
]
}
}