# ai-code/models/gguf/qwen3-coder/config.toml
# 2026-02-24 21:14:02 +00:00
#
# 15 lines
# 317 B
# TOML
[server]
# GGUF weights to serve, resolved relative to this directory.
# Multi-part file: the loader picks up the remaining shards automatically.
model_file = "Qwen3-Coder-Next-BF16-00001-of-00004.gguf"

# Hardware
n_gpu_layers = 99  # offload everything that fits to the GPU
ctx_size = 131072  # context window in tokens (128K)
threads = 24       # CPU threads for the non-offloaded work

# Network
port = 8082
host = "0.0.0.0"  # listen on all interfaces

# Model name advertised to API clients (e.g. OpenCode)
alias = "qwen3-coder"