Andy Bunce 2026-02-24 21:14:02 +00:00
commit 37adbcc96f
4 changed files with 219 additions and 0 deletions

@@ -0,0 +1,99 @@
FROM python:3.12-slim
# 1. Install System Tools
# Added pkg-config and libssl-dev (needed by Rust crates that link against OpenSSL)
RUN apt-get update && apt-get install -y \
git curl build-essential unzip sudo \
ffmpeg jq zsh pkg-config libssl-dev \
&& rm -rf /var/lib/apt/lists/*
# 2. Install Starship & Aliases
RUN curl -sS https://starship.rs/install.sh | sh -s -- -y
RUN echo 'eval "$(starship init zsh)"' >> /root/.zshrc
RUN echo 'alias ll="ls -al"' >> /root/.zshrc
# Shortcuts for new tools
RUN echo 'alias c="cargo"' >> /root/.zshrc
RUN echo 'alias g="go"' >> /root/.zshrc
# 3. Install 'uv' (Python)
COPY --from=ghcr.io/astral-sh/uv:latest /uv /bin/uv
# 4. Install Bun (JS/TS)
ENV BUN_INSTALL="/root/.bun"
ENV PATH="$BUN_INSTALL/bin:$PATH"
RUN curl -fsSL https://bun.sh/install | bash
# --- NEW SECTION: RUST SETUP ---
# 5. Install Rust (Official Script)
# Installs Cargo, Rustc, Rustfmt, etc.
ENV RUSTUP_HOME=/root/.rustup
ENV CARGO_HOME=/root/.cargo
ENV PATH="$CARGO_HOME/bin:$PATH"
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --profile default
# --- NEW SECTION: GO SETUP ---
# 6. Install Go (pinned stable release)
# 'linux-arm64' targets Apple Silicon (M3); use 'linux-amd64' on x86_64 hosts.
ARG GO_VER=1.23.4
RUN curl -OL https://golang.org/dl/go${GO_VER}.linux-arm64.tar.gz && \
tar -C /usr/local -xzf go${GO_VER}.linux-arm64.tar.gz && \
rm go${GO_VER}.linux-arm64.tar.gz
ENV PATH="/usr/local/go/bin:$PATH"
# 7. Install OpenCode CLI
RUN curl -fsSL https://opencode.ai/install | bash
ENV PATH="/root/.opencode/bin:$PATH"
# 8. Install Oh-My-OpenCode
RUN bunx oh-my-opencode@latest install --no-tui \
--claude=no --gemini=no --copilot=no
# 9. CONFIG PART 1: Hardware/Providers (opencode.json)
# Maps your local ports to providers.
# (Heredocs in RUN require BuildKit, the default builder in current Docker.)
RUN mkdir -p /root/.config/opencode && \
    cat <<EOF > /root/.config/opencode/opencode.json
{
  "plugin": ["oh-my-opencode"],
  "theme": "oh-my-opencode",
  "default_agent": "Sisyphus",
  "provider": {
    "local-planner": {
      "npm": "@ai-sdk/openai-compatible",
      "name": "GLM 4.7 (Planner)",
      "options": { "baseURL": "http://host.docker.internal:9901/v1" },
      "models": { "glm": { "name": "GLM 4.7" } }
    },
    "local-oracle": {
      "npm": "@ai-sdk/openai-compatible",
      "name": "Kimi K2.5 (Context)",
      "options": { "baseURL": "http://host.docker.internal:9900/v1" },
      "models": { "kimi": { "name": "Kimi K2.5" } }
    },
    "local-coder": {
      "npm": "@ai-sdk/openai-compatible",
      "name": "Qwen3 Next (Coder)",
      "options": { "baseURL": "http://host.docker.internal:9902/v1" },
      "models": { "qwen": { "name": "Qwen3 Next" } }
    }
  }
}
EOF
# 10. CONFIG PART 2: Agent Brains (oh-my-opencode.json)
# Maps Agent Roles to Providers.
RUN cat <<EOF > /root/.config/opencode/oh-my-opencode.json
{
  "agents": {
    "sisyphus": { "model": "local-planner/glm" },
    "oracle": { "model": "local-oracle/kimi" },
    "librarian": { "model": "local-oracle/kimi" },
    "build": { "model": "local-coder/qwen" }
  },
  "disabled_agents": ["multimodal-looker"],
  "confirm_dangerous_actions": false
}
EOF
WORKDIR /workspace
ENV SHELL=/bin/zsh
CMD ["/bin/zsh"]

@@ -0,0 +1,14 @@
# Use a Red Whale to indicate "Inside Container"
[container]
format = '[$symbol $name]($style) '
symbol = "🐳 "
style = "red bold"
# Turn the arrow red to warn you
[character]
success_symbol = "[➜](red bold)"
error_symbol = "[✗](red bold)"
# Disable the package version noise for a cleaner look
[package]
disabled = true
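Note the Dockerfile above never copies this file into the image, so one option (an assumption about where you keep it, not something this commit wires up) is to bind-mount it to Starship's default config path:

docker run -it --rm \
  -v "$(pwd)/starship.toml":/root/.config/starship.toml \
  -v "$(pwd)":/workspace \
  opencode-dev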

@@ -0,0 +1,15 @@
[server]
# The specific GGUF file to run inside this folder
model_file = "Qwen3-Coder-Next-BF16-00001-of-00004.gguf"
# Hardware Settings
n_gpu_layers = 99
ctx_size = 131072
threads = 24
# Network Settings
port = 8082
host = "0.0.0.0"
# Optional: Set a friendly alias for tools like OpenCode
alias = "qwen3-coder"
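Fed through models/launch.py below, this config should expand to roughly the following invocation (<folder> stands in for whatever this file's directory under gguf/ is called; llama.cpp picks up the remaining -of-00004 shards automatically when given the first one):

llama-server -m gguf/<folder>/Qwen3-Coder-Next-BF16-00001-of-00004.gguf \
  --n-gpu-layers 99 --ctx-size 131072 --threads 24 \
  --port 8082 --host 0.0.0.0 --alias qwen3-coder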

models/launch.py Executable file
@@ -0,0 +1,91 @@
#!/usr/bin/env python3
import os
import sys
import argparse
import tomllib  # Requires Python 3.11+ (standard on macOS Sonoma/Sequoia)

# --- CONFIGURATION ---
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
GGUF_DIR = os.path.join(BASE_DIR, "gguf")

def load_config(model_name):
    # 1. Locate the folder
    model_path = os.path.join(GGUF_DIR, model_name)
    config_file = os.path.join(model_path, "config.toml")
    if not os.path.exists(config_file):
        print(f"X Error: Config file not found at {config_file}")
        sys.exit(1)
    # 2. Parse TOML
    with open(config_file, "rb") as f:
        config = tomllib.load(f)
    return config, model_path

def build_command(config, model_path):
    server_conf = config.get("server", {})
    # 1. Find the .gguf file
    gguf_name = server_conf.get("model_file")
    if not gguf_name:
        print("X Error: 'model_file' missing in config.toml")
        sys.exit(1)
    full_model_path = os.path.join(model_path, gguf_name)
    # 2. Build the llama-server command
    cmd = ["llama-server", "-m", full_model_path]
    # Map TOML keys to CLI flags (key = TOML key, value = CLI flag).
    # 'model_file' is handled above via -m, so it is skipped in the loop.
    args_map = {
        "n_gpu_layers": "--n-gpu-layers",
        "ctx_size": "--ctx-size",
        "threads": "--threads",
        "port": "--port",
        "host": "--host",
        "alias": "--alias",
    }
    for key, value in server_conf.items():
        if key == "model_file":
            continue  # Already added
        flag = args_map.get(key)
        if flag:
            cmd.extend([flag, str(value)])
    return cmd

def main():
    parser = argparse.ArgumentParser(description="Launch a local LLM from config.")
    parser.add_argument("model", help="The name of the folder inside gguf/")
    args = parser.parse_args()

    print(f">> Loading configuration for: {args.model}...")
    config, path = load_config(args.model)
    cmd = build_command(config, path)

    print(f"> Context: {path}")
    print(f"> Command: {' '.join(cmd)}")
    print("-" * 40)
    try:
        # Run and replace the Python process (saves memory/PID handling)
        os.execvp("llama-server", cmd)
    except FileNotFoundError:
        print("X Error: 'llama-server' not found in PATH.")
        print("  Run: brew install llama.cpp")
        sys.exit(1)

if __name__ == "__main__":
    main()
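Typical usage, assuming a gguf/ folder next to the script (the folder name below is illustrative; use whatever directory holds the config.toml above):

cd models
./launch.py qwen3-coder-next

Because the script execs llama-server in place of the Python process, Ctrl-C goes straight to the server and no wrapper process is left behind.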