commit e0480bb53c
Andy Bunce, 2026-03-25 17:54:02 +00:00
5 changed files with 165 additions and 63 deletions

Dockerfile

@@ -28,9 +28,11 @@ ENV PATH="$BUN_INSTALL/bin:$PATH"
RUN curl -fsSL https://bun.sh/install | bash
# use bun for node, npx
RUN cd /usr/local/bin && \
RUN ( \
cd /usr/local/bin && \
echo '#!/bin/sh' > npx && echo 'exec bunx "$@"' >> npx && chmod +x npx && \
echo '#!/bin/sh' > node && echo 'exec bun "$@"' >> node && chmod +x node
echo '#!/bin/sh' > node && echo 'exec bun "$@"' >> node && chmod +x node \
)
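Note: with the shims above, node and npx inside the image are thin wrappers that exec bun and bunx. A quick smoke test after a build (the image tag opencode-agent is only an illustrative assumption) could be:

docker run --rm opencode-agent sh -c 'node --version && npx --version'
# both commands should resolve through the generated shims to bun's toolchain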
# --- NEW SECTION: RUST SETUP ---
@@ -48,13 +50,13 @@ RUN cargo install agent-browser && \
# --- NEW SECTION: GO SETUP ---
# 6. Install Go (Latest Stable)
# We target 'linux-arm64' because you are on Apple Silicon (M3).
#ARG GO_VER=1.26.1
#ARG ARCH=amd64
#RUN curl -OL https://golang.org/dl/go${GO_VER}.linux-${ARCH}.tar.gz && \
# tar -C /usr/local -xzf go${GO_VER}.linux-${ARCH}.tar.gz && \
# rm go${GO_VER}.linux-${ARCH}.tar.gz
#ENV PATH="/usr/local/go/bin:$PATH"
ARG GO_VER=1.26.1
ARG ARCH=amd64
RUN curl -OL https://golang.org/dl/go${GO_VER}.linux-${ARCH}.tar.gz && \
tar -C /usr/local -xzf go${GO_VER}.linux-${ARCH}.tar.gz && \
rm go${GO_VER}.linux-${ARCH}.tar.gz
ENV PATH="/usr/local/go/bin:$PATH"
# 6a basex https://files.basex.org/releases/12.2/BaseX122.zip
@@ -74,49 +76,7 @@ ENV PATH="/root/.opencode/bin:$PATH"
# 9. CONFIG PART 1: Hardware/Providers (opencode.json)
# Maps your local ports to providers.
RUN mkdir -p /root/.config/opencode && \
cat <<EOF > /root/.config/opencode/opencode.json
{
"plugin": [],
"theme": "system",
"model": "halo/qwen3-coder-next:latest",
"small_model": "halo/gemma3:4b",
"provider": {
"halo": {
"npm": "@ai-sdk/openai-compatible",
"name": "(local)",
"options": { "baseURL": "http://host.docker.internal:11434/v1" },
"models": { "hf.co/unsloth/GLM-4.7-Flash-GGUF:Q6_K": { "name": "glm4.7" },
"hf.co/LiquidAI/LFM2-24B-A2B-GGUF:Q8_0": { "name": "lfm2" },
"qwen3-coder-next:latest": { "name": "qwen3" },
"gemma3:4b": {"name": "Gemma"}
}
}
},
"server": {
"port": 4096,
"hostname": "0.0.0.0",
"mdns": true,
"cors": ["https://example.com"]
}
}
EOF
# 10. CONFIG PART 2: Agent Brains (oh-my-opencode.json)
# Maps Agent Roles to Providers.
RUN cat <<EOF > /root/.config/opencode/oh-my-opencode.json
{
"agents": {
"Sisyphus": { "model": "halo/glm47" },
"Oracle": { "model": "halo/lfm2" },
"Librarian": { "model": "halo/glm47" },
"Hephaestus": { "model": "halo/qwen3" }
},
"disabled_agents": ["multimodal-looker"],
"confirm_dangerous_actions": false
}
EOF
#USER 1000:1000
WORKDIR /workspace
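A minimal build of the updated image might look like this (the image name and build context are assumptions for illustration, not taken from the commit):

docker build -t opencode-agent .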

export-ollama-model.py Normal file (52 additions)

@@ -0,0 +1,52 @@
# https://mitjamartini.com/posts/export-models-from-ollama/#now-where-is-the-script
import os
import json
import zipfile
import argparse
from pathlib import Path
def get_model_manifest_path(registry, repository, model_name, model_tag):
    return Path(f".ollama/models/manifests/{registry}/{repository}/{model_name}/{model_tag}")


def get_blob_file_path(digest):
    return Path(f".ollama/models/blobs/sha256-{digest.split(':')[1]}")


def read_manifest(ollamamodels, manifest_path):
    with open(Path.joinpath(ollamamodels, manifest_path), 'r') as file:
        return json.load(file)


def create_zip(ollamamodels, registry, repository, model_name, model_tag, output_zip):
    manifest_path = get_model_manifest_path(registry, repository, model_name, model_tag)
    manifest = read_manifest(ollamamodels, manifest_path)
    with zipfile.ZipFile(output_zip, 'w') as zipf:
        # Add manifest file
        zipf.write(Path.joinpath(ollamamodels, manifest_path), arcname=manifest_path.relative_to('.'))
        # Add blobs
        for layer in manifest['layers']:
            blob_path = get_blob_file_path(layer['digest'])
            zipf.write(Path.joinpath(ollamamodels, blob_path), arcname=blob_path.relative_to('.'))
        # Add config blob
        config_blob_path = get_blob_file_path(manifest['config']['digest'])
        zipf.write(Path.joinpath(ollamamodels, config_blob_path), arcname=config_blob_path.relative_to('.'))
    print(f"Model '{repository}/{model_name}:{model_tag}' exported successfully to '{output_zip}'")
    print(f"You can import it to another Ollama instance with 'tar -xf <modelname>_<tag>_export.zip'")


def main():
    homedir = Path.home()
    parser = argparse.ArgumentParser(description='Export Ollama model to a zip file.')
    parser.add_argument('model_name', type=str, help='Name of the model (e.g., gemma)')
    parser.add_argument('model_tag', type=str, help='Tag of the model (e.g., 2b)')
    parser.add_argument('--ollamamodels', type=str, default=homedir, help='The directory containing .ollama/models (default: home directory)')
    parser.add_argument('--registry', type=str, default="registry.ollama.ai", help="The Ollama model registry.")
    parser.add_argument('--repository', type=str, default="library", help="Name of the repository (e.g. jina).")
    parser.add_argument('--output', type=str, default='model_export.zip', help='Output zip file name')
    args = parser.parse_args()
    # Wrap in Path() so a string passed via --ollamamodels also works with Path.joinpath above
    create_zip(Path(args.ollamamodels), args.registry, args.repository, args.model_name, args.model_tag, args.output)


if __name__ == "__main__":
    main()
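Given the argparse setup above, a typical invocation would be (model name, tag, and output file are examples; the default --ollamamodels assumes a standard per-user install with models under ~/.ollama/models):

python export-ollama-model.py gemma 2b --output gemma_2b_export.zip
python export-ollama-model.py qwen3-coder-next latest --ollamamodels "$HOME"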

oh-my-opencode.json Normal file

@@ -0,0 +1,10 @@
{
"agents": {
"Sisyphus": { "model": "halo/glm47" },
"Oracle": { "model": "halo/lfm2" },
"Librarian": { "model": "halo/glm47" },
"Hephaestus": { "model": "halo/qwen3" }
},
"disabled_agents": ["multimodal-looker"],
"confirm_dangerous_actions": false
}

opencode.json

@@ -1,25 +1,104 @@
{
"$schema": "https://opencode.ai/config.json",
"plugin": [],
"theme": "system",
"model": "halo/qwen3-coder-next:latest",
"small_model": "halo/gemma3:4b",
"small_model": "gemma3:4b",
"provider": {
"halo": {
"npm": "@ai-sdk/openai-compatible",
"name": "(local)",
"options": { "baseURL": "http://host.docker.internal:11434/v1" },
"models": { "hf.co/unsloth/GLM-4.7-Flash-GGUF:Q6_K": { "name": "glm4.7" },
"hf.co/LiquidAI/LFM2-24B-A2B-GGUF:Q8_0": { "name": "lfm2" },
"qwen3-coder-next:latest": { "name": "qwen3" },
"gemma3:4b": {"name": "Gemma"}
}
"options": {
"baseURL": "http://host.docker.internal:11434/v1"
},
"models": {
"hf.co/unsloth/GLM-4.7-Flash-GGUF:Q6_K": {
"name": "glm4.7",
"family": "glm",
"reasoning": true,
"tool_call": true,
"limit": {
"context": 131072, // Context window size
"output": 40000 // Output token limit
},
"options": {
"num_ctx": 16000, // Context window size for Ollama
"temperature": 0.7,
"top_p": 1.0,
"max_tokens": 20000 // Max response length
}
},
"hf.co/LiquidAI/LFM2-24B-A2B-GGUF:Q8_0": {
"name": "lfm2",
"modalities": {
"input": [
"text"
],
"output": [
"text"
]
}
},
"qwen3-coder-next:latest": {
"name": "qwen3",
"family": "qwen3next",
"reasoning": false,
"tool_call": true,
"limit": {
"context": 128000, // Context window size
"output": 8192 // Output token limit
},
"options": {
"num_ctx": 16000, // Context window size for Ollama
"temperature": 1.0,
"top_p": 0.95,
"top_k": 40,
"max_tokens": 8192 // Max response length
},
"modalities": {
"input": [
"text"
],
"output": [
"text"
]
}
},
"gemma3:4b": {
"name": "Gemma 3:4b",
"family": "gemma",
"temperature": false,
"reasoning": false,
"attachment": true,
"tool_call": false,
"modalities": {
"input": [
"text",
"image"
], // Explicitly declare image support
"output": [
"text"
]
},
"limit": {
"context": 100000, // Maximum context window (input + output)
"output": 8192 // Maximum output tokens
},
"options": {
"num_ctx": 16000 // Context window size for Ollama
},
"headers": {},
"release_date": "13 March 2025",
"variants": {}
}
}
}
},
"server": {
"port": 4096,
"hostname": "0.0.0.0",
"mdns": true,
"cors": ["https://example.com"]
"mdns": false,
"cors": [
"https://example.com"
]
}
}
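The baseURL above points at Ollama's OpenAI-compatible API on the Docker host. From inside the container, a quick reachability check (using Ollama's standard /v1/models listing endpoint) would be:

curl -s http://host.docker.internal:11434/v1/models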


@@ -14,6 +14,7 @@ function agent() {
--name "$CONTAINER_NAME" \
--add-host=host.docker.internal:host-gateway \
-v "$TARGET_DIR:/workspace" \
-v "$HOME/dev/ai/opencode.cfg:/root/.config/opencode" \
-v "$HOME/dev/ai/docker-images/starship.toml:/root/.config/starship.toml" \
-v opencode-cargo-cache:/root/.cargo \
-v opencode-go-cache:/root/go \
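The new volume line mounts the opencode config directory from the host ($HOME/dev/ai/opencode.cfg) instead of baking the JSON into the image. Assuming agent() takes the target project directory as its argument (the surrounding part of the function is outside this hunk), usage might look like:

agent ~/dev/my-project
# the directory is mounted at /workspace and opencode reads the shared config from the host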