From 9b962ad4a43f338a3441de602f8577eabc7ab487 Mon Sep 17 00:00:00 2001 From: Andy Bunce Date: Tue, 17 Mar 2026 15:32:37 +0000 Subject: [PATCH 01/11] mod: volume for config --- docker-images/Dockerfile.agent | 47 ++------------------------------ opencode.cfg/oh-my-opencode.json | 10 +++++++ zsh.add | 1 + 3 files changed, 14 insertions(+), 44 deletions(-) create mode 100644 opencode.cfg/oh-my-opencode.json diff --git a/docker-images/Dockerfile.agent b/docker-images/Dockerfile.agent index 232a78c..c983255 100644 --- a/docker-images/Dockerfile.agent +++ b/docker-images/Dockerfile.agent @@ -28,9 +28,10 @@ ENV PATH="$BUN_INSTALL/bin:$PATH" RUN curl -fsSL https://bun.sh/install | bash # use bun for node, npx -RUN cd /usr/local/bin && \ +RUN pushd /usr/local/bin && \ echo '#!/bin/sh' %3E npx && echo 'exec bunx "$@"' >> npx && chmod +x npx && \ - echo '#!/bin/sh' %3E node && echo 'exec bun "$@"' >> node && chmod +x node + echo '#!/bin/sh' %3E node && echo 'exec bun "$@"' >> node && chmod +x node && \ + popd # --- NEW SECTION: RUST SETUP --- @@ -73,49 +74,7 @@ ENV PATH="/root/.opencode/bin:$PATH" # 9. CONFIG PART 1: Hardware/Providers (opencode.json) # Maps your local ports to providers. -RUN mkdir -p /root/.config/opencode && \ -cat < /root/.config/opencode/opencode.json -{ - "plugin": [], - "theme": "system", - "model": "halo/qwen3-coder-next:latest", - "small_model": "halo/gemma3:4b", - "provider": { - "halo": { - "npm": "@ai-sdk/openai-compatible", - "name": "(local)", - "options": { "baseURL": "http://host.docker.internal:11434/v1" }, - "models": { "hf.co/unsloth/GLM-4.7-Flash-GGUF:Q6_K": { "name": "glm4.7" }, - "hf.co/LiquidAI/LFM2-24B-A2B-GGUF:Q8_0": { "name": "lfm2" }, - "qwen3-coder-next:latest": { "name": "qwen3" }, - "gemma3:4b": {"name": "Gemma"} - } - } - - }, - "server": { - "port": 4096, - "hostname": "0.0.0.0", - "mdns": true, - "cors": ["https://example.com"] - } -} -EOF -# 10. 
CONFIG PART 2: Agent Brains (oh-my-opencode.json) -# Maps Agent Roles to Providers. -RUN cat < /root/.config/opencode/oh-my-opencode.json -{ - "agents": { - "Sisyphus": { "model": "halo/glm47" }, - "Oracle": { "model": "halo/lfm2" }, - "Librarian": { "model": "halo/glm47" }, - "Hephaestus": { "model": "halo/qwen3" } - }, - "disabled_agents": ["multimodal-looker"], - "confirm_dangerous_actions": false -} -EOF #USER 1000:1000 WORKDIR /workspace diff --git a/opencode.cfg/oh-my-opencode.json b/opencode.cfg/oh-my-opencode.json new file mode 100644 index 0000000..3cb06c8 --- /dev/null +++ b/opencode.cfg/oh-my-opencode.json @@ -0,0 +1,10 @@ +{ + "agents": { + "Sisyphus": { "model": "halo/glm47" }, + "Oracle": { "model": "halo/lfm2" }, + "Librarian": { "model": "halo/glm47" }, + "Hephaestus": { "model": "halo/qwen3" } + }, + "disabled_agents": ["multimodal-looker"], + "confirm_dangerous_actions": false +} \ No newline at end of file diff --git a/zsh.add b/zsh.add index 2edf07c..08f53e3 100644 --- a/zsh.add +++ b/zsh.add @@ -14,6 +14,7 @@ function agent() { --name "$CONTAINER_NAME" \ --add-host=host.docker.internal:host-gateway \ -v "$TARGET_DIR:/workspace" \ + -v "$HOME/dev/ai/opencode.cfg:/root/.config/opencode" \ -v "$HOME/dev/ai/docker-images/starship.toml:/root/.config/starship.toml" \ -v opencode-cargo-cache:/root/.cargo \ -v opencode-go-cache:/root/go \ From 08616ffee2c04e8581463b1895502079dc68468f Mon Sep 17 00:00:00 2001 From: Andy Bunce Date: Tue, 17 Mar 2026 15:40:57 +0000 Subject: [PATCH 02/11] fix: pushd --- docker-images/Dockerfile.agent | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/docker-images/Dockerfile.agent b/docker-images/Dockerfile.agent index c983255..8ce421e 100644 --- a/docker-images/Dockerfile.agent +++ b/docker-images/Dockerfile.agent @@ -28,10 +28,11 @@ ENV PATH="$BUN_INSTALL/bin:$PATH" RUN curl -fsSL https://bun.sh/install | bash # use bun for node, npx -RUN pushd /usr/local/bin && \ +RUN ( \ + cd /usr/local/bin && \ 
echo '#!/bin/sh' > npx
-#ARG GO_VER=1.26.1 -#ARG ARCH=amd64 -#RUN curl -OL https://golang.org/dl/go${GO_VER}.linux-${ARCH}.tar.gz && \ -# tar -C /usr/local -xzf go${GO_VER}.linux-${ARCH}.tar.gz && \ -# rm go${GO_VER}.linux-${ARCH}.tar.gz -#ENV PATH="/usr/local/go/bin:$PATH" + +ARG GO_VER=1.26.1 +ARG ARCH=amd64 +RUN curl -OL https://golang.org/dl/go${GO_VER}.linux-${ARCH}.tar.gz && \ + tar -C /usr/local -xzf go${GO_VER}.linux-${ARCH}.tar.gz && \ + rm go${GO_VER}.linux-${ARCH}.tar.gz +ENV PATH="/usr/local/go/bin:$PATH" # 6a basex https://files.basex.org/releases/12.2/BaseX122.zip From 16c5cdc9a66945a76a536207a3f4778e5b821ee4 Mon Sep 17 00:00:00 2001 From: Andy Bunce Date: Tue, 17 Mar 2026 22:46:51 +0000 Subject: [PATCH 05/11] add: model caps --- opencode.cfg/opencode.json | 38 +++++++++++++++++++++++++++++--------- 1 file changed, 29 insertions(+), 9 deletions(-) diff --git a/opencode.cfg/opencode.json b/opencode.cfg/opencode.json index e5ccfec..a095e7e 100644 --- a/opencode.cfg/opencode.json +++ b/opencode.cfg/opencode.json @@ -1,5 +1,5 @@ { - "plugin": [], + "plugin": [], "theme": "system", "model": "halo/qwen3-coder-next:latest", "small_model": "halo/gemma3:4b", @@ -7,19 +7,39 @@ "halo": { "npm": "@ai-sdk/openai-compatible", "name": "(local)", - "options": { "baseURL": "http://host.docker.internal:11434/v1" }, - "models": { "hf.co/unsloth/GLM-4.7-Flash-GGUF:Q6_K": { "name": "glm4.7" }, - "hf.co/LiquidAI/LFM2-24B-A2B-GGUF:Q8_0": { "name": "lfm2" }, - "qwen3-coder-next:latest": { "name": "qwen3" }, - "gemma3:4b": {"name": "Gemma"} - } + "options": { + "baseURL": "http://host.docker.internal:11434/v1" + }, + "models": { + "hf.co/unsloth/GLM-4.7-Flash-GGUF:Q6_K": { + "name": "glm4.7" + }, + "hf.co/LiquidAI/LFM2-24B-A2B-GGUF:Q8_0": { + "name": "lfm2" + }, + "qwen3-coder-next:latest": { + "name": "qwen3" + }, + "gemma3:4b": { + "name": "Gemma", + "capabilities": { + "modalities": { + "input": [ + "text", + "image" + ] + } + } + } } - + } }, "server": { "port": 4096, "hostname": "0.0.0.0", 
"mdns": true, - "cors": ["https://example.com"] + "cors": [ + "https://example.com" + ] } } \ No newline at end of file From 7fa236923d99a014b3407bc2ed03b1ec694df1b8 Mon Sep 17 00:00:00 2001 From: Andy Bunce Date: Tue, 17 Mar 2026 23:14:37 +0000 Subject: [PATCH 06/11] mod: json --- opencode.cfg/opencode.json | 44 ++++++++++++++++++++++++++++++++------ 1 file changed, 37 insertions(+), 7 deletions(-) diff --git a/opencode.cfg/opencode.json b/opencode.cfg/opencode.json index a095e7e..a53bcb7 100644 --- a/opencode.cfg/opencode.json +++ b/opencode.cfg/opencode.json @@ -21,15 +21,45 @@ "name": "qwen3" }, "gemma3:4b": { - "name": "Gemma", + "name": "Gemma *", "capabilities": { - "modalities": { - "input": [ - "text", - "image" - ] + "temperature": false, + "reasoning": false, + "attachment": false, + "toolcall": true, + "input": { + "text": true, + "audio": false, + "image": true, + "video": false, + "pdf": false + }, + "output": { + "text": true, + "audio": false, + "image": false, + "video": false, + "pdf": false + }, + "interleaved": false + }, + "cost": { + "input": 0, + "output": 0, + "cache": { + "read": 0, + "write": 0 } - } + }, + "options": {}, + "limit": { + "context": 0, + "output": 0 + }, + "headers": {}, + "family": "", + "release_date": "", + "variants": {} } } } From a342645c4b30e087e378e7c3f879ee540bb85e2b Mon Sep 17 00:00:00 2001 From: Andy Bunce Date: Thu, 19 Mar 2026 11:08:38 +0000 Subject: [PATCH 07/11] mod: config for gemma --- opencode.cfg/opencode.json | 58 +++++++++++++++----------------------- 1 file changed, 22 insertions(+), 36 deletions(-) diff --git a/opencode.cfg/opencode.json b/opencode.cfg/opencode.json index a53bcb7..ead9265 100644 --- a/opencode.cfg/opencode.json +++ b/opencode.cfg/opencode.json @@ -1,8 +1,8 @@ { + "$schema": "https://opencode.ai/config.json", "plugin": [], - "theme": "system", "model": "halo/qwen3-coder-next:latest", - "small_model": "halo/gemma3:4b", + "small_model": "gemma3:4b", "provider": { "halo": { "npm": 
"@ai-sdk/openai-compatible", @@ -22,43 +22,29 @@ }, "gemma3:4b": { "name": "Gemma *", - "capabilities": { - "temperature": false, - "reasoning": false, - "attachment": false, - "toolcall": true, - "input": { - "text": true, - "audio": false, - "image": true, - "video": false, - "pdf": false - }, - "output": { - "text": true, - "audio": false, - "image": false, - "video": false, - "pdf": false - }, - "interleaved": false + "temperature": false, + "reasoning": false, + "attachment": true, + "tool_call": false, + "modalities": { + "input": [ + "text", + "image" + ], // Explicitly declare image support + "output": [ + "text" + ] }, - "cost": { - "input": 0, - "output": 0, - "cache": { - "read": 0, - "write": 0 - } - }, - "options": {}, "limit": { - "context": 0, - "output": 0 + "context": 100000, // Maximum context window (input + output) + "output": 8192 // Maximum output tokens + }, + "options": { + "num_ctx": 16000 // Context window size for Ollama }, "headers": {}, - "family": "", - "release_date": "", + "family": "gemma", + "release_date": "13 March 2025", "variants": {} } } @@ -67,7 +53,7 @@ "server": { "port": 4096, "hostname": "0.0.0.0", - "mdns": true, + "mdns": false, "cors": [ "https://example.com" ] From 9ccafc46f74b0dc87fed62e3ca0ee2103c44981d Mon Sep 17 00:00:00 2001 From: Andy Bunce Date: Thu, 19 Mar 2026 23:01:31 +0000 Subject: [PATCH 08/11] add: model details --- opencode.cfg/opencode.json | 28 +++++++++++++++++++++++++--- 1 file changed, 25 insertions(+), 3 deletions(-) diff --git a/opencode.cfg/opencode.json b/opencode.cfg/opencode.json index ead9265..1e9c1b6 100644 --- a/opencode.cfg/opencode.json +++ b/opencode.cfg/opencode.json @@ -18,10 +18,33 @@ "name": "lfm2" }, "qwen3-coder-next:latest": { - "name": "qwen3" + "name": "qwen3", + "family": "qwen3next", + "reasoning": false, + "tool_call": true, + "modalities": { + "input": [ + "text" + ], + "output": [ + "text" + ] + }, + "limit": { + "context": 128000, // Context window size + "output": 8192 // 
Output token limit + }, + "options": { + "num_ctx": 16000, // Context window size for Ollama + "temperature": 1.0, + "top_p": 0.95, + "top_k": 40, + "max_tokens": 8192 // Max response length + } }, "gemma3:4b": { - "name": "Gemma *", + "name": "Gemma 3:4b", + "family": "gemma", "temperature": false, "reasoning": false, "attachment": true, @@ -43,7 +66,6 @@ "num_ctx": 16000 // Context window size for Ollama }, "headers": {}, - "family": "gemma", "release_date": "13 March 2025", "variants": {} } From 08108715653aec35860df33e779e5854fad98811 Mon Sep 17 00:00:00 2001 From: Andy Bunce Date: Fri, 20 Mar 2026 18:07:30 +0000 Subject: [PATCH 09/11] add: ollama export --- export-ollama-model.py | 52 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 52 insertions(+) create mode 100644 export-ollama-model.py diff --git a/export-ollama-model.py b/export-ollama-model.py new file mode 100644 index 0000000..f1c03dd --- /dev/null +++ b/export-ollama-model.py @@ -0,0 +1,52 @@ +# https://mitjamartini.com/posts/export-models-from-ollama/#now-where-is-the-script +import os +import json +import zipfile +import argparse +from pathlib import Path + +def get_model_manifest_path(registry, repository, model_name, model_tag): + return Path(f".ollama/models/manifests/{registry}/{repository}/{model_name}/{model_tag}") + +def get_blob_file_path(digest): + return Path(f".ollama/models/blobs/sha256-{digest.split(':')[1]}") + +def read_manifest(ollamamodels, manifest_path): + with open(Path.joinpath(ollamamodels, manifest_path), 'r') as file: + return json.load(file) + +def create_zip(ollamamodels, registry, repository, model_name, model_tag, output_zip): + manifest_path = get_model_manifest_path(registry, repository, model_name, model_tag) + manifest = read_manifest(ollamamodels, manifest_path) + + with zipfile.ZipFile(output_zip, 'w') as zipf: + # Add manifest file + zipf.write(Path.joinpath(ollamamodels, manifest_path), arcname=manifest_path.relative_to('.')) + + # Add blobs + for layer 
in manifest['layers']: + blob_path = get_blob_file_path(layer['digest']) + zipf.write(Path.joinpath(ollamamodels, blob_path), arcname=blob_path.relative_to('.')) + + # Add config blob + config_blob_path = get_blob_file_path(manifest['config']['digest']) + zipf.write(Path.joinpath(ollamamodels, config_blob_path), arcname=config_blob_path.relative_to('.')) + + print(f"Model '{repository}{model_name}:{model_tag}' exported successfully to '{output_zip}'") + print(f"You can import it to another Ollama instance with 'tar -xf __export.zip'") + +def main(): + homedir = Path.home() + parser = argparse.ArgumentParser(description='Export Ollama model to a zip file.') + parser.add_argument('model_name', type=str, help='Name of the model (e.g., gemma)') + parser.add_argument('model_tag', type=str, help='Tag of the model (e.g., 2b)') + parser.add_argument('--ollamamodels', type=str, default=homedir, help='The folder for OLLAMA_MODELS (default: homedir)') + parser.add_argument('--registry', type=str, default="registry.ollama.ai", help="The Ollama model registry.") + parser.add_argument('--repository', type=str, default="library", help="name of the repository, (eg. 
jina)") + parser.add_argument('--output', type=str, default='model_export.zip', help='Output zip file name') + args = parser.parse_args() + + create_zip(args.ollamamodels, args.registry, args.repository, args.model_name, args.model_tag, args.output) + +if __name__ == "__main__": + main() From 0c07e740668bf2dce56f2c11688ed3d50acec434 Mon Sep 17 00:00:00 2001 From: Andy Bunce Date: Fri, 20 Mar 2026 22:45:57 +0000 Subject: [PATCH 10/11] add: glm4.7 --- opencode.cfg/opencode.json | 128 +++++++++++++++++++++---------------- 1 file changed, 74 insertions(+), 54 deletions(-) diff --git a/opencode.cfg/opencode.json b/opencode.cfg/opencode.json index 1e9c1b6..231919c 100644 --- a/opencode.cfg/opencode.json +++ b/opencode.cfg/opencode.json @@ -12,16 +12,23 @@ }, "models": { "hf.co/unsloth/GLM-4.7-Flash-GGUF:Q6_K": { - "name": "glm4.7" + "name": "glm4.7", + "family": "glm", + "reasoning": true, + "tool_call": true, + "limit": { + "context": 131072, // Context window size + "output": 40000 // Output token limit + }, + "options": { + "num_ctx": 16000, // Context window size for Ollama + "temperature": 0.7, + "top_p": 1.0, + "max_tokens": 20000 // Max response length + }, }, "hf.co/LiquidAI/LFM2-24B-A2B-GGUF:Q8_0": { - "name": "lfm2" - }, - "qwen3-coder-next:latest": { - "name": "qwen3", - "family": "qwen3next", - "reasoning": false, - "tool_call": true, + "name": "lfm2", "modalities": { "input": [ "text" @@ -30,54 +37,67 @@ "text" ] }, - "limit": { - "context": 128000, // Context window size - "output": 8192 // Output token limit + "qwen3-coder-next:latest": { + "name": "qwen3", + "family": "qwen3next", + "reasoning": false, + "tool_call": true, + "limit": { + "context": 128000, // Context window size + "output": 8192 // Output token limit + }, + "options": { + "num_ctx": 16000, // Context window size for Ollama + "temperature": 1.0, + "top_p": 0.95, + "top_k": 40, + "max_tokens": 8192 // Max response length + }, + "modalities": { + "input": [ + "text" + ], + "output": [ + 
"text" + ] + } }, - "options": { - "num_ctx": 16000, // Context window size for Ollama - "temperature": 1.0, - "top_p": 0.95, - "top_k": 40, - "max_tokens": 8192 // Max response length + "gemma3:4b": { + "name": "Gemma 3:4b", + "family": "gemma", + "temperature": false, + "reasoning": false, + "attachment": true, + "tool_call": false, + "modalities": { + "input": [ + "text", + "image" + ], // Explicitly declare image support + "output": [ + "text" + ] + }, + "limit": { + "context": 100000, // Maximum context window (input + output) + "output": 8192 // Maximum output tokens + }, + "options": { + "num_ctx": 16000 // Context window size for Ollama + }, + "headers": {}, + "release_date": "13 March 2025", + "variants": {} } - }, - "gemma3:4b": { - "name": "Gemma 3:4b", - "family": "gemma", - "temperature": false, - "reasoning": false, - "attachment": true, - "tool_call": false, - "modalities": { - "input": [ - "text", - "image" - ], // Explicitly declare image support - "output": [ - "text" - ] - }, - "limit": { - "context": 100000, // Maximum context window (input + output) - "output": 8192 // Maximum output tokens - }, - "options": { - "num_ctx": 16000 // Context window size for Ollama - }, - "headers": {}, - "release_date": "13 March 2025", - "variants": {} } } + }, + "server": { + "port": 4096, + "hostname": "0.0.0.0", + "mdns": false, + "cors": [ + "https://example.com" + ] } - }, - "server": { - "port": 4096, - "hostname": "0.0.0.0", - "mdns": false, - "cors": [ - "https://example.com" - ] - } -} \ No newline at end of file + } \ No newline at end of file From a7f054db5320ac71353da568f2f1e00c97b91c52 Mon Sep 17 00:00:00 2001 From: Andy Bunce Date: Sat, 21 Mar 2026 22:07:13 +0000 Subject: [PATCH 11/11] fix:: json --- opencode.cfg/opencode.json | 123 +++++++++++++++++++------------------ 1 file changed, 62 insertions(+), 61 deletions(-) diff --git a/opencode.cfg/opencode.json b/opencode.cfg/opencode.json index 231919c..41ba81a 100644 --- a/opencode.cfg/opencode.json 
+++ b/opencode.cfg/opencode.json @@ -36,68 +36,69 @@ "output": [ "text" ] - }, - "qwen3-coder-next:latest": { - "name": "qwen3", - "family": "qwen3next", - "reasoning": false, - "tool_call": true, - "limit": { - "context": 128000, // Context window size - "output": 8192 // Output token limit - }, - "options": { - "num_ctx": 16000, // Context window size for Ollama - "temperature": 1.0, - "top_p": 0.95, - "top_k": 40, - "max_tokens": 8192 // Max response length - }, - "modalities": { - "input": [ - "text" - ], - "output": [ - "text" - ] - } - }, - "gemma3:4b": { - "name": "Gemma 3:4b", - "family": "gemma", - "temperature": false, - "reasoning": false, - "attachment": true, - "tool_call": false, - "modalities": { - "input": [ - "text", - "image" - ], // Explicitly declare image support - "output": [ - "text" - ] - }, - "limit": { - "context": 100000, // Maximum context window (input + output) - "output": 8192 // Maximum output tokens - }, - "options": { - "num_ctx": 16000 // Context window size for Ollama - }, - "headers": {}, - "release_date": "13 March 2025", - "variants": {} } + }, + "qwen3-coder-next:latest": { + "name": "qwen3", + "family": "qwen3next", + "reasoning": false, + "tool_call": true, + "limit": { + "context": 128000, // Context window size + "output": 8192 // Output token limit + }, + "options": { + "num_ctx": 16000, // Context window size for Ollama + "temperature": 1.0, + "top_p": 0.95, + "top_k": 40, + "max_tokens": 8192 // Max response length + }, + "modalities": { + "input": [ + "text" + ], + "output": [ + "text" + ] + } + }, + "gemma3:4b": { + "name": "Gemma 3:4b", + "family": "gemma", + "temperature": false, + "reasoning": false, + "attachment": true, + "tool_call": false, + "modalities": { + "input": [ + "text", + "image" + ], // Explicitly declare image support + "output": [ + "text" + ] + }, + "limit": { + "context": 100000, // Maximum context window (input + output) + "output": 8192 // Maximum output tokens + }, + "options": { + "num_ctx": 
16000 // Context window size for Ollama + }, + "headers": {}, + "release_date": "13 March 2025", + "variants": {} } } - }, - "server": { - "port": 4096, - "hostname": "0.0.0.0", - "mdns": false, - "cors": [ - "https://example.com" - ] } - } \ No newline at end of file + }, + "server": { + "port": 4096, + "hostname": "0.0.0.0", + "mdns": false, + "cors": [ + "https://example.com" + ] + } +} \ No newline at end of file