Initial commit: Ollama GPU Switcher

Simple web UI to toggle OpenClaw agents between work mode (qwen3 on ollama)
and lab mode (groq cloud fallback), giving the lab agent exclusive GPU access.

Features:
- One-click mode switching
- Real-time agent status
- Lab model selector
- Direct config file patching + gateway restart
This commit is contained in:
2026-02-18 17:16:35 +00:00
commit 3366d6d9ec
5 changed files with 677 additions and 0 deletions

195
app.py Normal file
View File

@@ -0,0 +1,195 @@
#!/usr/bin/env python3
"""
Ollama GPU Switcher — Toggle OpenClaw agents between work mode (qwen3) and lab mode (GPU exclusive).
No LLM involved. Reads/writes openclaw.json directly, then signals the gateway to restart.
"""
import json
import os
import signal
import subprocess
import copy
from flask import Flask, jsonify, request, send_from_directory
app = Flask(__name__, static_folder="static")
# Config file the switcher patches; override location via OPENCLAW_CONFIG.
CONFIG_PATH = os.environ.get("OPENCLAW_CONFIG", os.path.expanduser("~/.openclaw/openclaw.json"))
# Agents that use ollama and compete for GPU
OLLAMA_AGENTS = ["rex", "maddy", "coder", "research"]
# Primary model assigned to OLLAMA_AGENTS in "work" mode (local ollama).
WORK_PRIMARY = "ollama/qwen3-128k:14b"
# Primary model assigned in "lab" mode (cloud fallback, frees the GPU).
LAB_PRIMARY = "groq/llama-3.3-70b-versatile"
def read_config():
    """Load and return the OpenClaw config from CONFIG_PATH as a dict.

    Opens with an explicit UTF-8 encoding so parsing does not depend on
    the process locale (JSON is UTF-8 by specification).

    Raises:
        OSError: if the file cannot be opened.
        json.JSONDecodeError: if the file is not valid JSON.
    """
    with open(CONFIG_PATH, "r", encoding="utf-8") as f:
        return json.load(f)
def write_config(config):
    """Serialize *config* to CONFIG_PATH as pretty-printed JSON.

    Writes with an explicit UTF-8 encoding so output does not depend on
    the process locale, and appends a trailing newline so the file plays
    nicely with diff/POSIX text tools.
    """
    with open(CONFIG_PATH, "w", encoding="utf-8") as f:
        json.dump(config, f, indent=2)
        f.write("\n")
def restart_gateway():
    """Restart the openclaw gateway via CLI.

    Tries `openclaw gateway restart` first; if that raises or exits
    nonzero, falls back to sending SIGUSR1 to a process matched by
    pgrep. Best-effort by design — callers only learn success/failure.

    Returns:
        bool: True if a restart was triggered, False otherwise.
    """
    try:
        result = subprocess.run(
            ["openclaw", "gateway", "restart"],
            timeout=10,
            capture_output=True,
        )
        # Original ignored the exit status; a nonzero return code means
        # the CLI did NOT restart anything, so only report success on 0.
        if result.returncode == 0:
            return True
    except (OSError, subprocess.SubprocessError):
        # CLI missing or timed out — fall through to the signal path.
        pass
    # Fallback: try SIGUSR1 to the gateway process
    try:
        result = subprocess.run(
            ["pgrep", "-f", "openclaw.*gateway"],
            capture_output=True,
            text=True,
        )
        if result.stdout.strip():
            pid = int(result.stdout.strip().split("\n")[0])
            os.kill(pid, signal.SIGUSR1)
            return True
    except (OSError, ValueError, subprocess.SubprocessError):
        pass
    return False
def find_agent(config, agent_id):
    """Return the agent dict whose "id" matches *agent_id*, or None."""
    roster = config.get("agents", {}).get("list", [])
    return next((entry for entry in roster if entry.get("id") == agent_id), None)
def detect_mode(config):
    """Classify the current routing mode of the ollama agent group.

    Returns:
        str: "work" if every agent in OLLAMA_AGENTS has an ollama/
        primary, "lab" if all of them are on groq/, otherwise "mixed".
    """
    ollama_count = 0
    groq_count = 0
    for agent_id in OLLAMA_AGENTS:
        agent = find_agent(config, agent_id)
        if agent:
            primary = agent.get("model", {}).get("primary", "")
            # Prefix match instead of substring: a model id that merely
            # contains "ollama/" somewhere (e.g. a proxied/namespaced id)
            # should not be counted as an ollama primary.
            if primary.startswith("ollama/"):
                ollama_count += 1
            elif primary.startswith("groq/"):
                groq_count += 1
    if ollama_count == len(OLLAMA_AGENTS):
        return "work"
    elif groq_count >= len(OLLAMA_AGENTS):
        return "lab"
    return "mixed"
@app.route("/")
def index():
    """Serve the single-page UI shell."""
    page = "index.html"
    return send_from_directory("static", page)
@app.route("/api/status")
def status():
    """Report the detected mode plus each agent's current primary model.

    Response JSON: ok, mode, lab (name/model), agents (id/name/model per
    ollama agent found in config), subagentsPrimary.
    """
    try:
        cfg = read_config()
        current_mode = detect_mode(cfg)
        roster = []
        for aid in OLLAMA_AGENTS:
            entry = find_agent(cfg, aid)
            if not entry:
                continue
            roster.append({
                "id": entry["id"],
                "name": entry.get("name", entry["id"]),
                "model": entry.get("model", {}).get("primary", "unknown"),
            })
        lab_agent = find_agent(cfg, "lab")
        if lab_agent:
            lab_info = {
                "name": lab_agent.get("name", "Eric"),
                "model": lab_agent.get("model", {}).get("primary", "unknown"),
            }
        else:
            lab_info = {"name": "Eric", "model": "unknown"}
        # Subagents default
        defaults = cfg.get("agents", {}).get("defaults", {})
        sub_primary = defaults.get("subagents", {}).get("model", {}).get("primary", "unknown")
        return jsonify({
            "ok": True,
            "mode": current_mode,
            "lab": lab_info,
            "agents": roster,
            "subagentsPrimary": sub_primary,
        })
    except Exception as e:
        return jsonify({"ok": False, "error": str(e)}), 500
@app.route("/api/switch", methods=["POST"])
def switch():
    """Flip every ollama agent (and the subagent default) between modes.

    Body JSON: {"mode": "work" | "lab"} (defaults to "work").
    Rewrites the config, then restarts the gateway.
    """
    try:
        payload = request.json or {}
        target_mode = payload.get("mode", "work")
        mode_to_primary = {"work": WORK_PRIMARY, "lab": LAB_PRIMARY}
        if target_mode not in mode_to_primary:
            return jsonify({"ok": False, "error": f"Unknown mode: {target_mode}"}), 400
        new_primary = mode_to_primary[target_mode]
        cfg = read_config()
        # Patch each agent's primary model
        for agent_id in OLLAMA_AGENTS:
            entry = find_agent(cfg, agent_id)
            if entry:
                entry.setdefault("model", {})["primary"] = new_primary
        # Patch subagents default
        sub_model = (
            cfg.setdefault("agents", {})
            .setdefault("defaults", {})
            .setdefault("subagents", {})
            .setdefault("model", {})
        )
        sub_model["primary"] = new_primary
        write_config(cfg)
        bounced = restart_gateway()
        return jsonify({
            "ok": True,
            "mode": target_mode,
            "restarted": bounced,
        })
    except Exception as e:
        return jsonify({"ok": False, "error": str(e)}), 500
@app.route("/api/lab-model", methods=["POST"])
def set_lab_model():
    """Point the lab agent at a caller-chosen primary model and restart.

    Body JSON: {"model": "<provider/model-id>"}. 400 if the model is
    missing/empty, 404 if no lab agent exists in the config.
    """
    try:
        payload = request.json or {}
        chosen = payload.get("model", "")
        if not chosen:
            return jsonify({"ok": False, "error": "No model specified"}), 400
        cfg = read_config()
        lab_agent = find_agent(cfg, "lab")
        if not lab_agent:
            return jsonify({"ok": False, "error": "Lab agent not found"}), 404
        lab_agent.setdefault("model", {})["primary"] = chosen
        write_config(cfg)
        bounced = restart_gateway()
        return jsonify({"ok": True, "model": chosen, "restarted": bounced})
    except Exception as e:
        return jsonify({"ok": False, "error": str(e)}), 500
if __name__ == "__main__":
    # Port is overridable via the PORT environment variable.
    listen_port = int(os.environ.get("PORT", 8585))
    print(f"🔀 Ollama GPU Switcher running on http://0.0.0.0:{listen_port}")
    print(f"📄 Config: {CONFIG_PATH}")
    app.run(host="0.0.0.0", port=listen_port, debug=False)