Files
hermes-agent/session_prequery_runner.py

82 lines
2.6 KiB
Python
Executable File

#!/usr/bin/env python3
"""
session_prequery_runner.py — Write fresh context to .last_prequery.md
Called by:
1. The pre_task hook (when hermes is invoked with a message arg)
2. Kate's morning cron (so interactive sessions have fallback context)
Keeps queries short (<30 chars) so vector search returns results.
"""
import subprocess, os
# Absolute paths to the bun runtime and the globally-installed gbrain CLI
# entry point (both hard-coded to this machine's home directory).
GBRAIN_BUN = "/home/ghstshdw/.bun/bin/bun"
GBRAIN_CLI = "/home/ghstshdw/.bun/install/global/node_modules/gbrain/src/cli.ts"
# Ollama endpoint gbrain uses for embeddings; 100.x address — presumably a
# Tailscale/VPN host, confirm reachability from wherever this hook runs.
OLLAMA_HOST = "http://100.127.137.64:11434"
# Output file: fallback context read by interactive hermes sessions
# (see module docstring — written by pre_task hook and the morning cron).
PREQUERY_OUT = os.path.expanduser("~/.hermes/hooks/.last_prequery.md")
def gbrain_query(question, top_k=4):
    """Run a gbrain vector-search query and return the CLI's stdout.

    Parameters:
        question: short query string (keep under ~30 chars; longer queries
            tend to return no vector-search results — see module docstring).
        top_k: number of results requested via the CLI's ``-n`` flag.

    Returns:
        The CLI's stdout on success, or "" on any failure: non-zero exit,
        30-second timeout, or a missing/unlaunchable bun binary. Failures
        are swallowed deliberately — this is a best-effort context hook and
        must never crash the caller.
    """
    env = os.environ.copy()
    # Point gbrain's embedding backend at the configured Ollama host.
    env["OLLAMA_HOST"] = OLLAMA_HOST
    cmd = [GBRAIN_BUN, GBRAIN_CLI, "query", question, "-n", str(top_k)]
    try:
        r = subprocess.run(cmd, capture_output=True, text=True, timeout=30, env=env)
    except (subprocess.TimeoutExpired, OSError):
        # Previously these propagated and killed the whole hook; treat a hung
        # or missing gbrain install the same as a failed query.
        return ""
    return r.stdout if r.returncode == 0 else ""
def get_page(slug):
    """Fetch a single brain page by slug via ``gbrain get``.

    Parameters:
        slug: page identifier, e.g. "session/2025-01-10-sync".

    Returns:
        The page content (CLI stdout) on success, or "" on any failure:
        non-zero exit, 15-second timeout, or a missing/unlaunchable bun
        binary. Like gbrain_query, failures are swallowed so the hook
        stays best-effort.
    """
    env = os.environ.copy()
    # Point gbrain's embedding backend at the configured Ollama host.
    env["OLLAMA_HOST"] = OLLAMA_HOST
    cmd = [GBRAIN_BUN, GBRAIN_CLI, "get", slug]
    try:
        r = subprocess.run(cmd, capture_output=True, text=True, timeout=15, env=env)
    except (subprocess.TimeoutExpired, OSError):
        # Previously these propagated and killed the whole hook; treat a hung
        # or missing gbrain install the same as a failed fetch.
        return ""
    return r.stdout if r.returncode == 0 else ""
def parse_slugs(results):
    """Extract page slugs from gbrain query output, first-seen order, no dups.

    Two line shapes are recognized (anything else is ignored):
      * "[score] session/<name> -- desc"  -> "session/<name>"
      * "[score] <path/slug> -- desc"     -> "<path/slug>", accepted only if
        it contains a "/" and does not end with one.
    """
    found = []
    for raw in results.split("\n"):
        entry = raw.strip()
        if not entry:
            continue
        if "] session/" in entry:
            # Session pages: take everything between the marker and " --".
            tail = entry.split("] session/")[1]
            found.append("session/" + tail.split(" --")[0])
        elif "] " in entry:
            pieces = entry.split("] ", 1)
            if len(pieces) == 2:
                candidate = pieces[1].split(" --")[0]
                # Require a path-like slug; reject bare words and
                # directory-style entries ending in "/".
                if "/" in candidate:
                    candidate = candidate.strip()
                    if candidate and not candidate.endswith("/"):
                        found.append(candidate)
    # De-duplicate while keeping the first occurrence of each slug.
    unique = []
    for slug in found:
        if slug not in unique:
            unique.append(slug)
    return unique
# Short queries — keep under ~30 chars so vector search returns results.
questions = [
    "GreySec projects",
    "project status",
    "keystone ir",
    "oisi sra",
    "decisions pending",
    "blockers",
]

# Fetch up to 2 pages per question, skipping slugs an earlier question
# already captured, and truncating each page so total context stays small.
pages = []
for q in questions:
    results = gbrain_query(q)
    for slug in parse_slugs(results)[:2]:
        if any(p["slug"] == slug for p in pages):
            continue  # already fetched via an earlier question
        content = get_page(slug)
        if content:
            pages.append({"slug": slug, "content": content[:600]})

# Render the collected pages as a markdown section.
lines = ["## Relevant Past Sessions & Brain Pages (from gbrain)\n"]
for i, page in enumerate(pages, 1):
    lines.append(f"### {i}. {page['slug']}")
    lines.append(page["content"])  # already truncated to 600 chars above
    lines.append("")
output = "\n".join(lines)

# Create ~/.hermes/hooks if missing so a fresh install doesn't crash here.
os.makedirs(os.path.dirname(PREQUERY_OUT), exist_ok=True)
with open(PREQUERY_OUT, "w") as f:
    f.write(output)
print(f"OK: {len(pages)} pages, {len(output)} bytes -> {PREQUERY_OUT}")