From fc6eafa44bf4c63577d881b004f55cb04f0e93f6 Mon Sep 17 00:00:00 2001
From: ettienne <12529192+jhweb@users.noreply.github.com>
Date: Tue, 21 Apr 2026 18:55:07 +0200
Subject: [PATCH 01/10] fix(livingdash): correct repo identity and default
branch in dashboard snapshot
Improve LivingDash snapshot detection so the UI shows BrainDrain MCP branding, reports the repository's default branch, and generates a meaningful BrainDrain repo brief with moving-part context.
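A trimmed sketch of the snapshot this produces (field names from
build_dashboard_snapshot; values illustrative):

    {
      "repo": {"project_name": "BrainDrain MCP"},
      "workspace_signals": {"git": {"branch": "main", "default_branch": "main"}},
      "narrative": {"repo_brief": "... Moving parts: 2 active MCP tools, 4 startup stages, ..."}
    }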
Made-with: Cursor
---
braindrain/livingdash.py | 386 +++++++++++++++++++++++++++++++++++++++
tests/test_livingdash.py | 164 +++++++++++++++++
2 files changed, 550 insertions(+)
create mode 100644 braindrain/livingdash.py
create mode 100644 tests/test_livingdash.py
diff --git a/braindrain/livingdash.py b/braindrain/livingdash.py
new file mode 100644
index 0000000..f429604
--- /dev/null
+++ b/braindrain/livingdash.py
@@ -0,0 +1,386 @@
+from __future__ import annotations
+
+import json
+import os
+import secrets
+import socket
+import re
+import subprocess
+import sys
+from dataclasses import asdict, dataclass
+from pathlib import Path
+from typing import Any
+
+import yaml
+
+
+@dataclass(frozen=True)
+class RuntimePaths:
+ root: Path
+ server: Path
+ ui: Path
+ data: Path
+ snapshot: Path
+ status: Path
+ auth: Path
+ pid: Path
+
+
+SERVER_SHIM = """from braindrain.livingdash_sidecar import main\n\nif __name__ == "__main__":\n main()\n"""
+
+
+def _runtime_paths(project_root: Path) -> RuntimePaths:
+ root = project_root / ".ldash"
+ data = root / "data"
+ return RuntimePaths(
+ root=root,
+ server=root / "server",
+ ui=root / "ui",
+ data=data,
+ snapshot=data / "snapshot.json",
+ status=data / "status.json",
+ auth=data / "auth.json",
+ pid=data / "livingdash.pid",
+ )
+
+
+def ensure_livingdash_runtime(project_root: str | Path) -> RuntimePaths:
+ project_root = Path(project_root).expanduser().resolve()
+ paths = _runtime_paths(project_root)
+ for path in (paths.root, paths.server, paths.ui, paths.data):
+ path.mkdir(parents=True, exist_ok=True)
+ app_py = paths.server / "app.py"
+ if not app_py.exists():
+ app_py.write_text(SERVER_SHIM, encoding="utf-8")
+ return paths
+
+
+def _read_project_name(project_root: Path) -> str:
+ config_path = project_root / "config" / "hub_config.yaml"
+ if config_path.exists():
+ try:
+ data = yaml.safe_load(config_path.read_text(encoding="utf-8")) or {}
+ except Exception:
+ data = {}
+ if isinstance(data, dict):
+ name = data.get("project_name")
+ if isinstance(name, str) and name.strip():
+ normalized = name.strip().lower()
+ if normalized == "braindrain":
+ return "BrainDrain MCP"
+ return name.strip()
+ if project_root.name.lower() == "brain_mcp_hub":
+ return "BrainDrain MCP"
+ return project_root.name
+
+
+def _detect_env_files(project_root: Path) -> list[dict[str, str]]:
+ names = [".env", ".env.dev", ".env.prod", ".env.local", ".env.example"]
+ found: list[dict[str, str]] = []
+ for name in names:
+ path = project_root / name
+ if path.exists():
+ found.append({"name": name, "path": str(path)})
+ return found
+
+
+def _detect_agents(project_root: Path) -> dict[str, Any]:
+ agent_dir = project_root / ".cursor" / "agents"
+ agents = sorted(p.stem for p in agent_dir.glob("*.md")) if agent_dir.exists() else []
+ return {"count": len(agents), "items": agents}
+
+
+def _detect_git_state(project_root: Path) -> dict[str, Any]:
+ try:
+ current_branch = subprocess.run(
+ ["git", "branch", "--show-current"],
+ cwd=project_root,
+ check=True,
+ capture_output=True,
+ text=True,
+ ).stdout.strip()
+ except Exception:
+ current_branch = None
+
+ default_branch = None
+ try:
+ remote_head = subprocess.run(
+ ["git", "symbolic-ref", "--short", "refs/remotes/origin/HEAD"],
+ cwd=project_root,
+ check=True,
+ capture_output=True,
+ text=True,
+ ).stdout.strip()
+ if "/" in remote_head:
+ default_branch = remote_head.split("/", 1)[1]
+ except Exception:
+ default_branch = None
+
+ if not default_branch:
+ try:
+ has_main = subprocess.run(
+ ["git", "show-ref", "--verify", "--quiet", "refs/heads/main"],
+ cwd=project_root,
+ check=False,
+ capture_output=False,
+ ).returncode == 0
+ if has_main:
+ default_branch = "main"
+ except Exception:
+ default_branch = None
+
+ try:
+ dirty = subprocess.run(
+ ["git", "status", "--porcelain"],
+ cwd=project_root,
+ check=True,
+ capture_output=True,
+ text=True,
+ ).stdout.strip()
+ is_dirty = bool(dirty)
+ except Exception:
+ is_dirty = None
+
+ return {
+ "branch": default_branch or current_branch,
+ "current_branch": current_branch,
+ "default_branch": default_branch,
+ "dirty": is_dirty,
+ }
+
+
+def _detect_mcp_tools(project_root: Path) -> dict[str, Any]:
+ config_path = project_root / "config" / "hub_config.yaml"
+ active: list[dict[str, Any]] = []
+ if config_path.exists():
+ try:
+ data = yaml.safe_load(config_path.read_text(encoding="utf-8")) or {}
+ except Exception:
+ data = {}
+ for tool in (data.get("mcp_tools") or []):
+ if not isinstance(tool, dict):
+ continue
+ name = str(tool.get("name", "")).strip()
+ if not name:
+ continue
+ if bool(tool.get("hot", False)) or not bool(tool.get("defer_loading", True)):
+ active.append({"name": name, "hot": bool(tool.get("hot", False))})
+ return {"active": active, "count": len(active)}
+
+
+def _read_repo_brief(project_root: Path) -> str:
+ readme = project_root / "README.md"
+ if readme.exists():
+ lines = [line.strip() for line in readme.read_text(encoding="utf-8").splitlines()]
+ prose: list[str] = []
+ for line in lines:
+ if not line:
+ continue
+ if line.startswith(("#", "|", "```", "---", "- ")):
+ continue
+ if line.startswith(("**Version:**", "**Last Updated:**")):
+ continue
+ if re.fullmatch(r"[*_`~\s-]+", line):
+ continue
+ prose.append(line)
+ if prose:
+ return " ".join(prose[:2]).strip()
+ return f"{project_root.name} local dashboard snapshot."
+
+
+def _compose_repo_brief(project_root: Path, mcp_tools: dict[str, Any], startup_flow: dict[str, Any]) -> str:
+ readme_brief = _read_repo_brief(project_root)
+ hot_tools = int(mcp_tools.get("count", 0) or 0)
+ startup_steps = len((startup_flow.get("steps", []) or []))
+ moving_parts = f"Moving parts: {hot_tools} active MCP tools, {startup_steps} startup stages, and signal-first telemetry."
+ return f"{readme_brief} {moving_parts}"
+
+
+def _detect_key_modules(project_root: Path) -> list[dict[str, str]]:
+ candidates = [
+ ("server", project_root / "braindrain" / "server.py"),
+ ("config", project_root / "braindrain" / "config.py"),
+ ("workflow_engine", project_root / "braindrain" / "workflow_engine.py"),
+ ("telemetry", project_root / "braindrain" / "telemetry.py"),
+ ]
+ return [
+ {"id": key, "path": str(path.relative_to(project_root))}
+ for key, path in candidates
+ if path.exists()
+ ]
+
+
+def _detect_startup_flow(project_root: Path) -> dict[str, Any]:
+ server_file = project_root / "braindrain" / "server.py"
+ text = server_file.read_text(encoding="utf-8") if server_file.exists() else ""
+ step_defs = [
+ ("load_config", "config = Config", "Load config"),
+ ("build_registry", "registry = ToolRegistry", "Build registry"),
+ ("init_telemetry", "telemetry = telemetry_from_config", "Init telemetry"),
+ ("expose_mcp_tools", "@mcp.tool()", "Expose MCP tools"),
+ ]
+ steps = [
+ {"id": step_id, "label": label}
+ for step_id, marker, label in step_defs
+ if marker in text
+ ]
+ return {"title": "Startup Flow", "steps": steps}
+
+
+def _detect_map_summary(project_root: Path) -> dict[str, Any]:
+ py_files = list(project_root.rglob("*.py"))
+ return {
+ "mode": "2d_systems_map",
+ "nodes": len(py_files),
+ "hotspots": min(7, len(py_files)),
+ "key_modules": _detect_key_modules(project_root),
+ }
+
+
+def build_dashboard_snapshot(project_root: str | Path) -> dict[str, Any]:
+ project_root = Path(project_root).expanduser().resolve()
+ workspace_name = project_root.name
+ project_name = _read_project_name(project_root)
+ git = _detect_git_state(project_root)
+ env_files = _detect_env_files(project_root)
+ agents = _detect_agents(project_root)
+ mcp_tools = _detect_mcp_tools(project_root)
+ startup_flow = _detect_startup_flow(project_root)
+ repo_brief = _compose_repo_brief(project_root, mcp_tools, startup_flow)
+
+ return {
+ "workspace": {"name": workspace_name, "root": str(project_root)},
+ "repo": {"project_name": project_name, "path": str(project_root)},
+ "workspace_signals": {
+ "env_files": env_files,
+ "agents": agents,
+ "git": git,
+ "mcp_tools": mcp_tools,
+ },
+ "narrative": {
+ "repo_brief": repo_brief,
+ "startup_flow": startup_flow,
+ "key_modules": _detect_key_modules(project_root),
+ },
+ "insights": {
+ "token_saving_active": True,
+ "env_drift": 0,
+ "primary_entrypoint_count": 1 if startup_flow["steps"] else 0,
+ },
+ "map_summary": _detect_map_summary(project_root),
+ "actions": [
+ {"id": "run_tests", "label": "Run tests", "kind": "command"},
+ {"id": "inspect_env", "label": "Inspect env", "kind": "view"},
+ {"id": "show_branches", "label": "Show branches", "kind": "view"},
+ {"id": "active_tools", "label": "Active MCP tools", "kind": "view"},
+ ],
+ }
+
+
+def _pick_port() -> int:
+ with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
+ sock.bind(("127.0.0.1", 0))
+ return int(sock.getsockname()[1])
+
+
+class LivingDashManager:
+ def __init__(self, project_root: str | Path):
+ self.project_root = Path(project_root).expanduser().resolve()
+ self.paths = ensure_livingdash_runtime(self.project_root)
+
+ def _load_json(self, path: Path, default: dict[str, Any]) -> dict[str, Any]:
+ if not path.exists():
+ return default
+ try:
+ return json.loads(path.read_text(encoding="utf-8"))
+ except Exception:
+ return default
+
+ def _save_json(self, path: Path, payload: dict[str, Any]) -> None:
+ path.parent.mkdir(parents=True, exist_ok=True)
+ path.write_text(json.dumps(payload, indent=2), encoding="utf-8")
+
+ def ensure_auth(self) -> dict[str, str]:
+ auth = self._load_json(self.paths.auth, {})
+ if auth.get("password"):
+ return auth
+ auth = {
+ "username": "admin",
+ "password": secrets.token_urlsafe(12),
+ "session_secret": secrets.token_urlsafe(24),
+ }
+ self._save_json(self.paths.auth, auth)
+ return auth
+
+ def refresh(self) -> dict[str, Any]:
+ snapshot = build_dashboard_snapshot(self.project_root)
+ self._save_json(self.paths.snapshot, snapshot)
+ status = self._load_json(self.paths.status, {})
+ status.update(
+ {
+ "snapshot_path": str(self.paths.snapshot),
+ "project_root": str(self.project_root),
+ "running": bool(status.get("running", False)),
+ }
+ )
+ self._save_json(self.paths.status, status)
+ return {"ok": True, "snapshot_path": str(self.paths.snapshot)}
+
+ def status(self) -> dict[str, Any]:
+ status = self._load_json(self.paths.status, {})
+ pid = status.get("pid")
+ running = bool(status.get("running", False))
+ if pid:
+ try:
+ os.kill(int(pid), 0)
+ except OSError:
+ running = False
+ else:
+ running = False
+ return {
+ "running": running,
+ "pid": pid,
+ "url": status.get("url"),
+ "paths": asdict(self.paths),
+ }
+
+ def start(self) -> dict[str, Any]:
+ auth = self.ensure_auth()
+ self.refresh()
+ port = _pick_port()
+ env = os.environ.copy()
+ env["LIVINGDASH_PROJECT_ROOT"] = str(self.project_root)
+ env["LIVINGDASH_DATA_DIR"] = str(self.paths.data)
+ env["LIVINGDASH_UI_DIST"] = str(self.paths.ui / "dist")
+ env["LIVINGDASH_SESSION_SECRET"] = auth["session_secret"]
+ proc = subprocess.Popen(
+ [sys.executable, "-m", "braindrain.livingdash_sidecar", "--port", str(port)],
+ cwd=self.project_root,
+ env=env,
+ stdout=subprocess.DEVNULL,
+ stderr=subprocess.DEVNULL,
+ )
+ status = {
+ "running": True,
+ "pid": proc.pid,
+ "url": f"http://127.0.0.1:{port}",
+ "project_root": str(self.project_root),
+ "snapshot_path": str(self.paths.snapshot),
+ }
+ self.paths.pid.write_text(str(proc.pid), encoding="utf-8")
+ self._save_json(self.paths.status, status)
+ return {"ok": True, "url": status["url"], "pid": proc.pid, "credentials": auth}
+
+ def stop(self) -> dict[str, Any]:
+ status = self._load_json(self.paths.status, {})
+ pid = status.get("pid")
+ if pid:
+ try:
+ os.kill(int(pid), 15)
+ except OSError:
+ pass
+ status.update({"running": False, "pid": None})
+ self._save_json(self.paths.status, status)
+ if self.paths.pid.exists():
+ self.paths.pid.unlink()
+ return {"ok": True, "stopped": True}
diff --git a/tests/test_livingdash.py b/tests/test_livingdash.py
new file mode 100644
index 0000000..dab9aa1
--- /dev/null
+++ b/tests/test_livingdash.py
@@ -0,0 +1,164 @@
+from __future__ import annotations
+
+import json
+import shutil
+import subprocess
+import uuid
+from pathlib import Path
+
+import pytest
+from fastapi.testclient import TestClient
+
+from braindrain.livingdash import (
+ LivingDashManager,
+ build_dashboard_snapshot,
+ ensure_livingdash_runtime,
+)
+from braindrain.livingdash_sidecar import create_app
+
+_REPO_ROOT = Path(__file__).resolve().parent.parent
+
+
+@pytest.fixture
+def tmp_project_dir() -> Path:
+ d = _REPO_ROOT / ".pytest_tmp" / f"ldash-{uuid.uuid4().hex[:12]}"
+ d.mkdir(parents=True, exist_ok=True)
+ try:
+ yield d
+ finally:
+ shutil.rmtree(d, ignore_errors=True)
+
+
+def _make_sample_project(root: Path) -> Path:
+ project = root / "sample-project"
+ (project / "braindrain").mkdir(parents=True, exist_ok=True)
+ (project / "config").mkdir(parents=True, exist_ok=True)
+ (project / ".cursor" / "agents").mkdir(parents=True, exist_ok=True)
+
+ (project / "README.md").write_text(
+ "# braindrain\n\n"
+ "An MCP server that keeps AI agents lean by caching environment context,\n"
+ "deferring heavy tools, and routing large outputs.\n",
+ encoding="utf-8",
+ )
+ (project / "config" / "hub_config.yaml").write_text(
+ 'project_name: "braindrain"\n'
+ "mcp_tools:\n"
+ " - name: get_env_context\n"
+ " hot: true\n"
+ " - name: search_tools\n"
+ " hot: true\n",
+ encoding="utf-8",
+ )
+ (project / "braindrain" / "server.py").write_text(
+ "config = Config('config/hub_config.yaml')\n"
+ "registry = ToolRegistry(config.data)\n"
+ "telemetry = telemetry_from_config({})\n"
+ "@mcp.tool()\n"
+ "def search_tools():\n"
+ " return {}\n",
+ encoding="utf-8",
+ )
+ (project / ".env.dev").write_text("OPENAI_API_KEY=test\n", encoding="utf-8")
+ (project / ".cursor" / "agents" / "research.md").write_text("# research\n", encoding="utf-8")
+ (project / ".cursor" / "agents" / "gitops.md").write_text("# gitops\n", encoding="utf-8")
+
+ subprocess.run(
+ ["git", "init", "-b", "main"],
+ cwd=project,
+ check=True,
+ stdout=subprocess.DEVNULL,
+ stderr=subprocess.DEVNULL,
+ )
+ return project
+
+
+def test_ensure_livingdash_runtime_creates_isolated_layout(tmp_project_dir: Path) -> None:
+ project = _make_sample_project(tmp_project_dir)
+
+ runtime = ensure_livingdash_runtime(project)
+
+ root = project / ".ldash"
+ assert runtime.root == root
+ assert (root / "server").is_dir()
+ assert (root / "ui").is_dir()
+ assert (root / "data").is_dir()
+ assert (root / "server" / "app.py").is_file()
+
+
+def test_build_dashboard_snapshot_collects_workspace_signals(tmp_project_dir: Path) -> None:
+ project = _make_sample_project(tmp_project_dir)
+
+ snapshot = build_dashboard_snapshot(project)
+
+ assert snapshot["workspace"]["name"] == "sample-project"
+ assert snapshot["repo"]["project_name"] == "BrainDrain MCP"
+ assert snapshot["workspace_signals"]["git"]["branch"] == "main"
+ assert snapshot["workspace_signals"]["git"]["default_branch"] == "main"
+ assert snapshot["workspace_signals"]["agents"]["count"] == 2
+ env_names = {item["name"] for item in snapshot["workspace_signals"]["env_files"]}
+ assert ".env.dev" in env_names
+ hot_tools = {item["name"] for item in snapshot["workspace_signals"]["mcp_tools"]["active"]}
+ assert {"get_env_context", "search_tools"} <= hot_tools
+
+
+def test_build_dashboard_snapshot_derives_repo_brief_and_startup_flow(tmp_project_dir: Path) -> None:
+ project = _make_sample_project(tmp_project_dir)
+
+ snapshot = build_dashboard_snapshot(project)
+
+ assert "keeps AI agents lean" in snapshot["narrative"]["repo_brief"]
+ assert "Moving parts:" in snapshot["narrative"]["repo_brief"]
+ assert "**Version:**" not in snapshot["narrative"]["repo_brief"]
+ step_ids = [step["id"] for step in snapshot["narrative"]["startup_flow"]["steps"]]
+ assert step_ids == ["load_config", "build_registry", "init_telemetry", "expose_mcp_tools"]
+
+
+def test_manager_refresh_writes_snapshot_and_status_starts_stopped(tmp_project_dir: Path) -> None:
+ project = _make_sample_project(tmp_project_dir)
+ manager = LivingDashManager(project)
+
+ status = manager.status()
+ assert status["running"] is False
+ assert status["url"] is None
+
+ refreshed = manager.refresh()
+ snapshot_path = project / ".ldash" / "data" / "snapshot.json"
+
+ assert refreshed["ok"] is True
+ assert snapshot_path.is_file()
+ payload = json.loads(snapshot_path.read_text(encoding="utf-8"))
+ assert payload["workspace"]["name"] == "sample-project"
+
+
+def test_sidecar_auth_and_snapshot_endpoints(tmp_project_dir: Path) -> None:
+ project = _make_sample_project(tmp_project_dir)
+ manager = LivingDashManager(project)
+ auth = manager.ensure_auth()
+ manager.refresh()
+
+ app = create_app(
+ project_root=project,
+ data_dir=project / ".ldash" / "data",
+ ui_dist=project / ".ldash" / "ui" / "dist",
+ auth_config=auth,
+ )
+ client = TestClient(app)
+
+ session = client.get("/api/auth/session")
+ assert session.status_code == 200
+ assert session.json()["authenticated"] is False
+
+ login = client.post("/api/auth/login", json={"username": "admin", "password": auth["password"]})
+ assert login.status_code == 200
+
+ session = client.get("/api/auth/session")
+ assert session.json()["authenticated"] is True
+
+ snapshot = client.get("/api/snapshot")
+ assert snapshot.status_code == 200
+ assert snapshot.json()["narrative"]["startup_flow"]["steps"]
+
+ live = client.get("/api/live")
+ assert live.status_code == 200
+ assert "workspace_signals" in live.json()
From a90b7a2725d9121ae1fd66e19eed7ccd00a9afb2 Mon Sep 17 00:00:00 2001
From: ettienne <12529192+jhweb@users.noreply.github.com>
Date: Wed, 22 Apr 2026 10:08:04 +0200
Subject: [PATCH 02/10] fix(livingdash): move runtime state under .braindrain
and document boundary
Move LivingDash runtime data and secrets to .braindrain/ldash with legacy migration support, and document the storage split so .ldash can remain shareable scaffold code.
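Resulting split (a sketch; migration copies each legacy file once when
the new path does not exist yet):

    Before: .ldash/data/auth.json
    After:  .braindrain/ldash/data/auth.json

The same one-time copy applies to status.json, snapshot.json, and
livingdash.pid.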
Made-with: Cursor
---
README.md | 11 +++++++++++
braindrain/livingdash.py | 28 ++++++++++++++++++++++++----
tests/test_livingdash.py | 28 ++++++++++++++++++++++------
3 files changed, 57 insertions(+), 10 deletions(-)
diff --git a/README.md b/README.md
index 6745c38..b7ec332 100644
--- a/README.md
+++ b/README.md
@@ -31,6 +31,17 @@ OS environment data is probed once, cached locally, and served instantly on ever
---
+## LivingDash storage boundary
+
+LivingDash uses a split layout so scaffold code can be versioned while runtime state stays local-only:
+
+- `.ldash/` contains the dashboard scaffold and UI source/build files.
+- `.braindrain/ldash/data/` contains runtime and sensitive state (`auth.json`, `status.json`, `snapshot.json`, `livingdash.pid`).
+
+This keeps passwords/session secrets out of shareable dashboard scaffold paths and aligns with the project rule that `.braindrain/` is machine-local.
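+
+A minimal sketch of the resulting layout (paths as created by `ensure_livingdash_runtime`):
+
+```text
+.ldash/
+  server/app.py        # generated server shim (shareable)
+  ui/                  # dashboard UI source and build output
+.braindrain/ldash/
+  data/
+    auth.json          # credentials + session secret (local-only)
+    status.json
+    snapshot.json
+    livingdash.pid
+```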
+
+---
+
## Tools
### Environment
diff --git a/braindrain/livingdash.py b/braindrain/livingdash.py
index f429604..4cbea6e 100644
--- a/braindrain/livingdash.py
+++ b/braindrain/livingdash.py
@@ -17,6 +17,7 @@
@dataclass(frozen=True)
class RuntimePaths:
root: Path
+ scaffold_root: Path
server: Path
ui: Path
data: Path
@@ -30,12 +31,14 @@ class RuntimePaths:
def _runtime_paths(project_root: Path) -> RuntimePaths:
- root = project_root / ".ldash"
+ scaffold_root = project_root / ".ldash"
+ root = project_root / ".braindrain" / "ldash"
data = root / "data"
return RuntimePaths(
root=root,
- server=root / "server",
- ui=root / "ui",
+ scaffold_root=scaffold_root,
+ server=scaffold_root / "server",
+ ui=scaffold_root / "ui",
data=data,
snapshot=data / "snapshot.json",
status=data / "status.json",
@@ -44,11 +47,28 @@ def _runtime_paths(project_root: Path) -> RuntimePaths:
)
+def _migrate_legacy_runtime_data(paths: RuntimePaths) -> None:
+ legacy_data = paths.scaffold_root / "data"
+ if not legacy_data.exists() or not legacy_data.is_dir():
+ return
+
+ for source, dest in (
+ (legacy_data / "snapshot.json", paths.snapshot),
+ (legacy_data / "status.json", paths.status),
+ (legacy_data / "auth.json", paths.auth),
+ (legacy_data / "livingdash.pid", paths.pid),
+ ):
+ if source.exists() and not dest.exists():
+ dest.parent.mkdir(parents=True, exist_ok=True)
+ dest.write_bytes(source.read_bytes())
+
+
def ensure_livingdash_runtime(project_root: str | Path) -> RuntimePaths:
project_root = Path(project_root).expanduser().resolve()
paths = _runtime_paths(project_root)
- for path in (paths.root, paths.server, paths.ui, paths.data):
+ for path in (paths.root, paths.scaffold_root, paths.server, paths.ui, paths.data):
path.mkdir(parents=True, exist_ok=True)
+ _migrate_legacy_runtime_data(paths)
app_py = paths.server / "app.py"
if not app_py.exists():
app_py.write_text(SERVER_SHIM, encoding="utf-8")
diff --git a/tests/test_livingdash.py b/tests/test_livingdash.py
index dab9aa1..33d49cb 100644
--- a/tests/test_livingdash.py
+++ b/tests/test_livingdash.py
@@ -78,12 +78,14 @@ def test_ensure_livingdash_runtime_creates_isolated_layout(tmp_project_dir: Path
runtime = ensure_livingdash_runtime(project)
- root = project / ".ldash"
+ root = project / ".braindrain" / "ldash"
+ scaffold = project / ".ldash"
assert runtime.root == root
- assert (root / "server").is_dir()
- assert (root / "ui").is_dir()
+ assert runtime.scaffold_root == scaffold
+ assert (scaffold / "server").is_dir()
+ assert (scaffold / "ui").is_dir()
assert (root / "data").is_dir()
- assert (root / "server" / "app.py").is_file()
+ assert (scaffold / "server" / "app.py").is_file()
def test_build_dashboard_snapshot_collects_workspace_signals(tmp_project_dir: Path) -> None:
@@ -123,7 +125,7 @@ def test_manager_refresh_writes_snapshot_and_status_starts_stopped(tmp_project_d
assert status["url"] is None
refreshed = manager.refresh()
- snapshot_path = project / ".ldash" / "data" / "snapshot.json"
+ snapshot_path = project / ".braindrain" / "ldash" / "data" / "snapshot.json"
assert refreshed["ok"] is True
assert snapshot_path.is_file()
@@ -139,7 +141,7 @@ def test_sidecar_auth_and_snapshot_endpoints(tmp_project_dir: Path) -> None:
app = create_app(
project_root=project,
- data_dir=project / ".ldash" / "data",
+ data_dir=project / ".braindrain" / "ldash" / "data",
ui_dist=project / ".ldash" / "ui" / "dist",
auth_config=auth,
)
@@ -162,3 +164,17 @@ def test_sidecar_auth_and_snapshot_endpoints(tmp_project_dir: Path) -> None:
live = client.get("/api/live")
assert live.status_code == 200
assert "workspace_signals" in live.json()
+
+
+def test_ensure_livingdash_runtime_migrates_legacy_ldash_data(tmp_project_dir: Path) -> None:
+ project = _make_sample_project(tmp_project_dir)
+ legacy_data = project / ".ldash" / "data"
+ legacy_data.mkdir(parents=True, exist_ok=True)
+ (legacy_data / "auth.json").write_text('{"username":"admin","password":"legacy"}', encoding="utf-8")
+ (legacy_data / "status.json").write_text('{"running": false}', encoding="utf-8")
+
+ runtime = ensure_livingdash_runtime(project)
+
+ assert runtime.auth.exists()
+ assert runtime.status.exists()
+ assert json.loads(runtime.auth.read_text(encoding="utf-8"))["password"] == "legacy"
From b550f8f953d6f4b69b8642673feef727a849cf3e Mon Sep 17 00:00:00 2001
From: ettienne <12529192+jhweb@users.noreply.github.com>
Date: Wed, 22 Apr 2026 10:17:23 +0200
Subject: [PATCH 03/10] feat: enhance Codex CLI support and add LivingDash
sidecar
- Updated configuration handling for Codex CLI to read the TOML config at ~/.codex/config.toml instead of the legacy JSON path (see the sketch below).
- Introduced a new LivingDash sidecar for improved UI and runtime state management.
- Added tests for Codex TOML detection and installer defaults.
- Updated .gitignore to include new log and documentation directories.
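For reference, a Codex CLI config the probe now detects looks like this
(illustrative; it mirrors the fixture in tests/test_env_probe_codex.py):

    # ~/.codex/config.toml
    [mcp_servers.braindrain]
    command = "/bin/true"
    args = []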
Made-with: Cursor - EC3
---
.gitignore | 8 +-
braindrain/env_probe.py | 21 +-
braindrain/livingdash_sidecar.py | 351 +++++++++++++++++++++++++++++++
scripts/install/configure_mcp.py | 13 +-
tests/test_env_probe_codex.py | 101 +++++++++
5 files changed, 480 insertions(+), 14 deletions(-)
create mode 100644 braindrain/livingdash_sidecar.py
create mode 100644 tests/test_env_probe_codex.py
diff --git a/.gitignore b/.gitignore
index 357046b..79a0591 100644
--- a/.gitignore
+++ b/.gitignore
@@ -15,6 +15,11 @@ venv/
!.env.example
*.log
+*.logs
+.ldash/
+
+# Docs
+docs/
# QA / installer scratch (local-only)
QA-Logs/
@@ -47,11 +52,10 @@ config.toml.bak
CRUSH.md
# Tests are dev-only (not needed on consumer installs)
-tests/
-!tests/
tests/*
!tests/test_workspace_primer_hooks.py
!tests/test_scriptlib.py
+!tests/test_env_probe_codex.py
# Ruler outputs at repo root (no leading dot)
/AGENTS.md
diff --git a/braindrain/env_probe.py b/braindrain/env_probe.py
index f21de5d..08284c8 100644
--- a/braindrain/env_probe.py
+++ b/braindrain/env_probe.py
@@ -343,9 +343,10 @@
# ---------------------------------------------------------------------------
# IDE / Agent MCP config locations
-# Checked by path existence; content parsed if JSON/JSONC.
+# Checked by path existence; content parsed if JSON/JSONC, except Codex CLI
+# which is TOML-first at ~/.codex/config.toml.
# Format: (app_key, display_name, config_path, mcp_key_path)
-# mcp_key_path: dot-separated path into the JSON to find server map
+# mcp_key_path: dot-separated path into the config to find server map
# e.g. "mcpServers" or "mcp" or "context_servers"
# ---------------------------------------------------------------------------
@@ -374,7 +375,7 @@
"~/.config/claude/claude_desktop_config.json",
"mcpServers",
),
- ("codex_cli", "Codex CLI", "~/.codex/config.json", "mcpServers"),
+ ("codex_cli", "Codex CLI", "~/.codex/config.toml", "mcp_servers"),
("codex_openai", "Codex (OpenAI)", "~/.openai/mcp.json", "mcpServers"),
("continue", "Continue", "~/.continue/config.json", "mcpServers"),
("vscode", "VS Code", "~/.vscode/settings.json", "mcp.servers"),
@@ -423,6 +424,18 @@ def _read_json_file(path: Path) -> dict | None:
return None
+def _read_toml_file(path: Path) -> dict | None:
+ """Read a TOML file, return parsed dict or None."""
+ try:
+ import tomllib # Python 3.11+ stdlib
+
+ raw = path.read_text(encoding="utf-8", errors="ignore")
+ parsed = tomllib.loads(raw)
+ return parsed if isinstance(parsed, dict) else None
+ except Exception:
+ return None
+
+
def _get_nested(d: dict, dot_path: str) -> Any:
"""Walk a dot-separated key path into a nested dict."""
parts = dot_path.split(".")
@@ -456,7 +469,7 @@ def probe_app_configs() -> dict[str, Any]:
}
continue
- parsed = _read_json_file(path)
+ parsed = _read_toml_file(path) if app_key == "codex_cli" else _read_json_file(path)
mcp_block = _get_nested(parsed, mcp_key) if parsed else None
server_names: list[str] = []
diff --git a/braindrain/livingdash_sidecar.py b/braindrain/livingdash_sidecar.py
new file mode 100644
index 0000000..b528a59
--- /dev/null
+++ b/braindrain/livingdash_sidecar.py
@@ -0,0 +1,351 @@
+from __future__ import annotations
+
+import argparse
+import json
+import subprocess
+from pathlib import Path
+from typing import Any
+
+import uvicorn
+from fastapi import FastAPI, HTTPException, Request
+from fastapi.responses import FileResponse, HTMLResponse, JSONResponse
+from fastapi.staticfiles import StaticFiles
+from pydantic import BaseModel
+
+
+SESSION_COOKIE = "livingdash_session"
+
+
+class LoginPayload(BaseModel):
+ username: str
+ password: str
+
+
+def _load_json(path: Path, default: dict[str, Any]) -> dict[str, Any]:
+ if not path.exists():
+ return default
+ try:
+ return json.loads(path.read_text(encoding="utf-8"))
+ except Exception:
+ return default
+
+
+def _save_json(path: Path, payload: dict[str, Any]) -> None:
+ path.parent.mkdir(parents=True, exist_ok=True)
+ path.write_text(json.dumps(payload, indent=2), encoding="utf-8")
+
+
+def _is_authenticated(request: Request, auth_config: dict[str, str]) -> bool:
+ return request.cookies.get(SESSION_COOKIE) == auth_config.get("session_secret")
+
+
+def _require_auth(request: Request, auth_config: dict[str, str]) -> None:
+ if not _is_authenticated(request, auth_config):
+ raise HTTPException(status_code=401, detail="Authentication required")
+
+
+def _read_snapshot(data_dir: Path) -> dict[str, Any]:
+ return _load_json(data_dir / "snapshot.json", {})
+
+
+def _read_status(data_dir: Path) -> dict[str, Any]:
+ return _load_json(data_dir / "status.json", {})
+
+
+def _tone_from_state(value: Any, *, positive: set[str] | None = None, negative: set[str] | None = None) -> str:
+ text = str(value or "").strip().lower()
+ positive = positive or {"ok", "on", "hot", "clean", "active", "true"}
+ negative = negative or {"error", "off", "failed", "dirty", "down", "false", "blocked"}
+ if any(token in text for token in negative):
+ return "rose"
+ if any(token in text for token in positive):
+ return "emerald"
+ return "cyan"
+
+
+def _to_ui_snapshot(raw: dict[str, Any]) -> dict[str, Any]:
+ workspace = raw.get("workspace", {}) or {}
+ repo = raw.get("repo", {}) or {}
+ signals = raw.get("workspace_signals", {}) or {}
+ narrative = raw.get("narrative", {}) or {}
+ startup = (narrative.get("startup_flow", {}) or {}).get("steps", []) or []
+ key_modules = narrative.get("key_modules", []) or []
+ git = signals.get("git", {}) or {}
+ mcp = signals.get("mcp_tools", {}) or {}
+ agents = signals.get("agents", {}) or {}
+ env_files = signals.get("env_files", []) or []
+ insights = raw.get("insights", {}) or {}
+ map_summary = raw.get("map_summary", {}) or {}
+
+ branch = git.get("branch") or "unknown"
+ dirty = git.get("dirty")
+ git_state = "dirty" if dirty is True else ("clean" if dirty is False else "unknown")
+ entrypoint = key_modules[0]["path"] if key_modules and isinstance(key_modules[0], dict) else "unknown"
+
+ systems = [
+ {
+ "label": "Workspace",
+ "value": workspace.get("name", "unknown"),
+ "tone": "blue",
+ "detail": f"Root: {workspace.get('root', 'unknown')}",
+ },
+ {
+ "label": "MCP",
+ "value": f"{mcp.get('count', 0)} active",
+ "tone": "cyan",
+ "detail": "Hot-loaded MCP tools available to the runtime.",
+ },
+ {
+ "label": "Env",
+ "value": f"{len(env_files)} files",
+ "tone": "amber" if env_files else "rose",
+ "detail": "Environment files detected in workspace root.",
+ },
+ {
+ "label": "Git",
+ "value": git_state,
+ "tone": _tone_from_state(git_state, positive={"clean"}, negative={"dirty"}),
+ "detail": f"Branch: {branch}",
+ },
+ {
+ "label": "Agents",
+ "value": str(agents.get("count", 0)),
+ "tone": "violet",
+ "detail": "Detected local Cursor agent definitions.",
+ },
+ {
+ "label": "Risk",
+ "value": "low" if int(insights.get("env_drift", 0) or 0) == 0 else "elevated",
+ "tone": "emerald" if int(insights.get("env_drift", 0) or 0) == 0 else "rose",
+ "detail": "Derived from env drift and workspace health indicators.",
+ },
+ ]
+
+ return {
+ "workspaceName": workspace.get("name", "workspace"),
+ "projectName": repo.get("project_name", "LivingDash"),
+ "branch": branch,
+ "repoBrief": {
+ "title": "What this repo does",
+ "summary": narrative.get("repo_brief", "No repository brief available."),
+ "entrypoint": entrypoint,
+ "posture": "Snapshot-first with live workspace signals",
+ },
+ "facts": [
+ {"label": "Workspace", "value": workspace.get("name", "unknown"), "tone": "blue"},
+ {"label": "Project", "value": repo.get("project_name", "unknown"), "tone": "cyan"},
+ {"label": "MCP", "value": f"{mcp.get('count', 0)} active", "tone": "emerald"},
+ {"label": "Branch", "value": branch, "tone": "violet"},
+ {"label": "Agents", "value": str(agents.get("count", 0)), "tone": "amber"},
+ {"label": "Blockers", "value": "0", "tone": "rose"},
+ ],
+ "systems": systems,
+ "startupFlow": [
+ {
+ "label": step.get("label", "step"),
+ "detail": f"id: {step.get('id', 'unknown')}",
+ "tone": "blue" if idx == 0 else "cyan",
+ }
+ for idx, step in enumerate(startup)
+ if isinstance(step, dict)
+ ],
+ "actions": [
+ {"label": action.get("label", "Action"), "detail": f"kind: {action.get('kind', 'view')}", "tone": "blue"}
+ for action in (raw.get("actions", []) or [])
+ if isinstance(action, dict)
+ ],
+ "mapAccess": {
+ "label": "Bounded 2D systems map",
+ "description": "Secondary map view for drilling into repo structure and hotspots.",
+ "nodes": str(map_summary.get("nodes", 0)),
+ "edges": str(max(0, int(map_summary.get("nodes", 0) or 0) - 1)),
+ "hotspots": str(map_summary.get("hotspots", 0)),
+ "cta": "OPEN SYSTEM MAP",
+ },
+ }
+
+
+def _to_ui_live(raw: dict[str, Any], status: dict[str, Any]) -> dict[str, Any]:
+ signals = raw.get("workspace_signals", {}) or {}
+ insights = raw.get("insights", {}) or {}
+ mcp = signals.get("mcp_tools", {}) or {}
+ agents = signals.get("agents", {}) or {}
+ git = signals.get("git", {}) or {}
+ refresh_age = status.get("refresh_age_seconds", 0)
+
+ env_drift = int(insights.get("env_drift", 0) or 0)
+ token_saving = bool(insights.get("token_saving_active", False))
+
+ return {
+ "refreshedAt": f"{refresh_age}s ago" if refresh_age else "just now",
+ "activeTools": f"{mcp.get('count', 0)} active",
+ "agentsOnline": f"{agents.get('count', 0)} online",
+ "insights": [
+ {
+ "label": "Token saving",
+ "value": "on" if token_saving else "off",
+ "detail": "Telemetry indicates token-saving optimization status.",
+ "tone": "emerald" if token_saving else "rose",
+ },
+ {
+ "label": "Env drift",
+ "value": str(env_drift),
+ "detail": "Detected environment configuration drift count.",
+ "tone": "emerald" if env_drift == 0 else "amber",
+ },
+ {
+ "label": "Git state",
+ "value": "dirty" if git.get("dirty") else "clean",
+ "detail": f"Current branch: {git.get('branch') or 'unknown'}",
+ "tone": "amber" if git.get("dirty") else "emerald",
+ },
+ ],
+ "liveCounters": [
+ {"label": "Tools", "value": str(mcp.get("count", 0)), "tone": "emerald"},
+ {"label": "Agents", "value": str(agents.get("count", 0)), "tone": "cyan"},
+ {"label": "Blocks", "value": "0", "tone": "rose"},
+ {"label": "Refresh", "value": f"{refresh_age}s" if refresh_age else "0s", "tone": "violet"},
+ ],
+ }
+
+
+def create_app(
+ *,
+ project_root: Path,
+ data_dir: Path,
+ ui_dist: Path,
+ auth_config: dict[str, str],
+) -> FastAPI:
+ app = FastAPI(title="LivingDash Sidecar", docs_url=None, redoc_url=None)
+
+ if ui_dist.exists():
+ assets_dir = ui_dist / "assets"
+ if assets_dir.exists():
+ app.mount("/assets", StaticFiles(directory=assets_dir), name="assets")
+
+ @app.get("/health")
+ def health() -> dict[str, str]:
+ return {"status": "ok"}
+
+ @app.get("/api/auth/session")
+ def session(request: Request) -> dict[str, Any]:
+ return {
+ "authenticated": _is_authenticated(request, auth_config),
+ "userName": auth_config.get("username"),
+ "username": auth_config.get("username"),
+ }
+
+ @app.post("/api/auth/login")
+ def login(payload: LoginPayload) -> JSONResponse:
+ if payload.username != auth_config.get("username") or payload.password != auth_config.get("password"):
+ raise HTTPException(status_code=401, detail="Invalid credentials")
+ response = JSONResponse({"ok": True})
+ response.set_cookie(
+ SESSION_COOKIE,
+ auth_config.get("session_secret", ""),
+ httponly=True,
+ samesite="lax",
+ )
+ return response
+
+ @app.post("/api/auth/logout")
+ def logout() -> JSONResponse:
+ response = JSONResponse({"ok": True})
+ response.delete_cookie(SESSION_COOKIE)
+ return response
+
+ @app.get("/api/snapshot")
+ def snapshot(request: Request) -> dict[str, Any]:
+ _require_auth(request, auth_config)
+ return _to_ui_snapshot(_read_snapshot(data_dir))
+
+ @app.get("/api/live")
+ def live(request: Request) -> dict[str, Any]:
+ _require_auth(request, auth_config)
+ snapshot = _read_snapshot(data_dir)
+ status = _read_status(data_dir)
+ return _to_ui_live(snapshot, status)
+
+ @app.get("/api/actions")
+ def actions(request: Request) -> dict[str, Any]:
+ _require_auth(request, auth_config)
+ snapshot = _read_snapshot(data_dir)
+ return {"actions": snapshot.get("actions", [])}
+
+ @app.post("/api/actions/run/{action_id}")
+ def run_action(action_id: str, request: Request) -> dict[str, Any]:
+ _require_auth(request, auth_config)
+ if action_id != "run_tests":
+ return {"ok": False, "action_id": action_id, "status": "view_only"}
+ result = subprocess.run(
+ ["./.venv/bin/python", "-m", "pytest", "tests/test_livingdash.py"],
+ cwd=project_root,
+ capture_output=True,
+ text=True,
+ )
+ return {
+ "ok": result.returncode == 0,
+ "action_id": action_id,
+ "stdout": result.stdout[-2000:],
+ "stderr": result.stderr[-1000:],
+ "returncode": result.returncode,
+ }
+
+ @app.get("/api/map")
+ def map_data(request: Request) -> dict[str, Any]:
+ _require_auth(request, auth_config)
+ snapshot = _read_snapshot(data_dir)
+ return snapshot.get("map_summary", {})
+
+ @app.get("/{full_path:path}")
+ def index(full_path: str):
+ index_file = ui_dist / "index.html"
+ if index_file.exists():
+ return FileResponse(index_file)
+ fallback = """
+
+
+
LivingDash
+
+ LivingDash
+ The UI build has not been generated yet.
+
+
+ """
+ return HTMLResponse(fallback)
+
+ return app
+
+
+def _env_path(name: str) -> Path:
+ """Resolve a required environment variable to an absolute path."""
+ import os
+
+ return Path(os.environ[name]).expanduser().resolve()
+
+
+def main() -> None:
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--port", type=int, default=7337)
+ args = parser.parse_args()
+
+ import os
+
+ project_root = Path(os.environ["LIVINGDASH_PROJECT_ROOT"]).expanduser().resolve()
+ data_dir = Path(os.environ["LIVINGDASH_DATA_DIR"]).expanduser().resolve()
+ ui_dist = Path(os.environ["LIVINGDASH_UI_DIST"]).expanduser().resolve()
+ auth_config = _load_json(data_dir / "auth.json", {})
+ if "session_secret" not in auth_config:
+ auth_config["session_secret"] = os.environ.get("LIVINGDASH_SESSION_SECRET", "")
+
+ app = create_app(
+ project_root=project_root,
+ data_dir=data_dir,
+ ui_dist=ui_dist,
+ auth_config=auth_config,
+ )
+ uvicorn.run(app, host="127.0.0.1", port=args.port, log_level="warning")
+
+
+if __name__ == "__main__":
+ main()
+
diff --git a/scripts/install/configure_mcp.py b/scripts/install/configure_mcp.py
index 938db69..50af8c2 100644
--- a/scripts/install/configure_mcp.py
+++ b/scripts/install/configure_mcp.py
@@ -177,7 +177,6 @@ def _build_targets(detected_configs: dict[str, Any]) -> list[Target | CliCommand
("opencode", "OpenCode", "~/.config/opencode/opencode.jsonc", "mcp"),
("antigravity", "Antigravity", "~/.gemini/antigravity/mcp_config.json", "mcpServers"),
("gemini_cli", "Gemini CLI", "~/.gemini/settings.json", "mcpServers"),
- ("codex_cli", "Codex CLI", "~/.codex/config.json", "mcpServers"),
("codex_openai", "Codex (OpenAI)", "~/.openai/mcp.json", "mcpServers"),
(
"claude_desktop",
@@ -202,6 +201,8 @@ def _build_targets(detected_configs: dict[str, Any]) -> list[Target | CliCommand
out: list[Target | CliCommandTarget] = []
for key, display, default_path, style in defaults:
probe = detected_configs.get(key) if isinstance(detected_configs, dict) else None
+ if key == "codex_cli_toml" and probe is None and isinstance(detected_configs, dict):
+ probe = detected_configs.get("codex_cli")
path = Path((probe or {}).get("config_path", default_path)).expanduser()
detected = bool((probe or {}).get("exists", False))
out.append(Target(key=key, display=display, path=path, style=style, detected=detected))
@@ -221,7 +222,7 @@ def _build_targets(detected_configs: dict[str, Any]) -> list[Target | CliCommand
def _ask_selection(targets: list[Target | CliCommandTarget]) -> list[Target | CliCommandTarget]:
print("\nSelect MCP targets to configure (interactive checklist):")
- print("Enter comma-separated numbers, 'all', or press Enter for detected-only.")
+ print("Enter comma-separated numbers, 'all', or press Enter for Cursor + Zed + Codex CLI (TOML).")
for idx, target in enumerate(targets, start=1):
marker = "detected" if target.detected else "not-detected"
exists = "exists" if target.path.exists() else "new-file"
@@ -229,11 +230,8 @@ def _ask_selection(targets: list[Target | CliCommandTarget]) -> list[Target | Cl
choice = input("\nSelection: ").strip().lower()
if not choice:
- selected = [t for t in targets if t.detected]
- if selected:
- return selected
- print("No detected configs found; defaulting to Cursor + Zed + Codex CLI.")
- preferred = {"cursor", "zed", "codex_cli"}
+ print("Defaulting to Cursor + Zed + Codex CLI (TOML).")
+ preferred = {"cursor", "zed", "codex_cli_toml"}
return [t for t in targets if t.key in preferred]
if choice == "all":
@@ -356,4 +354,3 @@ def main() -> int:
if __name__ == "__main__":
raise SystemExit(main())
-
diff --git a/tests/test_env_probe_codex.py b/tests/test_env_probe_codex.py
new file mode 100644
index 0000000..c888b8b
--- /dev/null
+++ b/tests/test_env_probe_codex.py
@@ -0,0 +1,101 @@
+"""Tests for Codex TOML detection and installer defaults."""
+
+from __future__ import annotations
+
+from pathlib import Path
+
+from braindrain.env_probe import probe_app_configs
+from scripts.install import configure_mcp
+
+_ask_selection = configure_mcp._ask_selection
+_build_targets = configure_mcp._build_targets
+
+
+def test_probe_app_configs_reports_codex_cli_from_toml_and_preserves_json_apps(
+ tmp_path: Path, monkeypatch
+) -> None:
+ home = tmp_path / "home"
+ codex_dir = home / ".codex"
+ cursor_dir = home / ".cursor"
+ codex_dir.mkdir(parents=True)
+ cursor_dir.mkdir(parents=True)
+
+ (codex_dir / "config.toml").write_text(
+ """
+[mcp_servers.braindrain]
+command = "/bin/true"
+args = []
+""".strip()
+ + "\n",
+ encoding="utf-8",
+ )
+ (cursor_dir / "mcp.json").write_text(
+ """{"mcpServers":{"braindrain":{"command":"/bin/true","args":[]}}}\n""",
+ encoding="utf-8",
+ )
+
+ monkeypatch.setenv("HOME", str(home))
+
+ configs = probe_app_configs()
+
+ codex = configs["codex_cli"]
+ assert codex["exists"] is True
+ assert codex["config_path"] == str(codex_dir / "config.toml")
+ assert codex["mcp_servers"] == ["braindrain"]
+
+ cursor = configs["cursor"]
+ assert cursor["exists"] is True
+ assert cursor["config_path"] == str(cursor_dir / "mcp.json")
+ assert cursor["mcp_servers"] == ["braindrain"]
+
+
+def test_build_targets_collapses_codex_to_single_toml_target() -> None:
+ targets = _build_targets(
+ {
+ "cursor": {
+ "exists": True,
+ "config_path": "/tmp/cursor.json",
+ },
+ "zed": {
+ "exists": True,
+ "config_path": "/tmp/zed.json",
+ },
+ "codex_cli": {
+ "exists": True,
+ "config_path": "/tmp/home/.codex/config.toml",
+ },
+ }
+ )
+
+ keys = [target.key for target in targets]
+ assert "codex_cli" not in keys
+ assert keys.count("codex_cli_toml") == 1
+
+ codex = next(target for target in targets if target.key == "codex_cli_toml")
+ assert codex.detected is True
+ assert codex.style == "toml_mcp_servers"
+ assert str(codex.path).endswith(".codex/config.toml")
+
+
+def test_empty_installer_selection_defaults_to_cursor_zed_and_codex_toml(
+ monkeypatch, capsys
+) -> None:
+ targets = _build_targets(
+ {
+ "cursor": {"exists": True, "config_path": "/tmp/cursor.json"},
+ "zed": {"exists": True, "config_path": "/tmp/zed.json"},
+ "codex_cli": {
+ "exists": True,
+ "config_path": "/tmp/home/.codex/config.toml",
+ },
+ }
+ )
+ monkeypatch.setattr("builtins.input", lambda prompt="": "")
+
+ selected = _ask_selection(targets)
+ keys = [target.key for target in selected]
+
+ assert keys == ["cursor", "zed", "codex_cli_toml"]
+
+ out = capsys.readouterr().out
+ assert "Cursor + Zed + Codex CLI (TOML)" in out
From f9da5e31f18998d1e369895e80c0c5e2b784615e Mon Sep 17 00:00:00 2001
From: ettienne <12529192+jhweb@users.noreply.github.com>
Date: Thu, 23 Apr 2026 12:16:27 +0200
Subject: [PATCH 04/10] Implement LDash operational MVP
---
.gitignore | 1 -
braindrain/livingdash.py | 97 +++-
braindrain/livingdash_sidecar.py | 906 +++++++++++++++++++++++++------
tests/test_livingdash.py | 53 +-
4 files changed, 888 insertions(+), 169 deletions(-)
diff --git a/.gitignore b/.gitignore
index 79a0591..9f576e9 100644
--- a/.gitignore
+++ b/.gitignore
@@ -16,7 +16,6 @@ venv/
*.log
*.logs
-.ldash/
# Docs
docs/
diff --git a/braindrain/livingdash.py b/braindrain/livingdash.py
index 4cbea6e..0107b0a 100644
--- a/braindrain/livingdash.py
+++ b/braindrain/livingdash.py
@@ -8,6 +8,7 @@
import subprocess
import sys
from dataclasses import asdict, dataclass
+from datetime import UTC, datetime
from pathlib import Path
from typing import Any
@@ -20,18 +21,88 @@ class RuntimePaths:
scaffold_root: Path
server: Path
ui: Path
+ config: Path
data: Path
snapshot: Path
status: Path
auth: Path
pid: Path
+ commands_config: Path
+ services_config: Path
+ command_history: Path
+ process_state: Path
+ telemetry_export: Path
+
+
+DEFAULT_COMMANDS = {
+ "schema_version": "1.0",
+ "commands": [
+ {
+ "id": "ui_tests",
+ "label": "Run UI tests",
+ "category": "quality",
+ "description": "Run the LivingDash Vitest suite.",
+ "command": ["npm", "run", "test"],
+ "cwd": ".ldash/ui",
+ "timeout_seconds": 180,
+ },
+ {
+ "id": "ui_build",
+ "label": "Build UI bundle",
+ "category": "quality",
+ "description": "Build the production LivingDash UI bundle.",
+ "command": ["npm", "run", "build"],
+ "cwd": ".ldash/ui",
+ "timeout_seconds": 180,
+ },
+ {
+ "id": "backend_tests",
+ "label": "Run backend tests",
+ "category": "quality",
+ "description": "Run the LivingDash backend pytest suite.",
+ "command": ["./.venv/bin/python", "-m", "pytest", "tests/test_livingdash.py"],
+ "cwd": ".",
+ "timeout_seconds": 180,
+ },
+ ],
+}
+
+
+DEFAULT_SERVICES = {
+ "schema_version": "1.0",
+ "services": [
+ {
+ "id": "ui_preview",
+ "name": "UI Preview",
+ "description": "Launch the Vite preview server for the dashboard UI.",
+ "cwd": ".ldash/ui",
+ "start": ["npm", "run", "dev", "--", "--host", "127.0.0.1", "--port", "4173"],
+ "open_target": "http://127.0.0.1:4173",
+ "healthcheck_url": "http://127.0.0.1:4173",
+ "allowed_actions": ["start", "stop", "open"],
+ },
+ {
+ "id": "ui_tests_watch",
+ "name": "UI Tests Watch",
+ "description": "Run the Vitest watcher for fast dashboard UI iteration.",
+ "cwd": ".ldash/ui",
+ "start": ["npm", "run", "test:watch"],
+ "allowed_actions": ["start", "stop"],
+ },
+ ],
+}
SERVER_SHIM = """from braindrain.livingdash_sidecar import main\n\nif __name__ == "__main__":\n main()\n"""
+def _now_iso() -> str:
+ return datetime.now(UTC).isoformat().replace("+00:00", "Z")
+
+
def _runtime_paths(project_root: Path) -> RuntimePaths:
scaffold_root = project_root / ".ldash"
+ config = scaffold_root / "config"
root = project_root / ".braindrain" / "ldash"
data = root / "data"
return RuntimePaths(
@@ -39,11 +110,17 @@ def _runtime_paths(project_root: Path) -> RuntimePaths:
scaffold_root=scaffold_root,
server=scaffold_root / "server",
ui=scaffold_root / "ui",
+ config=config,
data=data,
snapshot=data / "snapshot.json",
status=data / "status.json",
auth=data / "auth.json",
pid=data / "livingdash.pid",
+ commands_config=config / "commands.json",
+ services_config=config / "services.json",
+ command_history=data / "command_history.json",
+ process_state=data / "process_state.json",
+ telemetry_export=data / "telemetry_export.json",
)
@@ -66,12 +143,23 @@ def _migrate_legacy_runtime_data(paths: RuntimePaths) -> None:
def ensure_livingdash_runtime(project_root: str | Path) -> RuntimePaths:
project_root = Path(project_root).expanduser().resolve()
paths = _runtime_paths(project_root)
- for path in (paths.root, paths.scaffold_root, paths.server, paths.ui, paths.data):
+ for path in (paths.root, paths.scaffold_root, paths.server, paths.ui, paths.config, paths.data):
path.mkdir(parents=True, exist_ok=True)
_migrate_legacy_runtime_data(paths)
app_py = paths.server / "app.py"
if not app_py.exists():
app_py.write_text(SERVER_SHIM, encoding="utf-8")
+ if not paths.commands_config.exists():
+ paths.commands_config.write_text(json.dumps(DEFAULT_COMMANDS, indent=2), encoding="utf-8")
+ if not paths.services_config.exists():
+ paths.services_config.write_text(json.dumps(DEFAULT_SERVICES, indent=2), encoding="utf-8")
+ for runtime_path, default_payload in (
+ (paths.command_history, {"schema_version": "1.0", "entries": []}),
+ (paths.process_state, {"schema_version": "1.0", "services": {}}),
+ (paths.telemetry_export, {"schema_version": "1.0", "exports": []}),
+ ):
+ if not runtime_path.exists():
+ runtime_path.write_text(json.dumps(default_payload, indent=2), encoding="utf-8")
return paths
@@ -149,6 +237,9 @@ def _detect_git_state(project_root: Path) -> dict[str, Any]:
except Exception:
default_branch = None
+ if not default_branch and current_branch:
+ default_branch = current_branch
+
try:
dirty = subprocess.run(
["git", "status", "--porcelain"],
@@ -341,6 +432,8 @@ def refresh(self) -> dict[str, Any]:
"snapshot_path": str(self.paths.snapshot),
"project_root": str(self.project_root),
"running": bool(status.get("running", False)),
+ "last_refreshed_at": _now_iso(),
+ "refresh_age_seconds": 0,
}
)
self._save_json(self.paths.status, status)
@@ -386,6 +479,8 @@ def start(self) -> dict[str, Any]:
"url": f"http://127.0.0.1:{port}",
"project_root": str(self.project_root),
"snapshot_path": str(self.paths.snapshot),
+ "last_refreshed_at": _now_iso(),
+ "refresh_age_seconds": 0,
}
self.paths.pid.write_text(str(proc.pid), encoding="utf-8")
self._save_json(self.paths.status, status)
diff --git a/braindrain/livingdash_sidecar.py b/braindrain/livingdash_sidecar.py
index b528a59..9af081d 100644
--- a/braindrain/livingdash_sidecar.py
+++ b/braindrain/livingdash_sidecar.py
@@ -2,9 +2,16 @@
import argparse
import json
+import os
+import socket
import subprocess
+import sys
+import time
+from datetime import UTC, datetime
from pathlib import Path
from typing import Any
+from urllib.error import URLError
+from urllib.request import urlopen
import uvicorn
from fastapi import FastAPI, HTTPException, Request
@@ -12,8 +19,11 @@
from fastapi.staticfiles import StaticFiles
from pydantic import BaseModel
+from braindrain.livingdash import DEFAULT_COMMANDS, DEFAULT_SERVICES, ensure_livingdash_runtime
+
SESSION_COOKIE = "livingdash_session"
+CONTRACT_VERSION = "1.0"
class LoginPayload(BaseModel):
@@ -21,6 +31,10 @@ class LoginPayload(BaseModel):
password: str
+def _now_iso() -> str:
+ return datetime.now(UTC).isoformat().replace("+00:00", "Z")
+
+
def _load_json(path: Path, default: dict[str, Any]) -> dict[str, Any]:
if not path.exists():
return default
@@ -44,169 +58,614 @@ def _require_auth(request: Request, auth_config: dict[str, str]) -> None:
raise HTTPException(status_code=401, detail="Authentication required")
+def _runtime_paths(project_root: Path) -> Any:
+ return ensure_livingdash_runtime(project_root)
+
+
def _read_snapshot(data_dir: Path) -> dict[str, Any]:
return _load_json(data_dir / "snapshot.json", {})
def _read_status(data_dir: Path) -> dict[str, Any]:
- return _load_json(data_dir / "status.json", {})
-
-
-def _tone_from_state(value: Any, *, positive: set[str] | None = None, negative: set[str] | None = None) -> str:
- text = str(value or "").strip().lower()
- positive = positive or {"ok", "on", "hot", "clean", "active", "true"}
- negative = negative or {"error", "off", "failed", "dirty", "down", "false", "blocked"}
- if any(token in text for token in negative):
- return "rose"
- if any(token in text for token in positive):
- return "emerald"
- return "cyan"
-
-
-def _to_ui_snapshot(raw: dict[str, Any]) -> dict[str, Any]:
- workspace = raw.get("workspace", {}) or {}
- repo = raw.get("repo", {}) or {}
- signals = raw.get("workspace_signals", {}) or {}
- narrative = raw.get("narrative", {}) or {}
- startup = (narrative.get("startup_flow", {}) or {}).get("steps", []) or []
- key_modules = narrative.get("key_modules", []) or []
- git = signals.get("git", {}) or {}
- mcp = signals.get("mcp_tools", {}) or {}
- agents = signals.get("agents", {}) or {}
- env_files = signals.get("env_files", []) or []
- insights = raw.get("insights", {}) or {}
- map_summary = raw.get("map_summary", {}) or {}
+ status = _load_json(data_dir / "status.json", {})
+ last_refreshed_at = status.get("last_refreshed_at")
+ if isinstance(last_refreshed_at, str) and last_refreshed_at:
+ try:
+ refreshed = datetime.fromisoformat(last_refreshed_at.replace("Z", "+00:00"))
+ status["refresh_age_seconds"] = max(0, int((datetime.now(UTC) - refreshed).total_seconds()))
+ except ValueError:
+ status["refresh_age_seconds"] = int(status.get("refresh_age_seconds", 0) or 0)
+ return status
- branch = git.get("branch") or "unknown"
- dirty = git.get("dirty")
- git_state = "dirty" if dirty is True else ("clean" if dirty is False else "unknown")
- entrypoint = key_modules[0]["path"] if key_modules and isinstance(key_modules[0], dict) else "unknown"
- systems = [
- {
- "label": "Workspace",
- "value": workspace.get("name", "unknown"),
- "tone": "blue",
- "detail": f"Root: {workspace.get('root', 'unknown')}",
- },
- {
- "label": "MCP",
- "value": f"{mcp.get('count', 0)} active",
- "tone": "cyan",
- "detail": "Hot-loaded MCP tools available to the runtime.",
- },
- {
- "label": "Env",
- "value": f"{len(env_files)} files",
- "tone": "amber" if env_files else "rose",
- "detail": "Environment files detected in workspace root.",
- },
- {
- "label": "Git",
- "value": git_state,
- "tone": _tone_from_state(git_state, positive={"clean"}, negative={"dirty"}),
- "detail": f"Branch: {branch}",
- },
+def _read_history(paths: Any) -> dict[str, Any]:
+ return _load_json(paths.command_history, {"schema_version": CONTRACT_VERSION, "entries": []})
+
+
+def _write_history(paths: Any, payload: dict[str, Any]) -> None:
+ _save_json(paths.command_history, payload)
+
+
+def _read_process_state(paths: Any) -> dict[str, Any]:
+ return _load_json(paths.process_state, {"schema_version": CONTRACT_VERSION, "services": {}})
+
+
+def _write_process_state(paths: Any, payload: dict[str, Any]) -> None:
+ _save_json(paths.process_state, payload)
+
+
+def _read_telemetry_export(paths: Any) -> dict[str, Any]:
+ return _load_json(paths.telemetry_export, {"schema_version": CONTRACT_VERSION, "exports": []})
+
+
+def _write_telemetry_export(paths: Any, payload: dict[str, Any]) -> None:
+ _save_json(paths.telemetry_export, payload)
+
+
+def _read_commands(paths: Any) -> dict[str, Any]:
+ payload = _load_json(paths.commands_config, DEFAULT_COMMANDS)
+ commands = payload.get("commands")
+ if not isinstance(commands, list):
+ commands = DEFAULT_COMMANDS["commands"]
+ valid = []
+ for item in commands:
+ if not isinstance(item, dict):
+ continue
+ command = item.get("command")
+ if not isinstance(command, list) or not all(isinstance(part, str) and part for part in command):
+ continue
+ command_id = str(item.get("id", "")).strip()
+ label = str(item.get("label", "")).strip()
+ if not command_id or not label:
+ continue
+ valid.append(
+ {
+ "id": command_id,
+ "label": label,
+ "category": str(item.get("category", "general")),
+ "description": str(item.get("description", "")),
+ "command": command,
+ "cwd": str(item.get("cwd", ".")),
+ "timeout_seconds": int(item.get("timeout_seconds", 120)),
+ }
+ )
+ return {"schema_version": str(payload.get("schema_version", CONTRACT_VERSION)), "commands": valid}
+
+
+def _read_services(paths: Any) -> dict[str, Any]:
+ payload = _load_json(paths.services_config, DEFAULT_SERVICES)
+ services = payload.get("services")
+ if not isinstance(services, list):
+ services = DEFAULT_SERVICES["services"]
+ valid = []
+ for item in services:
+ if not isinstance(item, dict):
+ continue
+ service_id = str(item.get("id", "")).strip()
+ name = str(item.get("name", "")).strip()
+ if not service_id or not name:
+ continue
+ start = item.get("start")
+ if start is not None and (not isinstance(start, list) or not all(isinstance(part, str) and part for part in start)):
+ start = None
+ allowed_actions = item.get("allowed_actions") or []
+ valid.append(
+ {
+ "id": service_id,
+ "name": name,
+ "description": str(item.get("description", "")),
+ "cwd": str(item.get("cwd", ".")),
+ "start": start,
+ "open_target": item.get("open_target"),
+ "healthcheck_url": item.get("healthcheck_url"),
+ "allowed_actions": [str(action) for action in allowed_actions if isinstance(action, str)],
+ }
+ )
+ return {"schema_version": str(payload.get("schema_version", CONTRACT_VERSION)), "services": valid}
+
+
+def _resolve_cwd(project_root: Path, relative_cwd: str) -> Path:
+ candidate = (project_root / relative_cwd).resolve()
+ try:
+ candidate.relative_to(project_root)
+ except ValueError as exc:
+ raise HTTPException(status_code=400, detail="Configured cwd escapes project root") from exc
+ return candidate
+
+
+def _safe_run(project_root: Path, command: list[str], *, cwd: str = ".", timeout_seconds: int = 120) -> dict[str, Any]:
+ started = time.perf_counter()
+ run_cwd = _resolve_cwd(project_root, cwd)
+ try:
+ result = subprocess.run(
+ command,
+ cwd=run_cwd,
+ capture_output=True,
+ text=True,
+ timeout=timeout_seconds,
+ check=False,
+ )
+ duration_ms = int((time.perf_counter() - started) * 1000)
+ return {
+ "ok": result.returncode == 0,
+ "returncode": result.returncode,
+ "stdout": result.stdout[-5000:],
+ "stderr": result.stderr[-3000:],
+ "duration_ms": duration_ms,
+ }
+ except subprocess.TimeoutExpired as exc:
+ duration_ms = int((time.perf_counter() - started) * 1000)
+ return {
+ "ok": False,
+ "returncode": -1,
+ "stdout": (exc.stdout or "")[-5000:],
+ "stderr": ((exc.stderr or "") + "\nCommand timed out.")[-3000:],
+ "duration_ms": duration_ms,
+ }
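A minimal sketch of the _safe_run contract, assuming the helpers are importable from the sidecar module (import path assumed; this hunk does not show the file header):

from pathlib import Path

from braindrain.livingdash_sidecar import _resolve_cwd, _safe_run  # assumed import path

def _demo(project_root: Path) -> None:
    # Output is truncated: stdout keeps the last 5000 chars, stderr the last 3000.
    result = _safe_run(project_root, ["git", "--version"], cwd=".", timeout_seconds=10)
    assert result["ok"] == (result["returncode"] == 0)
    assert len(result["stdout"]) <= 5000 and len(result["stderr"]) <= 3000
    # A cwd that resolves outside the repo raises HTTPException(400) before anything runs.
    _resolve_cwd(project_root, "../outside")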
+
+
+def _is_pid_running(pid: int | None) -> bool:
+ if not pid:
+ return False
+ try:
+ os.kill(int(pid), 0)
+ return True
+ except OSError:
+ return False
+
+
+def _check_url(url: str | None) -> bool:
+ if not url:
+ return False
+ try:
+ with urlopen(url, timeout=1.5) as response:
+ return int(getattr(response, "status", 200)) < 500
+ except (URLError, ValueError, TimeoutError):
+ return False
+
+
+def _open_target(target: str) -> tuple[bool, str]:
+ try:
+ if sys.platform == "darwin":
+ subprocess.Popen(["open", target], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
+ else:
+ subprocess.Popen(["xdg-open", target], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
+ return True, "Opened target"
+ except Exception as exc:
+ return False, f"Failed to open target: {exc}"
+
+
+def _record_history(paths: Any, entry: dict[str, Any]) -> dict[str, Any]:
+ history = _read_history(paths)
+ entries = history.get("entries", [])
+ if not isinstance(entries, list):
+ entries = []
+ entries.insert(0, entry)
+ history["entries"] = entries[:25]
+ history["updated_at"] = _now_iso()
+ _write_history(paths, history)
+ return history
+
+
+def _git_summary(project_root: Path) -> dict[str, Any]:
+ status = _safe_run(project_root, ["git", "status", "--porcelain=v1", "--branch"], cwd=".")
+ lines = [line for line in status["stdout"].splitlines() if line.strip()]
+ branch = "unknown"
+ ahead = 0
+ behind = 0
+ staged = 0
+ unstaged = 0
+ untracked = 0
+
+ if lines and lines[0].startswith("## "):
+ branch_line = lines.pop(0)[3:]
+ branch = branch_line.split("...", 1)[0].strip() or branch
+ if "ahead " in branch_line:
+ try:
+ ahead = int(branch_line.split("ahead ", 1)[1].split("]", 1)[0].split(",", 1)[0])
+ except Exception:
+ ahead = 0
+ if "behind " in branch_line:
+ try:
+ behind = int(branch_line.split("behind ", 1)[1].split("]", 1)[0].split(",", 1)[0])
+ except Exception:
+ behind = 0
+
+ for line in lines:
+ if line.startswith("??"):
+ untracked += 1
+ continue
+ x = line[:1]
+ y = line[1:2]
+ if x and x != " ":
+ staged += 1
+ if y and y != " ":
+ unstaged += 1
+
+ commits_raw = _safe_run(
+ project_root,
+ ["git", "log", "--max-count=5", "--pretty=format:%h%x1f%s%x1f%cr"],
+ cwd=".",
+ timeout_seconds=30,
+ )
+ commits = []
+ for line in commits_raw["stdout"].splitlines():
+ parts = line.split("\x1f")
+ if len(parts) == 3:
+ commits.append({"hash": parts[0], "subject": parts[1], "age": parts[2]})
+
+ return {
+ "branch": branch,
+ "dirty": staged > 0 or unstaged > 0 or untracked > 0,
+ "ahead": ahead,
+ "behind": behind,
+ "staged": staged,
+ "unstaged": unstaged,
+ "untracked": untracked,
+ "recent_commits": commits,
+ "last_checked_at": _now_iso(),
+ }
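A worked example of the porcelain branch-header parsing above (values illustrative):

# First line of `git status --porcelain=v1 --branch`, with the "## " prefix stripped:
branch_line = "main...origin/main [ahead 2, behind 1]"
assert branch_line.split("...", 1)[0].strip() == "main"
assert int(branch_line.split("ahead ", 1)[1].split("]", 1)[0].split(",", 1)[0]) == 2
assert int(branch_line.split("behind ", 1)[1].split("]", 1)[0].split(",", 1)[0]) == 1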
+
+
+def _telemetry_summary(snapshot: dict[str, Any], status: dict[str, Any], history: dict[str, Any]) -> dict[str, Any]:
+ signals = snapshot.get("workspace_signals", {}) or {}
+ mcp = signals.get("mcp_tools", {}) or {}
+ agents = signals.get("agents", {}) or {}
+ insights = snapshot.get("insights", {}) or {}
+ entries = history.get("entries", []) if isinstance(history.get("entries"), list) else []
+ recent_events = [
{
- "label": "Agents",
- "value": str(agents.get("count", 0)),
- "tone": "violet",
- "detail": "Detected local Cursor agent definitions.",
+ "kind": "command",
+ "label": entry.get("label", "Command"),
+ "status": entry.get("status", "unknown"),
+ "detail": f"exit {entry.get('returncode', 'n/a')} in {entry.get('duration_ms', 0)}ms",
+ "time": entry.get("finished_at", ""),
+ }
+ for entry in entries[:8]
+ ]
+ return {
+ "version": CONTRACT_VERSION,
+ "summary": {
+ "active_tools": int(mcp.get("count", 0) or 0),
+ "agents_online": int(agents.get("count", 0) or 0),
+ "refresh_age_seconds": int(status.get("refresh_age_seconds", 0) or 0),
+ "token_saving_active": bool(insights.get("token_saving_active", False)),
+ "env_drift": int(insights.get("env_drift", 0) or 0),
+ "recent_action_count": len(entries),
},
+ "events": recent_events,
+ "updated_at": _now_iso(),
+ }
+
+
+def _service_state(paths: Any, service: dict[str, Any]) -> dict[str, Any]:
+ state = _read_process_state(paths).get("services", {}).get(service["id"], {})
+ pid = state.get("pid")
+ running = _is_pid_running(pid)
+ healthy = _check_url(service.get("healthcheck_url")) if running else False
+ return {
+ "id": service["id"],
+ "name": service["name"],
+ "description": service.get("description", ""),
+ "cwd": service.get("cwd", "."),
+ "allowed_actions": service.get("allowed_actions", []),
+ "status": "running" if running else "stopped",
+ "healthy": healthy if running else False,
+ "pid": pid if running else None,
+ "open_target": service.get("open_target"),
+ "healthcheck_url": service.get("healthcheck_url"),
+ "last_started_at": state.get("last_started_at"),
+ "last_stopped_at": state.get("last_stopped_at"),
+ "last_exit_code": state.get("last_exit_code"),
+ }
+
+
+def _list_services(paths: Any) -> dict[str, Any]:
+ config = _read_services(paths)
+ return {
+ "version": CONTRACT_VERSION,
+ "items": [_service_state(paths, service) for service in config["services"]],
+ "updated_at": _now_iso(),
+ }
+
+
+def _find_command(paths: Any, command_id: str) -> dict[str, Any] | None:
+ for command in _read_commands(paths)["commands"]:
+ if command["id"] == command_id:
+ return command
+ return None
+
+
+def _find_service(paths: Any, service_id: str) -> dict[str, Any] | None:
+ for service in _read_services(paths)["services"]:
+ if service["id"] == service_id:
+ return service
+ return None
+
+
+def _build_overview(project_root: Path, paths: Any) -> dict[str, Any]:
+ snapshot = _read_snapshot(paths.data)
+ status = _read_status(paths.data)
+ git = _git_summary(project_root)
+ services = _list_services(paths)
+ history = _read_history(paths)
+ telemetry = _telemetry_summary(snapshot, status, history)
+ workspace = snapshot.get("workspace", {}) or {}
+ repo = snapshot.get("repo", {}) or {}
+ narrative = snapshot.get("narrative", {}) or {}
+
+ kpis = [
+ {"label": "Commands run", "value": str(len(history.get("entries", []) or [])), "tone": "violet"},
+ {"label": "Services running", "value": str(sum(1 for item in services["items"] if item["status"] == "running")), "tone": "cyan"},
+ {"label": "Git changes", "value": str(git["staged"] + git["unstaged"] + git["untracked"]), "tone": "amber"},
+ {"label": "Active MCP tools", "value": str(telemetry["summary"]["active_tools"]), "tone": "emerald"},
+ ]
+ recent_activity = [
{
- "label": "Risk",
- "value": "low" if int(insights.get("env_drift", 0) or 0) == 0 else "elevated",
- "tone": "emerald" if int(insights.get("env_drift", 0) or 0) == 0 else "rose",
- "detail": "Derived from env drift and workspace health indicators.",
- },
+ "label": entry.get("label", "Command"),
+ "detail": f"{entry.get('status', 'unknown')} · exit {entry.get('returncode', 'n/a')}",
+ "tone": "emerald" if entry.get("ok") else "rose",
+ }
+ for entry in (history.get("entries", []) or [])[:4]
+ ]
+ shortcuts = [
+ {"id": "commands", "label": "Open commands", "detail": "Run approved workspace commands.", "tone": "violet"},
+ {"id": "git", "label": "Open git status", "detail": "Inspect branch drift and guarded sync actions.", "tone": "cyan"},
+ {"id": "processes", "label": "Open processes", "detail": "Manage repo-scoped services only.", "tone": "amber"},
+ {"id": "telemetry", "label": "Open telemetry", "detail": "Inspect recent runtime signals and exports.", "tone": "emerald"},
]
return {
- "workspaceName": workspace.get("name", "workspace"),
- "projectName": repo.get("project_name", "LivingDash"),
- "branch": branch,
- "repoBrief": {
- "title": "What this repo does",
+ "version": CONTRACT_VERSION,
+ "workspace": {
+ "name": workspace.get("name", project_root.name),
+ "project_name": repo.get("project_name", project_root.name),
+ "branch": git["branch"],
+ },
+ "repo_brief": {
+ "title": "Workspace overview",
"summary": narrative.get("repo_brief", "No repository brief available."),
- "entrypoint": entrypoint,
- "posture": "Snapshot-first with live workspace signals",
+ "entrypoint": (narrative.get("key_modules") or [{}])[0].get("path", "unknown"),
+ "posture": "Operational shell with guarded local actions",
},
"facts": [
- {"label": "Workspace", "value": workspace.get("name", "unknown"), "tone": "blue"},
- {"label": "Project", "value": repo.get("project_name", "unknown"), "tone": "cyan"},
- {"label": "MCP", "value": f"{mcp.get('count', 0)} active", "tone": "emerald"},
- {"label": "Branch", "value": branch, "tone": "violet"},
- {"label": "Agents", "value": str(agents.get("count", 0)), "tone": "amber"},
- {"label": "Blockers", "value": "0", "tone": "rose"},
+ {"label": "Workspace", "value": workspace.get("name", project_root.name), "tone": "violet"},
+ {"label": "Project", "value": repo.get("project_name", project_root.name), "tone": "cyan"},
+ {"label": "Branch", "value": git["branch"], "tone": "emerald"},
+ {"label": "Dirty", "value": "Yes" if git["dirty"] else "No", "tone": "amber" if git["dirty"] else "emerald"},
+ {"label": "Tools", "value": str(telemetry["summary"]["active_tools"]), "tone": "violet"},
],
- "systems": systems,
- "startupFlow": [
+ "systems": [
{
- "label": step.get("label", "step"),
- "detail": f"id: {step.get('id', 'unknown')}",
- "tone": "blue" if idx == 0 else "cyan",
- }
- for idx, step in enumerate(startup)
- if isinstance(step, dict)
+ "label": "Git",
+ "value": "dirty" if git["dirty"] else "clean",
+ "tone": "amber" if git["dirty"] else "emerald",
+ "detail": f"ahead {git['ahead']} · behind {git['behind']}",
+ },
+ {
+ "label": "Processes",
+ "value": str(sum(1 for item in services["items"] if item["status"] == "running")),
+ "tone": "cyan",
+ "detail": "Repo-scoped services available to the dashboard.",
+ },
+ {
+ "label": "Telemetry",
+ "value": "active" if telemetry["summary"]["token_saving_active"] else "idle",
+ "tone": "violet",
+ "detail": f"refresh age {telemetry['summary']['refresh_age_seconds']}s",
+ },
],
- "actions": [
- {"label": action.get("label", "Action"), "detail": f"kind: {action.get('kind', 'view')}", "tone": "blue"}
- for action in (raw.get("actions", []) or [])
- if isinstance(action, dict)
+ "startup_flow": [
+ {"label": step.get("label", "step"), "detail": f"id: {step.get('id', 'unknown')}", "tone": "cyan"}
+ for step in ((narrative.get("startup_flow", {}) or {}).get("steps", []) or [])
+ if isinstance(step, dict)
],
- "mapAccess": {
- "label": "Bounded 2D systems map",
- "description": "Secondary map view for drilling into repo structure and hotspots.",
- "nodes": str(map_summary.get("nodes", 0)),
- "edges": str(max(0, int(map_summary.get("nodes", 0) or 0) - 1)),
- "hotspots": str(map_summary.get("hotspots", 0)),
+ "kpis": kpis,
+ "recent_activity": recent_activity,
+ "shortcuts": shortcuts,
+ "map_access": {
+ "label": "Map access",
+ "description": "The bounded systems map remains a secondary drill-down, not the primary workspace.",
"cta": "OPEN SYSTEM MAP",
},
+ "updated_at": _now_iso(),
}
-def _to_ui_live(raw: dict[str, Any], status: dict[str, Any]) -> dict[str, Any]:
- signals = raw.get("workspace_signals", {}) or {}
- insights = raw.get("insights", {}) or {}
- mcp = signals.get("mcp_tools", {}) or {}
- agents = signals.get("agents", {}) or {}
- git = signals.get("git", {}) or {}
- refresh_age = status.get("refresh_age_seconds", 0)
+def _commands_payload(paths: Any) -> dict[str, Any]:
+ registry = _read_commands(paths)
+ history = _read_history(paths)
+ groups: dict[str, list[dict[str, Any]]] = {}
+ for item in registry["commands"]:
+ groups.setdefault(item["category"], []).append(
+ {
+ "id": item["id"],
+ "label": item["label"],
+ "description": item["description"],
+ "cwd": item["cwd"],
+ "timeout_seconds": item["timeout_seconds"],
+ }
+ )
+ return {
+ "version": CONTRACT_VERSION,
+ "groups": [{"id": category, "label": category.replace("_", " ").title(), "items": items} for category, items in groups.items()],
+ "history": history.get("entries", []),
+ "updated_at": _now_iso(),
+ }
- env_drift = int(insights.get("env_drift", 0) or 0)
- token_saving = bool(insights.get("token_saving_active", False))
+def _git_payload(project_root: Path) -> dict[str, Any]:
+ git = _git_summary(project_root)
return {
- "refreshedAt": f"{refresh_age}s ago" if refresh_age else "just now",
- "activeTools": f"{mcp.get('count', 0)} active",
- "agentsOnline": f"{agents.get('count', 0)} online",
- "insights": [
- {
- "label": "Token saving",
- "value": "on" if token_saving else "off",
- "detail": "Telemetry indicates token-saving optimization status.",
- "tone": "emerald" if token_saving else "rose",
- },
- {
- "label": "Env drift",
- "value": str(env_drift),
- "detail": "Detected environment configuration drift count.",
- "tone": "emerald" if env_drift == 0 else "amber",
- },
- {
- "label": "Git state",
- "value": "dirty" if git.get("dirty") else "clean",
- "detail": f"Current branch: {git.get('branch') or 'unknown'}",
- "tone": "amber" if git.get("dirty") else "emerald",
- },
- ],
- "liveCounters": [
- {"label": "Tools", "value": str(mcp.get("count", 0)), "tone": "emerald"},
- {"label": "Agents", "value": str(agents.get("count", 0)), "tone": "cyan"},
- {"label": "Blocks", "value": "0", "tone": "rose"},
- {"label": "Refresh", "value": f"{refresh_age}s" if refresh_age else "0s", "tone": "violet"},
+ "version": CONTRACT_VERSION,
+ "summary": git,
+ "actions": [
+ {"id": "fetch", "label": "Fetch", "description": "Run git fetch --all --prune."},
+ {"id": "pull", "label": "Pull", "description": "Run git pull --ff-only on the current branch."},
],
+ "updated_at": _now_iso(),
+ }
+
+
+def _make_action_response(*, ok: bool, status: str, message: str, payload_key: str, payload: Any) -> dict[str, Any]:
+ return {
+ "ok": ok,
+ "status": status,
+ "message": message,
+ "updated_at": _now_iso(),
+ payload_key: payload,
+ }
+
+
+def _run_command(project_root: Path, paths: Any, command_id: str) -> dict[str, Any]:
+ command = _find_command(paths, command_id)
+ if not command:
+ raise HTTPException(status_code=404, detail="Unknown command")
+
+ result = _safe_run(
+ project_root,
+ command["command"],
+ cwd=command["cwd"],
+ timeout_seconds=command["timeout_seconds"],
+ )
+ entry = {
+ "id": command["id"],
+ "label": command["label"],
+ "category": command["category"],
+ "cwd": command["cwd"],
+ "ok": result["ok"],
+ "status": "success" if result["ok"] else "failed",
+ "returncode": result["returncode"],
+ "duration_ms": result["duration_ms"],
+ "stdout": result["stdout"],
+ "stderr": result["stderr"],
+ "finished_at": _now_iso(),
+ }
+ _record_history(paths, entry)
+ return _make_action_response(
+ ok=result["ok"],
+ status=entry["status"],
+ message=f"{command['label']} completed" if result["ok"] else f"{command['label']} failed",
+ payload_key="command_run",
+ payload=entry,
+ )
+
+
+def _git_action(project_root: Path, action: str) -> dict[str, Any]:
+ if action == "fetch":
+ result = _safe_run(project_root, ["git", "fetch", "--all", "--prune"], cwd=".", timeout_seconds=120)
+ elif action == "pull":
+ branch = _git_summary(project_root)["branch"]
+ result = _safe_run(project_root, ["git", "pull", "--ff-only", "origin", branch], cwd=".", timeout_seconds=120)
+ else:
+ raise HTTPException(status_code=400, detail="Unsupported git action")
+ payload = {
+ "action": action,
+ "returncode": result["returncode"],
+ "stdout": result["stdout"],
+ "stderr": result["stderr"],
+ "duration_ms": result["duration_ms"],
+ }
+ return _make_action_response(
+ ok=result["ok"],
+ status="success" if result["ok"] else "failed",
+ message=f"git {action} completed" if result["ok"] else f"git {action} failed",
+ payload_key="git_action",
+ payload=payload,
+ )
+
+
+def _start_service(project_root: Path, paths: Any, service: dict[str, Any]) -> dict[str, Any]:
+ if "start" not in service.get("allowed_actions", []):
+ raise HTTPException(status_code=403, detail="Start is not allowed for this service")
+ if not service.get("start"):
+ raise HTTPException(status_code=400, detail="Service has no start command")
+ state = _read_process_state(paths)
+ services_state = state.get("services", {})
+ current = services_state.get(service["id"], {})
+ if _is_pid_running(current.get("pid")):
+ return _make_action_response(
+ ok=True,
+ status="noop",
+ message=f"{service['name']} is already running",
+ payload_key="service",
+ payload=_service_state(paths, service),
+ )
+
+ proc = subprocess.Popen(
+ service["start"],
+ cwd=_resolve_cwd(project_root, service["cwd"]),
+ stdout=subprocess.DEVNULL,
+ stderr=subprocess.DEVNULL,
+ )
+ services_state[service["id"]] = {
+ "pid": proc.pid,
+ "last_started_at": _now_iso(),
+ "last_exit_code": None,
}
+ state["schema_version"] = CONTRACT_VERSION
+ state["services"] = services_state
+ _write_process_state(paths, state)
+ return _make_action_response(
+ ok=True,
+ status="success",
+ message=f"{service['name']} started",
+ payload_key="service",
+ payload=_service_state(paths, service),
+ )
+
+
+def _stop_service(paths: Any, service: dict[str, Any]) -> dict[str, Any]:
+ if "stop" not in service.get("allowed_actions", []):
+ raise HTTPException(status_code=403, detail="Stop is not allowed for this service")
+ state = _read_process_state(paths)
+ services_state = state.get("services", {})
+ current = services_state.get(service["id"], {})
+ pid = current.get("pid")
+ if not _is_pid_running(pid):
+ current.update({"pid": None, "last_stopped_at": _now_iso()})
+ services_state[service["id"]] = current
+ state["services"] = services_state
+ _write_process_state(paths, state)
+ return _make_action_response(
+ ok=True,
+ status="noop",
+ message=f"{service['name']} is already stopped",
+ payload_key="service",
+ payload=_service_state(paths, service),
+ )
+
+ try:
+ os.kill(int(pid), 15)
+ except OSError as exc:
+ raise HTTPException(status_code=500, detail=f"Failed to stop service: {exc}") from exc
+ current.update({"pid": None, "last_stopped_at": _now_iso()})
+ services_state[service["id"]] = current
+ state["services"] = services_state
+ _write_process_state(paths, state)
+ return _make_action_response(
+ ok=True,
+ status="success",
+ message=f"{service['name']} stopped",
+ payload_key="service",
+ payload=_service_state(paths, service),
+ )
+
+
+def _open_service(paths: Any, service: dict[str, Any]) -> dict[str, Any]:
+ if "open" not in service.get("allowed_actions", []):
+ raise HTTPException(status_code=403, detail="Open is not allowed for this service")
+ target = service.get("open_target")
+ if not isinstance(target, str) or not target:
+ raise HTTPException(status_code=400, detail="Service has no open target")
+ ok, message = _open_target(target)
+ return _make_action_response(
+ ok=ok,
+ status="success" if ok else "failed",
+ message=message,
+ payload_key="service",
+ payload=_service_state(paths, service),
+ )
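A short sketch of the per-service action whitelist these handlers enforce (service entry hypothetical):

service = {
    "id": "docs_site",                     # hypothetical service
    "name": "Docs site",
    "allowed_actions": ["start", "stop"],  # "open" deliberately absent
    "open_target": "http://localhost:8000",
}
# _open_service(paths, service) raises HTTPException(status_code=403) for this
# entry, which is the behavior the 403 assertion in the tests relies on.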
def create_app(
@@ -216,6 +675,7 @@ def create_app(
ui_dist: Path,
auth_config: dict[str, str],
) -> FastAPI:
+ paths = _runtime_paths(project_root)
app = FastAPI(title="LivingDash Sidecar", docs_url=None, redoc_url=None)
if ui_dist.exists():
@@ -239,7 +699,7 @@ def session(request: Request) -> dict[str, Any]:
def login(payload: LoginPayload) -> JSONResponse:
if payload.username != auth_config.get("username") or payload.password != auth_config.get("password"):
raise HTTPException(status_code=401, detail="Invalid credentials")
- response = JSONResponse({"ok": True})
+ response = JSONResponse({"ok": True, "authenticated": True, "userName": auth_config.get("username")})
response.set_cookie(
SESSION_COOKIE,
auth_config.get("session_secret", ""),
@@ -254,42 +714,167 @@ def logout() -> JSONResponse:
response.delete_cookie(SESSION_COOKIE)
return response
+ @app.get("/api/overview")
+ def overview(request: Request) -> dict[str, Any]:
+ _require_auth(request, auth_config)
+ return _build_overview(project_root, paths)
+
+ @app.get("/api/commands")
+ def commands(request: Request) -> dict[str, Any]:
+ _require_auth(request, auth_config)
+ return _commands_payload(paths)
+
+ @app.get("/api/commands/history")
+ def commands_history(request: Request) -> dict[str, Any]:
+ _require_auth(request, auth_config)
+ history = _read_history(paths)
+ return {"version": CONTRACT_VERSION, "items": history.get("entries", []), "updated_at": _now_iso()}
+
+ @app.post("/api/commands/run/{command_id}")
+ def run_command(command_id: str, request: Request) -> dict[str, Any]:
+ _require_auth(request, auth_config)
+ return _run_command(project_root, paths, command_id)
+
+ @app.get("/api/git")
+ def git_status(request: Request) -> dict[str, Any]:
+ _require_auth(request, auth_config)
+ return _git_payload(project_root)
+
+ @app.post("/api/git/fetch")
+ def git_fetch(request: Request) -> dict[str, Any]:
+ _require_auth(request, auth_config)
+ return _git_action(project_root, "fetch")
+
+ @app.post("/api/git/pull")
+ def git_pull(request: Request) -> dict[str, Any]:
+ _require_auth(request, auth_config)
+ return _git_action(project_root, "pull")
+
+ @app.get("/api/processes")
+ def processes(request: Request) -> dict[str, Any]:
+ _require_auth(request, auth_config)
+ return _list_services(paths)
+
+ @app.post("/api/processes/{service_id}/start")
+ def start_process(service_id: str, request: Request) -> dict[str, Any]:
+ _require_auth(request, auth_config)
+ service = _find_service(paths, service_id)
+ if not service:
+ raise HTTPException(status_code=404, detail="Unknown service")
+ return _start_service(project_root, paths, service)
+
+ @app.post("/api/processes/{service_id}/stop")
+ def stop_process(service_id: str, request: Request) -> dict[str, Any]:
+ _require_auth(request, auth_config)
+ service = _find_service(paths, service_id)
+ if not service:
+ raise HTTPException(status_code=404, detail="Unknown service")
+ return _stop_service(paths, service)
+
+ @app.post("/api/processes/{service_id}/open")
+ def open_process(service_id: str, request: Request) -> dict[str, Any]:
+ _require_auth(request, auth_config)
+ service = _find_service(paths, service_id)
+ if not service:
+ raise HTTPException(status_code=404, detail="Unknown service")
+ return _open_service(paths, service)
+
+ @app.get("/api/telemetry")
+ def telemetry(request: Request) -> dict[str, Any]:
+ _require_auth(request, auth_config)
+ snapshot = _read_snapshot(data_dir)
+ status = _read_status(data_dir)
+ history = _read_history(paths)
+ return _telemetry_summary(snapshot, status, history)
+
+ @app.get("/api/telemetry/export")
+ def telemetry_export(request: Request) -> dict[str, Any]:
+ _require_auth(request, auth_config)
+ snapshot = _read_snapshot(data_dir)
+ status = _read_status(data_dir)
+ history = _read_history(paths)
+ telemetry = _telemetry_summary(snapshot, status, history)
+ cache = _read_telemetry_export(paths)
+ exports = cache.get("exports", [])
+ if not isinstance(exports, list):
+ exports = []
+ record = {"created_at": _now_iso(), "telemetry": telemetry}
+ exports.insert(0, record)
+ cache["schema_version"] = CONTRACT_VERSION
+ cache["exports"] = exports[:10]
+ _write_telemetry_export(paths, cache)
+ return {
+ "version": CONTRACT_VERSION,
+ "export": record,
+ "updated_at": _now_iso(),
+ }
+
@app.get("/api/snapshot")
def snapshot(request: Request) -> dict[str, Any]:
_require_auth(request, auth_config)
- return _to_ui_snapshot(_read_snapshot(data_dir))
+ overview_payload = _build_overview(project_root, paths)
+ return {
+ "workspaceName": overview_payload["workspace"]["name"],
+ "projectName": overview_payload["workspace"]["project_name"],
+ "branch": overview_payload["workspace"]["branch"],
+ "repoBrief": overview_payload["repo_brief"],
+ "facts": overview_payload["facts"],
+ "systems": overview_payload["systems"],
+ "startupFlow": overview_payload["startup_flow"],
+ "actions": overview_payload["shortcuts"],
+ "mapAccess": overview_payload["map_access"],
+ }
@app.get("/api/live")
def live(request: Request) -> dict[str, Any]:
_require_auth(request, auth_config)
- snapshot = _read_snapshot(data_dir)
- status = _read_status(data_dir)
- return _to_ui_live(snapshot, status)
+ telemetry_payload = _telemetry_summary(_read_snapshot(data_dir), _read_status(data_dir), _read_history(paths))
+ summary = telemetry_payload["summary"]
+ events = telemetry_payload["events"]
+ return {
+ "refreshedAt": f"{summary['refresh_age_seconds']}s ago" if summary["refresh_age_seconds"] else "just now",
+ "activeTools": f"{summary['active_tools']} active",
+ "agentsOnline": f"{summary['agents_online']} online",
+ "insights": [
+ {
+ "label": "Token saving",
+ "value": "on" if summary["token_saving_active"] else "off",
+ "detail": "Token-saving telemetry from the latest snapshot.",
+ "tone": "emerald" if summary["token_saving_active"] else "rose",
+ },
+ {
+ "label": "Env drift",
+ "value": str(summary["env_drift"]),
+ "detail": "Configuration drift count from the snapshot insights.",
+ "tone": "emerald" if summary["env_drift"] == 0 else "amber",
+ },
+ {
+ "label": "Recent actions",
+ "value": str(summary["recent_action_count"]),
+ "detail": "Approved command runs captured by the dashboard.",
+ "tone": "violet",
+ },
+ ],
+ "liveCounters": [
+ {"label": "Tools", "value": str(summary["active_tools"]), "tone": "emerald"},
+ {"label": "Agents", "value": str(summary["agents_online"]), "tone": "cyan"},
+ {"label": "Actions", "value": str(summary["recent_action_count"]), "tone": "violet"},
+ {"label": "Refresh", "value": f"{summary['refresh_age_seconds']}s", "tone": "amber"},
+ ],
+ "events": events,
+ }
@app.get("/api/actions")
def actions(request: Request) -> dict[str, Any]:
_require_auth(request, auth_config)
- snapshot = _read_snapshot(data_dir)
- return {"actions": snapshot.get("actions", [])}
+ return {"actions": _build_overview(project_root, paths)["shortcuts"]}
@app.post("/api/actions/run/{action_id}")
def run_action(action_id: str, request: Request) -> dict[str, Any]:
_require_auth(request, auth_config)
- if action_id != "run_tests":
- return {"ok": False, "action_id": action_id, "status": "view_only"}
- result = subprocess.run(
- ["./.venv/bin/python", "-m", "pytest", "tests/test_livingdash.py"],
- cwd=project_root,
- capture_output=True,
- text=True,
- )
- return {
- "ok": result.returncode == 0,
- "action_id": action_id,
- "stdout": result.stdout[-2000:],
- "stderr": result.stderr[-1000:],
- "returncode": result.returncode,
- }
+ if action_id == "commands":
+ return _make_action_response(ok=True, status="view_only", message="Open the Commands tab.", payload_key="action", payload={"id": action_id})
+    return _make_action_response(ok=False, status="view_only", message="Legacy actions are view-only in the MVP.", payload_key="action", payload={"id": action_id})
@app.get("/api/map")
def map_data(request: Request) -> dict[str, Any]:
@@ -306,7 +891,7 @@ def index(full_path: str):
LivingDash
-
+
LivingDash
The UI build has not been generated yet.
@@ -317,19 +902,11 @@ def index(full_path: str):
return app
-def _env_path(name: str) -> Path:
- value = Path(Path.cwd()) if name == "LIVINGDASH_PROJECT_ROOT" else None
- raw = value or Path(__import__("os").environ[name])
- return Path(raw).expanduser().resolve()
-
-
def main() -> None:
parser = argparse.ArgumentParser()
parser.add_argument("--port", type=int, default=7337)
args = parser.parse_args()
- import os
-
project_root = Path(os.environ["LIVINGDASH_PROJECT_ROOT"]).expanduser().resolve()
data_dir = Path(os.environ["LIVINGDASH_DATA_DIR"]).expanduser().resolve()
ui_dist = Path(os.environ["LIVINGDASH_UI_DIST"]).expanduser().resolve()
@@ -348,4 +925,3 @@ def main() -> None:
if __name__ == "__main__":
main()
-
diff --git a/tests/test_livingdash.py b/tests/test_livingdash.py
index 33d49cb..01d597b 100644
--- a/tests/test_livingdash.py
+++ b/tests/test_livingdash.py
@@ -84,6 +84,8 @@ def test_ensure_livingdash_runtime_creates_isolated_layout(tmp_project_dir: Path
assert runtime.scaffold_root == scaffold
assert (scaffold / "server").is_dir()
assert (scaffold / "ui").is_dir()
+ assert (scaffold / "config" / "commands.json").is_file()
+ assert (scaffold / "config" / "services.json").is_file()
assert (root / "data").is_dir()
assert (scaffold / "server" / "app.py").is_file()
@@ -157,13 +159,34 @@ def test_sidecar_auth_and_snapshot_endpoints(tmp_project_dir: Path) -> None:
session = client.get("/api/auth/session")
assert session.json()["authenticated"] is True
+ overview = client.get("/api/overview")
+ assert overview.status_code == 200
+ assert overview.json()["workspace"]["name"] == "sample-project"
+ assert overview.json()["startup_flow"]
+
+ commands = client.get("/api/commands")
+ assert commands.status_code == 200
+ assert commands.json()["groups"]
+
+ git_status = client.get("/api/git")
+ assert git_status.status_code == 200
+ assert "summary" in git_status.json()
+
+ processes = client.get("/api/processes")
+ assert processes.status_code == 200
+ assert processes.json()["items"]
+
+ telemetry = client.get("/api/telemetry")
+ assert telemetry.status_code == 200
+ assert "summary" in telemetry.json()
+
snapshot = client.get("/api/snapshot")
assert snapshot.status_code == 200
- assert snapshot.json()["narrative"]["startup_flow"]["steps"]
+ assert snapshot.json()["startupFlow"]
live = client.get("/api/live")
assert live.status_code == 200
- assert "workspace_signals" in live.json()
+ assert "events" in live.json()
def test_ensure_livingdash_runtime_migrates_legacy_ldash_data(tmp_project_dir: Path) -> None:
@@ -178,3 +201,29 @@ def test_ensure_livingdash_runtime_migrates_legacy_ldash_data(tmp_project_dir: P
assert runtime.auth.exists()
assert runtime.status.exists()
assert json.loads(runtime.auth.read_text(encoding="utf-8"))["password"] == "legacy"
+
+
+def test_sidecar_rejects_unknown_or_disallowed_actions(tmp_project_dir: Path) -> None:
+ project = _make_sample_project(tmp_project_dir)
+ manager = LivingDashManager(project)
+ auth = manager.ensure_auth()
+ manager.refresh()
+
+ app = create_app(
+ project_root=project,
+ data_dir=project / ".braindrain" / "ldash" / "data",
+ ui_dist=project / ".ldash" / "ui" / "dist",
+ auth_config=auth,
+ )
+ client = TestClient(app)
+ login = client.post("/api/auth/login", json={"username": "admin", "password": auth["password"]})
+ assert login.status_code == 200
+
+ missing_command = client.post("/api/commands/run/does-not-exist")
+ assert missing_command.status_code == 404
+
+ unknown_service = client.post("/api/processes/missing/start")
+ assert unknown_service.status_code == 404
+
+ open_watcher = client.post("/api/processes/ui_tests_watch/open")
+ assert open_watcher.status_code == 403
From f0cebb3ba2cd836a8ded7e746a921a3c56b65e7d Mon Sep 17 00:00:00 2001
From: ettienne <12529192+jhweb@users.noreply.github.com>
Date: Sun, 26 Apr 2026 14:01:31 +0200
Subject: [PATCH 05/10] feat: add daily plan audit hook and report generator
Add a daily-gated Cursor stop hook and a report generator that audit plan execution status and surface overlap/gap findings, so planning drift is visible every day. Also update the hook docs and tests to keep deployment behavior verifiable.
Made-with: Cursor
---
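The hook's once-per-day gate, sketched in Python for review (state path and trigger string match this patch; the generator itself can always be run manually, e.g. `python3 scripts/daily_plan_audit.py --repo-root .`):

import datetime as dt
import json
from pathlib import Path

state_file = Path(".cursor/hooks/state/daily-plan-audit.json")
today = dt.date.today().isoformat()
last_run = ""
if state_file.exists():
    try:
        last_run = json.loads(state_file.read_text(encoding="utf-8")).get("last_run_date", "")
    except Exception:
        last_run = ""
if last_run != today:
    # ... run the audit script, then persist the gate state
    # (the hook itself does this via a tmp file + mv) ...
    state_file.write_text(
        json.dumps({"last_run_date": today, "trigger": "cursor-stop-daily-gated"}),
        encoding="utf-8",
    )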
README.md | 9 +-
config/templates/cursor/hooks.json | 3 +
.../cursor/hooks/on-stop-daily-plan-audit.sh | 48 ++
scripts/daily_plan_audit.py | 487 ++++++++++++++++++
tests/test_workspace_primer_hooks.py | 76 ++-
5 files changed, 620 insertions(+), 3 deletions(-)
create mode 100644 config/templates/cursor/hooks/on-stop-daily-plan-audit.sh
create mode 100644 scripts/daily_plan_audit.py
diff --git a/README.md b/README.md
index b7ec332..646513e 100644
--- a/README.md
+++ b/README.md
@@ -414,7 +414,11 @@ braindrain/
- Source-of-truth for those generated rule files is `config/templates/ruler/RULES.md` (and `.ruler/ruler.toml`).
- **Important**: files like `CLAUDE.md` are **generated artifacts** (gitignored) and should be treated as **disposable**. Edit the templates instead, then re-run Ruler.
- If a project already has older `.ruler/*` files, call `prime_workspace(..., sync_templates=true)` to refresh those templates safely and propagate new guidance without manual cleanup.
-- **Cursor hooks (not Ruler)**: when the resolved agent set includes Cursor, `prime_workspace()` copies `config/templates/cursor/hooks.json` and `config/templates/cursor/hooks/*.sh` into `.cursor/` (create-only by default; `sync_templates=true` overwrites with timestamped backups). Edit templates under `config/templates/cursor/` in this repo, then re-prime consumer projects to roll out hook changes.
+- **Cursor hooks (not Ruler)**: when the resolved agent set includes Cursor, `prime_workspace()` copies `config/templates/cursor/hooks.json` and `config/templates/cursor/hooks/*.sh` into `.cursor/` (create-only by default; `sync_templates=true` overwrites with timestamped backups). Hook templates currently include:
+ - `.cursor/hooks/on-stop-observe.sh` (lightweight stop-event observation)
+ - `.cursor/hooks/on-stop-gitops.sh` (TASK-GRAPH branch queueing)
+ - `.cursor/hooks/on-stop-daily-plan-audit.sh` (daily-gated planning audit report)
+ Edit templates under `config/templates/cursor/` in this repo, then re-prime consumer projects to roll out hook changes.
- **Subagent templates**: `prime_workspace()` deploys:
- `config/templates/cursor-subagents/` -> `.cursor/agents/`
- `config/templates/cursor-skills/` -> `.cursor/skills/` (e.g. scriptlib-librarian)
@@ -459,12 +463,15 @@ Implemented now (runtime behavior in this repo):
- Automation hooks and scheduler helpers are wired:
- `.cursor/hooks/on-stop-observe.sh`
- `.cursor/hooks/on-stop-gitops.sh`
+ - `.cursor/hooks/on-stop-daily-plan-audit.sh`
- `scripts/run_dream_cron.sh`
Memory artifacts and paths:
- Durable project memory path: `.braindrain/AGENT_MEMORY.md` (machine-local, gitignored).
- Incremental transcript index path: `.cursor/hooks/state/continual-learning-index.json`.
+- Daily planning audit hook state path: `.cursor/hooks/state/daily-plan-audit.json`.
+- Daily planning audit reports path: `create-subagent/plan-audit-YYYY-MM-DD.md` (plus `create-subagent/latest.md`).
- Dream artifacts path: `~/.braindrain/dreaming/` (`plans/`, `daily/`, `DREAMS.md`, `last_status.json`).
- `init_project_memory(path, dry_run)` bootstraps memory artifacts and is idempotent.
- `prime_workspace()` includes memory initialization in onboarding.
diff --git a/config/templates/cursor/hooks.json b/config/templates/cursor/hooks.json
index 630ad3a..953dedb 100644
--- a/config/templates/cursor/hooks.json
+++ b/config/templates/cursor/hooks.json
@@ -2,6 +2,9 @@
"version": 1,
"hooks": {
"stop": [
+ {
+ "command": ".cursor/hooks/on-stop-daily-plan-audit.sh"
+ },
{
"command": ".cursor/hooks/on-stop-gitops.sh"
},
diff --git a/config/templates/cursor/hooks/on-stop-daily-plan-audit.sh b/config/templates/cursor/hooks/on-stop-daily-plan-audit.sh
new file mode 100644
index 0000000..7a1c839
--- /dev/null
+++ b/config/templates/cursor/hooks/on-stop-daily-plan-audit.sh
@@ -0,0 +1,48 @@
+#!/usr/bin/env bash
+# on-stop-daily-plan-audit.sh
+# Cursor stop hook: run planning audit at most once per day.
+
+set -euo pipefail
+
+command -v git >/dev/null 2>&1 || exit 0
+command -v python3 >/dev/null 2>&1 || exit 0
+
+REPO_ROOT="$(git rev-parse --show-toplevel 2>/dev/null)" || exit 0
+AUDIT_SCRIPT="${REPO_ROOT}/scripts/daily_plan_audit.py"
+STATE_DIR="${REPO_ROOT}/.cursor/hooks/state"
+STATE_FILE="${STATE_DIR}/daily-plan-audit.json"
+OUT_DIR="${REPO_ROOT}/create-subagent"
+TODAY="$(date +%Y-%m-%d)"
+
+[ -f "${AUDIT_SCRIPT}" ] || exit 0
+
+mkdir -p "${STATE_DIR}"
+mkdir -p "${OUT_DIR}"
+
+LAST_RUN_DATE=""
+if [ -f "${STATE_FILE}" ]; then
+ LAST_RUN_DATE="$(
+ python3 -c 'import json,sys; p=sys.argv[1];
+try:
+ d=json.load(open(p, "r", encoding="utf-8"))
+ print(d.get("last_run_date",""))
+except Exception:
+ print("")' "${STATE_FILE}" 2>/dev/null || true
+ )"
+fi
+
+if [ "${LAST_RUN_DATE}" = "${TODAY}" ]; then
+ exit 0
+fi
+
+if python3 "${AUDIT_SCRIPT}" \
+ --repo-root "${REPO_ROOT}" \
+ --report-date "${TODAY}" \
+ --trigger "cursor-stop-daily-gated" \
+ --output-dir "${OUT_DIR}" >/dev/null 2>&1; then
+ TMP_FILE="${STATE_FILE}.tmp.$$"
+ printf '{\n "last_run_date": "%s",\n "trigger": "cursor-stop-daily-gated"\n}\n' "${TODAY}" > "${TMP_FILE}"
+ mv "${TMP_FILE}" "${STATE_FILE}"
+fi
+
+exit 0
diff --git a/scripts/daily_plan_audit.py b/scripts/daily_plan_audit.py
new file mode 100644
index 0000000..6c7f891
--- /dev/null
+++ b/scripts/daily_plan_audit.py
@@ -0,0 +1,487 @@
+#!/usr/bin/env python3
+"""Generate a daily planning audit report in markdown format.
+
+Priority source order:
+1) .cursor/plans/*.plan.md
+2) Secondary markdown docs in repo
+"""
+
+from __future__ import annotations
+
+import argparse
+import dataclasses
+import datetime as dt
+import re
+import shutil
+from collections import Counter, defaultdict
+from pathlib import Path
+from typing import Iterable
+
+
+SCHEMA_VERSION = "1.0"
+STOP_WORDS = {
+ "a",
+ "an",
+ "and",
+ "are",
+ "as",
+ "at",
+ "be",
+ "by",
+ "for",
+ "from",
+ "in",
+ "is",
+ "it",
+ "of",
+ "on",
+ "or",
+ "that",
+ "the",
+ "to",
+ "with",
+}
+PLANNING_KEYWORDS = (
+ "roadmap",
+ "todo",
+ "plan",
+ "planning",
+ "milestone",
+ "backlog",
+ "task",
+ "next",
+ "open",
+ "outstanding",
+ "shipped",
+ "done",
+)
+STATUS_ORDER = ["Implemented", "In Progress", "Blocked", "Outstanding", "Unknown"]
+ITEM_LINE_RE = re.compile(r"^\s*(?:[-*]|\d+\.)\s+(.*)$")
+CHECKBOX_RE = re.compile(r"^\[([ xX])\]\s*(.*)$")
+HEADING_RE = re.compile(r"^\s*#{1,6}\s+(.*)$")
+BACKTICK_RE = re.compile(r"`([^`]+)`")
+PATHISH_RE = re.compile(r"\b(?:[\w.-]+/)+[\w.-]+\b")
+
+
+@dataclasses.dataclass
+class PlanItem:
+ item: str
+ source: str
+ status: str
+ confidence: str
+ evidence: list[str]
+ why: str
+ tokens: set[str]
+
+
+def parse_args() -> argparse.Namespace:
+ parser = argparse.ArgumentParser(description="Daily planning audit report generator")
+ parser.add_argument("--repo-root", default=".", help="Repository root")
+ parser.add_argument(
+ "--output-dir",
+ default="create-subagent",
+ help="Output directory for markdown reports (repo-relative if not absolute)",
+ )
+ parser.add_argument("--report-date", default=dt.date.today().isoformat())
+ parser.add_argument("--trigger", default="cursor-stop-daily-gated")
+ return parser.parse_args()
+
+
+def is_secondary_doc(path: Path) -> bool:
+ lowered = path.as_posix().lower()
+ if "/.git/" in lowered:
+ return False
+ if "/create-subagent/" in lowered:
+ return False
+ if "/.cursor/plans/" in lowered:
+ return False
+ if "/.cursor/agents/" in lowered:
+ return False
+ if "/config/templates/" in lowered:
+ return False
+ if path.suffix.lower() != ".md":
+ return False
+ filename = path.name.lower()
+ return any(k in filename for k in ("plan", "roadmap", "todo", "task", "backlog", "milestone", "prd"))
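Illustrative inputs for the filter above (paths are absolute because discover_sources() walks repo-rooted paths; load the script as a module the way the tests do via importlib):

from pathlib import Path

assert is_secondary_doc(Path("/repo/docs/ROADMAP.md"))              # ".md" + "roadmap" in filename
assert not is_secondary_doc(Path("/repo/README.md"))                # no planning keyword in filename
assert not is_secondary_doc(Path("/repo/.cursor/plans/x.plan.md"))  # primary source, excluded here
assert not is_secondary_doc(Path("/repo/config/templates/TODO.md")) # template directory excluded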
+
+
+def discover_sources(repo_root: Path) -> tuple[list[Path], list[Path]]:
+ primary = sorted((repo_root / ".cursor" / "plans").glob("*.plan.md"))
+ secondary: list[Path] = []
+ for path in repo_root.rglob("*.md"):
+ if is_secondary_doc(path):
+ snippet = path.read_text(encoding="utf-8", errors="ignore")[:2500].lower()
+ if any(k in snippet for k in PLANNING_KEYWORDS):
+ secondary.append(path)
+ secondary.sort()
+ return primary, secondary
+
+
+def tokenize(text: str) -> set[str]:
+ words = re.findall(r"[a-z0-9]+", text.lower())
+ return {w for w in words if len(w) > 2 and w not in STOP_WORDS}
+
+
+def extract_path_refs(text: str) -> list[str]:
+ refs = set(BACKTICK_RE.findall(text))
+ refs.update(PATHISH_RE.findall(text))
+ cleaned = []
+ for ref in refs:
+ value = ref.strip().strip(".,;:()[]{}")
+ if "/" in value and not value.startswith("http"):
+ cleaned.append(value)
+ return sorted(set(cleaned))
+
+
+def classify_status(text: str, checked: str | None) -> tuple[str, str]:
+ lowered = text.lower()
+ if checked == "x":
+ return "Implemented", "high"
+ if any(k in lowered for k in ["blocked", "waiting on", "depends on", "dependency"]):
+ return "Blocked", "medium"
+ if any(k in lowered for k in ["in progress", "wip", "ongoing", "active"]):
+ return "In Progress", "medium"
+ if checked == " ":
+ return "Outstanding", "high"
+ if any(k in lowered for k in ["todo", "next", "open", "planned"]):
+ return "Outstanding", "medium"
+ return "Unknown", "low"
+
+
+def collect_items(path: Path, repo_root: Path) -> list[PlanItem]:
+ items: list[PlanItem] = []
+ current_heading = ""
+ heading_planning = False
+ lines = path.read_text(encoding="utf-8", errors="ignore").splitlines()
+
+ for line in lines:
+ heading_match = HEADING_RE.match(line)
+ if heading_match:
+ current_heading = heading_match.group(1).strip()
+ heading_planning = any(k in current_heading.lower() for k in PLANNING_KEYWORDS)
+ continue
+
+ item_match = ITEM_LINE_RE.match(line)
+ if not item_match:
+ continue
+
+ body = item_match.group(1).strip()
+ checked: str | None = None
+ checkbox = CHECKBOX_RE.match(body)
+ if checkbox:
+ checked = checkbox.group(1).lower()
+ body = checkbox.group(2).strip()
+
+ if len(body) < 8:
+ continue
+
+ status, confidence = classify_status(body, checked)
+ if status == "Unknown" and not heading_planning and checked is None:
+ continue
+ path_refs = extract_path_refs(body)
+ evidence: list[str] = []
+ for ref in path_refs[:3]:
+ resolved = (repo_root / ref).resolve()
+ if resolved.exists():
+ evidence.append(ref)
+ if not evidence:
+ evidence.append(f"{path.relative_to(repo_root).as_posix()}#{current_heading or 'item'}")
+
+ why = f"Derived from {'checked' if checked == 'x' else 'unchecked' if checked == ' ' else 'textual'} signal."
+ item = PlanItem(
+ item=body,
+ source=path.relative_to(repo_root).as_posix(),
+ status=status,
+ confidence=confidence,
+ evidence=evidence,
+ why=why,
+ tokens=tokenize(body),
+ )
+ items.append(item)
+ return items
+
+
+def jaccard(left: set[str], right: set[str]) -> float:
+ if not left or not right:
+ return 0.0
+ return len(left & right) / len(left | right)
+
+
+def detect_overlaps(items: list[PlanItem]) -> list[dict[str, str]]:
+ overlaps: list[dict[str, str]] = []
+ for idx, a in enumerate(items):
+ for b in items[idx + 1 :]:
+ if a.source == b.source:
+ continue
+ score = jaccard(a.tokens, b.tokens)
+ if score < 0.55:
+ continue
+ severity = "high" if a.status != b.status else "medium"
+ overlaps.append(
+ {
+ "item_a": a.item,
+ "source_a": a.source,
+ "item_b": b.item,
+ "source_b": b.source,
+ "similarity": f"{score:.2f}",
+ "severity": severity,
+ }
+ )
+ overlaps.sort(key=lambda x: (x["severity"] != "high", -float(x["similarity"])))
+ return overlaps
+
+
+def detect_gaps(items: list[PlanItem]) -> list[dict[str, str]]:
+ gaps: list[dict[str, str]] = []
+ for item in items:
+ if item.status not in {"Outstanding", "In Progress", "Blocked"}:
+ continue
+ lowered = item.item.lower()
+ has_owner = "@" in item.item or "owner" in lowered
+ has_test_hint = "test" in lowered
+ has_path_evidence = any("/" in ev for ev in item.evidence)
+ missing = []
+ if not has_owner:
+ missing.append("owner")
+ if not has_test_hint:
+ missing.append("test")
+ if not has_path_evidence:
+ missing.append("evidence")
+ if missing:
+ risk = "high" if item.status == "Blocked" else "medium"
+ gaps.append(
+ {
+ "item": item.item,
+ "source": item.source,
+ "missing": ", ".join(missing),
+ "risk": risk,
+ }
+ )
+ gaps.sort(key=lambda x: (x["risk"] != "high", x["missing"]))
+ return gaps
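For instance (values illustrative), an Outstanding item with path evidence but no owner or test mention produces:

# {"item": "Wire exporter for dashboard",
#  "source": "docs/ROADMAP.md",
#  "missing": "owner, test",
#  "risk": "medium"}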
+
+
+def memory_context(repo_root: Path) -> dict[str, object]:
+ candidates = [
+ repo_root / ".braindrain" / "AGENT_MEMORY.md",
+ repo_root / ".cursor" / "hooks" / "state" / "continual-learning-index.json",
+ ]
+ existing = [p for p in candidates if p.exists()]
+ return {
+ "used": bool(existing),
+ "sources": [p.relative_to(repo_root).as_posix() for p in existing],
+ }
+
+
+def score_report(items: list[PlanItem], overlaps: list[dict[str, str]], gaps: list[dict[str, str]]) -> dict[str, int]:
+ counts = Counter(i.status for i in items)
+ known = max(1, sum(counts[s] for s in STATUS_ORDER[:-1]))
+ coverage = int((counts["Implemented"] / known) * 100)
+ overlap_score = min(100, len(overlaps) * 15)
+ gap_score = min(100, len(gaps) * 12)
+ return {
+ "overlap_score": overlap_score,
+ "gap_score": gap_score,
+ "coverage_score": coverage,
+ }
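A worked scoring example (counts illustrative):

from collections import Counter

counts = Counter({"Implemented": 3, "Blocked": 1, "Outstanding": 2})
known = max(1, 3 + 0 + 1 + 2)                             # Unknown excluded from the denominator
assert int((counts["Implemented"] / known) * 100) == 50   # coverage_score
assert min(100, 2 * 15) == 30                             # overlap_score for 2 overlaps
assert min(100, 3 * 12) == 36                             # gap_score for 3 gaps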
+
+
+def render_status_section(title: str, items: list[PlanItem]) -> list[str]:
+ lines = [f"## {title}", ""]
+ if not items:
+ lines.append("- None")
+ lines.append("")
+ return lines
+
+ for entry in items:
+ lines.append(f"- Item: {entry.item}")
+ lines.append(f" - Source: `{entry.source}`")
+ lines.append(f" - Status: `{entry.status}`")
+ lines.append(f" - Confidence: `{entry.confidence}`")
+ lines.append(" - Evidence:")
+ for ev in entry.evidence[:3]:
+ lines.append(f" - `{ev}`")
+ lines.append(f" - Why: {entry.why}")
+ lines.append("")
+ return lines
+
+
+def build_report(
+ report_date: str,
+ trigger: str,
+ repo_root: Path,
+ primary: list[Path],
+ secondary: list[Path],
+ items: list[PlanItem],
+) -> str:
+ overlaps = detect_overlaps(items)
+ gaps = detect_gaps(items)
+ scores = score_report(items, overlaps, gaps)
+ summary_counts = Counter(item.status for item in items)
+ mem = memory_context(repo_root)
+
+ top_risks: list[str] = []
+ if any(i.status == "Blocked" for i in items):
+ top_risks.append("Blocked plan items require immediate owner assignment.")
+ if gaps:
+ top_risks.append("Open items are missing owner/test/evidence details.")
+ if overlaps:
+ top_risks.append("Overlapping plan entries may create duplicated delivery work.")
+ if not top_risks:
+ top_risks.append("No major risks detected from current planning artifacts.")
+
+ frontmatter = {
+ "schema_version": SCHEMA_VERSION,
+ "report_date": report_date,
+ "trigger": trigger,
+ "sources": {
+ "primary_plan_files": [p.relative_to(repo_root).as_posix() for p in primary],
+ "secondary_docs_count": len(secondary),
+ },
+ "summary_counts": {
+ "implemented": summary_counts["Implemented"],
+ "in_progress": summary_counts["In Progress"],
+ "blocked": summary_counts["Blocked"],
+ "outstanding": summary_counts["Outstanding"],
+ "unknown": summary_counts["Unknown"],
+ },
+ "analysis_scores": scores,
+ "top_risks": top_risks[:5],
+ "memory_context": mem,
+ }
+
+ body: list[str] = []
+ body.append("---")
+ body.append(f"schema_version: \"{frontmatter['schema_version']}\"")
+ body.append(f"report_date: \"{frontmatter['report_date']}\"")
+ body.append(f"trigger: \"{frontmatter['trigger']}\"")
+ body.append("sources:")
+ body.append(" primary_plan_files:")
+ for plan_file in frontmatter["sources"]["primary_plan_files"]:
+ body.append(f" - \"{plan_file}\"")
+ body.append(f" secondary_docs_count: {frontmatter['sources']['secondary_docs_count']}")
+ body.append("summary_counts:")
+ for k, v in frontmatter["summary_counts"].items():
+ body.append(f" {k}: {v}")
+ body.append("analysis_scores:")
+ for k, v in frontmatter["analysis_scores"].items():
+ body.append(f" {k}: {v}")
+ body.append("top_risks:")
+ for risk in frontmatter["top_risks"]:
+ body.append(f" - \"{risk}\"")
+ body.append("memory_context:")
+ body.append(f" used: {str(frontmatter['memory_context']['used']).lower()}")
+ body.append(" sources:")
+ for source in frontmatter["memory_context"]["sources"]:
+ body.append(f" - \"{source}\"")
+ if not frontmatter["memory_context"]["sources"]:
+ body.append(" - \"none\"")
+ body.append("---")
+ body.append("")
+ body.append("# Daily Plan Audit Report")
+ body.append("")
+ body.append("## Executive Summary")
+ body.append(
+ f"- Scanned {len(primary)} primary plan files and {len(secondary)} secondary markdown docs."
+ )
+ body.append(
+ f"- Status totals: Implemented={summary_counts['Implemented']}, In Progress={summary_counts['In Progress']}, Blocked={summary_counts['Blocked']}, Outstanding={summary_counts['Outstanding']}, Unknown={summary_counts['Unknown']}."
+ )
+ body.append(
+ f"- Scores: coverage={scores['coverage_score']}, overlap={scores['overlap_score']}, gap={scores['gap_score']}."
+ )
+ body.append("")
+ body.append("## Status Matrix (5-State)")
+ body.append("| Status | Count |")
+ body.append("|---|---:|")
+ for status in STATUS_ORDER:
+ body.append(f"| {status} | {summary_counts[status]} |")
+ body.append("")
+
+ grouped = defaultdict(list)
+ for item in items:
+ grouped[item.status].append(item)
+
+ body.extend(render_status_section("Implemented", grouped["Implemented"]))
+ body.extend(render_status_section("In Progress", grouped["In Progress"]))
+ body.extend(render_status_section("Blocked", grouped["Blocked"]))
+ body.extend(render_status_section("Outstanding", grouped["Outstanding"]))
+ body.extend(render_status_section("Unknown", grouped["Unknown"]))
+
+ body.append("## Overlap Analysis")
+ if not overlaps:
+ body.append("- None")
+ else:
+ for overlap in overlaps[:20]:
+ body.append(
+ "- "
+ f"`{overlap['source_a']}` <-> `{overlap['source_b']}` "
+ f"(similarity={overlap['similarity']}, severity={overlap['severity']})"
+ )
+ body.append(f" - A: {overlap['item_a']}")
+ body.append(f" - B: {overlap['item_b']}")
+ body.append("")
+
+ body.append("## Gap Analysis")
+ if not gaps:
+ body.append("- None")
+ else:
+ for gap in gaps[:20]:
+ body.append(
+ f"- `{gap['source']}` ({gap['risk']} risk): missing {gap['missing']} -> {gap['item']}"
+ )
+ body.append("")
+
+ body.append("## Memory Context Used")
+ body.append(f"- Used: `{str(mem['used']).lower()}`")
+ if mem["sources"]:
+ for source in mem["sources"]:
+ body.append(f"- Source: `{source}`")
+ else:
+ body.append("- Source: none available")
+ body.append("")
+
+ body.append("## Recommended Next Actions")
+ prioritized = sorted(
+ [item for item in items if item.status in {"Blocked", "Outstanding", "In Progress"}],
+ key=lambda i: (i.status != "Blocked", i.status != "Outstanding", i.confidence != "high"),
+ )
+ if not prioritized:
+ body.append("- Keep roadmap and todos synchronized with implementation references.")
+ else:
+ for item in prioritized[:7]:
+ body.append(
+ f"- [{item.status}] `{item.source}`: add owner/test/evidence updates for `{item.item}`."
+ )
+ body.append("")
+ return "\n".join(body)
+
+
+def main() -> int:
+ args = parse_args()
+ repo_root = Path(args.repo_root).resolve()
+ out_dir = Path(args.output_dir)
+ if not out_dir.is_absolute():
+ out_dir = repo_root / out_dir
+ out_dir.mkdir(parents=True, exist_ok=True)
+
+ primary, secondary = discover_sources(repo_root)
+
+ items: list[PlanItem] = []
+ for path in primary:
+ items.extend(collect_items(path, repo_root))
+ for path in secondary:
+ items.extend(collect_items(path, repo_root))
+
+ report = build_report(args.report_date, args.trigger, repo_root, primary, secondary, items)
+ dated_path = out_dir / f"plan-audit-{args.report_date}.md"
+ dated_path.write_text(report, encoding="utf-8")
+
+ latest_path = out_dir / "latest.md"
+ shutil.copyfile(dated_path, latest_path)
+ print(str(dated_path))
+ return 0
+
+
+if __name__ == "__main__":
+ raise SystemExit(main())
diff --git a/tests/test_workspace_primer_hooks.py b/tests/test_workspace_primer_hooks.py
index 88a313b..121e0cc 100644
--- a/tests/test_workspace_primer_hooks.py
+++ b/tests/test_workspace_primer_hooks.py
@@ -2,8 +2,10 @@
from __future__ import annotations
+import importlib.util
import json
import shutil
+import sys
import stat
import uuid
from pathlib import Path
@@ -17,6 +19,7 @@
)
_REPO_ROOT = Path(__file__).resolve().parent.parent
+_AUDIT_SCRIPT_PATH = _REPO_ROOT / "scripts" / "daily_plan_audit.py"
@pytest.fixture
@@ -30,9 +33,19 @@ def tmp_project_dir() -> Path:
shutil.rmtree(d, ignore_errors=True)
+def _load_audit_module():
+ spec = importlib.util.spec_from_file_location("daily_plan_audit", _AUDIT_SCRIPT_PATH)
+ assert spec and spec.loader
+ module = importlib.util.module_from_spec(spec)
+ sys.modules[spec.name] = module
+ spec.loader.exec_module(module)
+ return module
+
+
def test_cursor_hook_templates_exist_in_repo() -> None:
assert (CURSOR_HOOK_TEMPLATES_DIR / "hooks.json").is_file()
hooks = CURSOR_HOOK_TEMPLATES_DIR / "hooks"
+ assert (hooks / "on-stop-daily-plan-audit.sh").is_file()
assert (hooks / "on-stop-gitops.sh").is_file()
assert (hooks / "on-stop-observe.sh").is_file()
@@ -46,9 +59,11 @@ def test_deploy_cursor_hook_templates_writes_expected_paths(tmp_project_dir: Pat
assert hj.read_text(encoding="utf-8") == src_json
assert json.loads(hj.read_text(encoding="utf-8"))["version"] == 1
+ d = tmp_project_dir / ".cursor" / "hooks" / "on-stop-daily-plan-audit.sh"
g = tmp_project_dir / ".cursor" / "hooks" / "on-stop-gitops.sh"
o = tmp_project_dir / ".cursor" / "hooks" / "on-stop-observe.sh"
- assert g.is_file() and o.is_file()
+ assert d.is_file() and g.is_file() and o.is_file()
+ assert d.stat().st_mode & stat.S_IXUSR
assert g.stat().st_mode & stat.S_IXUSR
assert o.stat().st_mode & stat.S_IXUSR
@@ -70,9 +85,10 @@ def test_compact_prime_result_includes_cursor_hooks_summary() -> None:
"skipped": False,
"deployed": {
"hooks.json": {"action": "created", "backup": ""},
+ "hooks/on-stop-daily-plan-audit.sh": {"action": "created", "backup": ""},
"hooks/on-stop-gitops.sh": {"action": "created", "backup": ""},
},
- "new_files": 2,
+ "new_files": 3,
"updated_files": 0,
"skipped_existing": 0,
},
@@ -86,3 +102,59 @@ def test_compact_prime_result_includes_cursor_hooks_summary() -> None:
assert isinstance(ch, dict)
assert ch.get("deployed_summary")
assert any(x["file"] == "hooks.json" for x in ch["deployed_summary"])
+
+
+def test_daily_plan_hook_contains_once_per_day_gate() -> None:
+ hook_path = CURSOR_HOOK_TEMPLATES_DIR / "hooks" / "on-stop-daily-plan-audit.sh"
+ content = hook_path.read_text(encoding="utf-8")
+ assert "daily-plan-audit.json" in content
+ assert "LAST_RUN_DATE" in content
+ assert 'if [ "${LAST_RUN_DATE}" = "${TODAY}" ]; then' in content
+
+
+def test_daily_plan_audit_prioritizes_cursor_plan_files(tmp_project_dir: Path) -> None:
+ module = _load_audit_module()
+ (tmp_project_dir / ".cursor" / "plans").mkdir(parents=True, exist_ok=True)
+ (tmp_project_dir / ".cursor" / "plans" / "x.plan.md").write_text(
+ "# Plan\n- [ ] Outstanding item\n", encoding="utf-8"
+ )
+ (tmp_project_dir / "ROADMAP.md").write_text("# Roadmap\n- [ ] Next item\n", encoding="utf-8")
+ (tmp_project_dir / "README.md").write_text("# Readme\nUnrelated docs\n", encoding="utf-8")
+
+ primary, secondary = module.discover_sources(tmp_project_dir)
+ assert [p.relative_to(tmp_project_dir).as_posix() for p in primary] == [
+ ".cursor/plans/x.plan.md"
+ ]
+ assert "ROADMAP.md" in {p.relative_to(tmp_project_dir).as_posix() for p in secondary}
+ assert "README.md" not in {p.relative_to(tmp_project_dir).as_posix() for p in secondary}
+
+
+def test_daily_plan_audit_report_contract(tmp_project_dir: Path) -> None:
+ module = _load_audit_module()
+ (tmp_project_dir / ".cursor" / "plans").mkdir(parents=True, exist_ok=True)
+ plan_path = tmp_project_dir / ".cursor" / "plans" / "daily.plan.md"
+ plan_path.write_text(
+ "# Next\n- [ ] Add owner + tests for workflow item\n- [x] Ship core module\n",
+ encoding="utf-8",
+ )
+
+ primary, secondary = module.discover_sources(tmp_project_dir)
+ items = []
+ for src in primary + secondary:
+ items.extend(module.collect_items(src, tmp_project_dir))
+
+ report = module.build_report(
+ report_date="2026-04-26",
+ trigger="cursor-stop-daily-gated",
+ repo_root=tmp_project_dir,
+ primary=primary,
+ secondary=secondary,
+ items=items,
+ )
+
+ assert 'schema_version: "1.0"' in report
+ assert "## Status Matrix (5-State)" in report
+ assert "## Overlap Analysis" in report
+ assert "## Gap Analysis" in report
+ assert "## Memory Context Used" in report
+ assert "## Recommended Next Actions" in report
From b06f651b6f9763cb4fd9690ffd7f3656d22d3a85 Mon Sep 17 00:00:00 2001
From: ettienne <12529192+jhweb@users.noreply.github.com>
Date: Sat, 2 May 2026 17:09:50 +0200
Subject: [PATCH 06/10] feat: tighten plan audit ownership checks and add task
board
Enforce strict explicit-owner markers in audit ownership detection, ignore legacy .devdocs plan sources, and generate a daily plan task-board markdown table so active work tracking stays current.
Co-authored-by: Cursor
---
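Illustrative inputs for the stricter ownership markers (load the script via importlib as the tests do; strings illustrative):

assert has_explicit_owner("- [ ] Wire exporter @alice")           # @handle marker
assert has_explicit_owner("Owner: bob, migrate config loader")    # owner: label
assert has_explicit_owner("DRI: carol, ship the audit hook")      # dri: label
assert not has_explicit_owner("the owner will be decided later")  # bare prose no longer counts
assert extract_owner_display("- [ ] Wire exporter @alice") == "@alice"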
.../cursor/hooks/on-stop-daily-plan-audit.sh | 2 +-
scripts/daily_plan_audit.py | 128 ++++++++++++++++--
tests/test_workspace_primer_hooks.py | 47 ++++++-
3 files changed, 164 insertions(+), 13 deletions(-)
diff --git a/config/templates/cursor/hooks/on-stop-daily-plan-audit.sh b/config/templates/cursor/hooks/on-stop-daily-plan-audit.sh
index 7a1c839..fe5dde8 100644
--- a/config/templates/cursor/hooks/on-stop-daily-plan-audit.sh
+++ b/config/templates/cursor/hooks/on-stop-daily-plan-audit.sh
@@ -11,7 +11,7 @@ REPO_ROOT="$(git rev-parse --show-toplevel 2>/dev/null)" || exit 0
AUDIT_SCRIPT="${REPO_ROOT}/scripts/daily_plan_audit.py"
STATE_DIR="${REPO_ROOT}/.cursor/hooks/state"
STATE_FILE="${STATE_DIR}/daily-plan-audit.json"
-OUT_DIR="${REPO_ROOT}/create-subagent"
+OUT_DIR="${REPO_ROOT}/.braindrain/plan-reports"
TODAY="$(date +%Y-%m-%d)"
[ -f "${AUDIT_SCRIPT}" ] || exit 0
diff --git a/scripts/daily_plan_audit.py b/scripts/daily_plan_audit.py
index 6c7f891..ac8b628 100644
--- a/scripts/daily_plan_audit.py
+++ b/scripts/daily_plan_audit.py
@@ -15,7 +15,6 @@
import shutil
from collections import Counter, defaultdict
from pathlib import Path
-from typing import Iterable
SCHEMA_VERSION = "1.0"
@@ -61,6 +60,33 @@
HEADING_RE = re.compile(r"^\s*#{1,6}\s+(.*)$")
BACKTICK_RE = re.compile(r"`([^`]+)`")
PATHISH_RE = re.compile(r"\b(?:[\w.-]+/)+[\w.-]+\b")
+# Strict ownership markers only (no fuzzy "owner" substring in prose).
+OWNER_AT_RE = re.compile(r"(?:^|[\s([{<'\"]|[-*]\s+)@([a-zA-Z0-9_.-]{1,64})\b")
+OWNER_LABEL_RES = (
+ re.compile(r"\bowner\s*:\s*(\S+)", re.IGNORECASE),
+ re.compile(r"\bassignee\s*:\s*(\S+)", re.IGNORECASE),
+ re.compile(r"\bdri\s*:\s*(\S+)", re.IGNORECASE),
+)
+
+
+def has_explicit_owner(text: str) -> bool:
+ if OWNER_AT_RE.search(text):
+ return True
+ for rx in OWNER_LABEL_RES:
+ if rx.search(text):
+ return True
+ return False
+
+
+def extract_owner_display(text: str) -> str:
+ m = OWNER_AT_RE.search(text)
+ if m:
+ return f"@{m.group(1)}"
+ for rx in OWNER_LABEL_RES:
+ m2 = rx.search(text)
+ if m2:
+ return m2.group(1).strip(".,;:)]}")
+ return "—"
@dataclasses.dataclass
@@ -79,7 +105,7 @@ def parse_args() -> argparse.Namespace:
parser.add_argument("--repo-root", default=".", help="Repository root")
parser.add_argument(
"--output-dir",
- default="create-subagent",
+ default=".braindrain/plan-reports",
help="Output directory for markdown reports (repo-relative if not absolute)",
)
parser.add_argument("--report-date", default=dt.date.today().isoformat())
@@ -93,8 +119,12 @@ def is_secondary_doc(path: Path) -> bool:
return False
if "/create-subagent/" in lowered:
return False
+ if "/.braindrain/plan-reports/" in lowered:
+ return False
if "/.cursor/plans/" in lowered:
return False
+ if "/.devdocs/" in lowered:
+ return False
if "/.cursor/agents/" in lowered:
return False
if "/config/templates/" in lowered:
@@ -237,12 +267,12 @@ def detect_gaps(items: list[PlanItem]) -> list[dict[str, str]]:
if item.status not in {"Outstanding", "In Progress", "Blocked"}:
continue
lowered = item.item.lower()
- has_owner = "@" in item.item or "owner" in lowered
+ has_owner = has_explicit_owner(item.item)
has_test_hint = "test" in lowered
has_path_evidence = any("/" in ev for ev in item.evidence)
missing = []
if not has_owner:
- missing.append("owner")
+ missing.append("explicit_owner")
if not has_test_hint:
missing.append("test")
if not has_path_evidence:
@@ -261,6 +291,52 @@ def detect_gaps(items: list[PlanItem]) -> list[dict[str, str]]:
return gaps
+def render_task_board_markdown(report_date: str, items: list[PlanItem]) -> str:
+ """Single markdown table of active work, regenerated each audit run."""
+ rows: list[PlanItem] = [
+ i
+ for i in items
+ if i.status in {"Blocked", "In Progress", "Outstanding"}
+ ]
+ rows.sort(
+ key=lambda i: (
+ {"Blocked": 0, "In Progress": 1, "Outstanding": 2}[i.status],
+ i.source,
+ i.item[:80],
+ )
+ )
+ lines = [
+ "# Plan task board",
+ "",
+ f"_Generated {report_date} by `scripts/daily_plan_audit.py` (daily hook). "
+ "Do not edit by hand — ownership must use `@name`, `owner:`, `assignee:`, or `dri:`._",
+ "",
+ "| Status | Owner | Item | Source | Gaps |",
+ "|--------|-------|------|--------|------|",
+ ]
+ for item in rows:
+ gap_parts = []
+ if not has_explicit_owner(item.item):
+ gap_parts.append("explicit_owner")
+ if "test" not in item.item.lower():
+ gap_parts.append("test")
+ if not any("/" in ev for ev in item.evidence):
+ gap_parts.append("path_evidence")
+ gaps_cell = ", ".join(gap_parts) if gap_parts else "—"
+ owner_cell = extract_owner_display(item.item)
+ item_cell = item.item.replace("|", "\\|").replace("\n", " ")
+ if len(item_cell) > 120:
+ item_cell = item_cell[:117] + "..."
+ src_cell = f"`{item.source}`"
+ lines.append(
+ f"| {item.status} | {owner_cell} | {item_cell} | {src_cell} | {gaps_cell} |"
+ )
+ if not rows:
+ lines.append("| — | — | _No blocked/in-progress/outstanding items parsed._ | — | — |")
+ lines.append("")
+ return "\n".join(lines)
+
+
def memory_context(repo_root: Path) -> dict[str, object]:
candidates = [
repo_root / ".braindrain" / "AGENT_MEMORY.md",
@@ -321,10 +397,28 @@ def build_report(
mem = memory_context(repo_root)
top_risks: list[str] = []
- if any(i.status == "Blocked" for i in items):
- top_risks.append("Blocked plan items require immediate owner assignment.")
- if gaps:
- top_risks.append("Open items are missing owner/test/evidence details.")
+ blocked_no_owner = [
+ i for i in items if i.status == "Blocked" and not has_explicit_owner(i.item)
+ ]
+ if blocked_no_owner:
+ top_risks.append(
+ "Blocked items lack explicit owner markers (@, owner:, assignee:, or dri:)."
+ )
+ elif any(i.status == "Blocked" for i in items):
+ top_risks.append(
+ "Blocked items have owner markers; resolve dependencies and unblock execution."
+ )
+ if any("explicit_owner" in g["missing"] for g in gaps) and not blocked_no_owner:
+ top_risks.append(
+ "Some active items lack explicit owner markers (@, owner:, assignee:, or dri:)."
+ )
+ gap_test_or_path = [
+ g
+ for g in gaps
+ if "test" in g["missing"] or "evidence" in g["missing"]
+ ]
+ if gap_test_or_path:
+ top_risks.append("Active items are missing test hints and/or path evidence in plan text.")
if overlaps:
top_risks.append("Overlapping plan entries may create duplicated delivery work.")
if not top_risks:
@@ -450,9 +544,17 @@ def build_report(
body.append("- Keep roadmap and todos synchronized with implementation references.")
else:
for item in prioritized[:7]:
- body.append(
- f"- [{item.status}] `{item.source}`: add owner/test/evidence updates for `{item.item}`."
- )
+ wants: list[str] = []
+ if not has_explicit_owner(item.item):
+ wants.append(
+ "add explicit owner (@name or owner:/assignee:/dri:)"
+ )
+ if "test" not in item.item.lower():
+ wants.append("add test hint")
+ if not any("/" in ev for ev in item.evidence):
+ wants.append("link path evidence")
+ hint = "; ".join(wants) if wants else "review for drift vs implementation"
+ body.append(f"- [{item.status}] `{item.source}`: {hint} — `{item.item}`")
body.append("")
return "\n".join(body)
@@ -479,6 +581,10 @@ def main() -> int:
latest_path = out_dir / "latest.md"
shutil.copyfile(dated_path, latest_path)
+
+ board = render_task_board_markdown(args.report_date, items)
+ (out_dir / "plan-task-board.md").write_text(board, encoding="utf-8")
+
print(str(dated_path))
return 0
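A usage sketch for the new task board renderer. `SimpleNamespace` stands in for `PlanItem` (whose constructor is defined elsewhere in the script); the renderer only reads the four attributes shown:

```python
from types import SimpleNamespace

import daily_plan_audit as audit  # scripts/daily_plan_audit.py as a module

# Stand-in for PlanItem with just the attributes the renderer touches.
item = SimpleNamespace(
    status="Blocked",
    item="Fix auth refresh owner: alice",
    source=".cursor/plans/auth.plan.md",
    evidence=["braindrain/auth.py"],
)
board = audit.render_task_board_markdown("2026-05-02", [item])
print(board)
# Expected row (the Gaps column flags the missing test hint):
# | Blocked | alice | Fix auth refresh owner: alice | `.cursor/plans/auth.plan.md` | test |
```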
diff --git a/tests/test_workspace_primer_hooks.py b/tests/test_workspace_primer_hooks.py
index 121e0cc..2a79852 100644
--- a/tests/test_workspace_primer_hooks.py
+++ b/tests/test_workspace_primer_hooks.py
@@ -5,10 +5,11 @@
import importlib.util
import json
import shutil
-import sys
import stat
+import sys
import uuid
from pathlib import Path
+from unittest.mock import patch
import pytest
@@ -115,11 +116,15 @@ def test_daily_plan_hook_contains_once_per_day_gate() -> None:
def test_daily_plan_audit_prioritizes_cursor_plan_files(tmp_project_dir: Path) -> None:
module = _load_audit_module()
(tmp_project_dir / ".cursor" / "plans").mkdir(parents=True, exist_ok=True)
+ (tmp_project_dir / ".devdocs").mkdir(parents=True, exist_ok=True)
(tmp_project_dir / ".cursor" / "plans" / "x.plan.md").write_text(
"# Plan\n- [ ] Outstanding item\n", encoding="utf-8"
)
(tmp_project_dir / "ROADMAP.md").write_text("# Roadmap\n- [ ] Next item\n", encoding="utf-8")
(tmp_project_dir / "README.md").write_text("# Readme\nUnrelated docs\n", encoding="utf-8")
+ (tmp_project_dir / ".devdocs" / "legacy.plan.md").write_text(
+ "# Legacy Plan\n- [ ] stale task\n", encoding="utf-8"
+ )
primary, secondary = module.discover_sources(tmp_project_dir)
assert [p.relative_to(tmp_project_dir).as_posix() for p in primary] == [
@@ -127,6 +132,46 @@ def test_daily_plan_audit_prioritizes_cursor_plan_files(tmp_project_dir: Path) -
]
assert "ROADMAP.md" in {p.relative_to(tmp_project_dir).as_posix() for p in secondary}
assert "README.md" not in {p.relative_to(tmp_project_dir).as_posix() for p in secondary}
+ assert ".devdocs/legacy.plan.md" not in {
+ p.relative_to(tmp_project_dir).as_posix() for p in secondary
+ }
+
+
+def test_strict_owner_allowlist(tmp_project_dir: Path) -> None:
+ _ = tmp_project_dir
+ m = _load_audit_module()
+ assert m.has_explicit_owner("@bob fix auth")
+ assert m.has_explicit_owner("Ship owner: team-alpha")
+ assert m.has_explicit_owner("x assignee: jane")
+ assert m.has_explicit_owner("DRI: eng for rollout")
+ assert not m.has_explicit_owner("the owner should review the blocked dependency")
+ assert not m.has_explicit_owner("blocked command classes (policy)")
+
+
+def test_daily_plan_audit_writes_task_board(tmp_project_dir: Path) -> None:
+ m = _load_audit_module()
+ (tmp_project_dir / ".cursor" / "plans").mkdir(parents=True, exist_ok=True)
+ (tmp_project_dir / ".cursor" / "plans" / "board.plan.md").write_text(
+ "# Plan\n"
+ "- [ ] owner: alice first task\n"
+ "- [ ] blocked by policy with no marker\n",
+ encoding="utf-8",
+ )
+ argv = [
+ "daily_plan_audit.py",
+ "--repo-root",
+ str(tmp_project_dir),
+ "--report-date",
+ "2026-06-01",
+ ]
+ with patch.object(sys, "argv", argv):
+ assert m.main() == 0
+ reports = tmp_project_dir / ".braindrain" / "plan-reports"
+ assert (reports / "plan-task-board.md").is_file()
+ board = (reports / "plan-task-board.md").read_text(encoding="utf-8")
+ assert "# Plan task board" in board
+ assert "alice" in board
+ assert (reports / "plan-audit-2026-06-01.md").is_file()
def test_daily_plan_audit_report_contract(tmp_project_dir: Path) -> None:
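The motivation for the strict allowlist shows up in a two-line comparison: the old fuzzy check fired on any prose containing "owner", while a labeled-marker regex requires an actual assignment. A simplified sketch:

```python
import re


def fuzzy_has_owner(text: str) -> bool:
    """Pre-patch heuristic: any '@' or the substring 'owner' counted."""
    return "@" in text or "owner" in text.lower()


OWNER_LABEL = re.compile(r"\bowner\s*:\s*(\S+)", re.IGNORECASE)  # simplified

prose = "the owner should review the blocked dependency"
print(fuzzy_has_owner(prose))            # True  (false positive)
print(bool(OWNER_LABEL.search(prose)))   # False (no `owner:` assignment)
print(bool(OWNER_LABEL.search("Ship owner: team-alpha")))  # True
```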
From 2bb2a0445ad001409e423eab5eefb1bfd20bc5c2 Mon Sep 17 00:00:00 2001
From: ettienne <12529192+jhweb@users.noreply.github.com>
Date: Sat, 2 May 2026 17:18:16 +0200
Subject: [PATCH 07/10] docs: update daily audit report location and ownership
guidance
Clarify that daily planning audit outputs live under .braindrain/plan-reports and document explicit owner marker formats used by the audit workflow.
Co-authored-by: Cursor
---
README.md | 158 +++++++++++++++++++++++++++++++-----------------------
1 file changed, 90 insertions(+), 68 deletions(-)
diff --git a/README.md b/README.md
index 646513e..fa8c9d2 100644
--- a/README.md
+++ b/README.md
@@ -46,69 +46,81 @@ This keeps passwords/session secrets out of shareable dashboard scaffold paths a
### Environment
-| Tool | When to use |
-|---|---|
-| `get_env_context()` | **Call this first** in any session. Returns a cached snapshot of the machine: Python interpreters, package managers, installed IDEs and their MCP configs, running LLM servers, browsers, VM tools, GUI tools, CLI tools, and agent behaviour hints. Zero cost after the first probe. |
-| `refresh_env_context()` | After installing new tools, switching machines, or any time the cached data feels stale. Re-runs the full probe (~5s) and updates the cache. |
+
+| Tool | When to use |
+| ----------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| `get_env_context()` | **Call this first** in any session. Returns a cached snapshot of the machine: Python interpreters, package managers, installed IDEs and their MCP configs, running LLM servers, browsers, VM tools, GUI tools, CLI tools, and agent behaviour hints. Zero cost after the first probe. |
+| `refresh_env_context()` | After installing new tools, switching machines, or any time the cached data feels stale. Re-runs the full probe (~5s) and updates the cache. |
+
### Tool discovery
-| Tool | When to use |
-|---|---|
+
+| Tool | When to use |
+| ------------------------------ | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `search_tools(query, top_k=5)` | Before loading any external MCP tool. Searches the configured tool registry by capability. Returns lightweight references — not full definitions. Prevents loading 26K-token tool schemas unnecessarily. |
-| `get_available_tools()` | Lists all configured tools and whether they are HOT (always loaded) or deferred (loaded on demand). |
+| `get_available_tools()` | Lists all configured tools and whether they are HOT (always loaded) or deferred (loaded on demand). |
+
### Output routing
-| Tool | When to use |
-|---|---|
+
+| Tool | When to use |
+| ------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------ |
| `route_output(text, source, intent)` | When a tool returns a large blob. Indexes it into a local FTS5 store and returns a handle + suggested queries. The raw text never enters the context window. |
-| `search_index(query, limit=5)` | Retrieve relevant chunks from a previously routed output. Use the suggested queries from `route_output` as a starting point. |
+| `search_index(query, limit=5)` | Retrieve relevant chunks from a previously routed output. Use the suggested queries from `route_output` as a starting point. |
+
### Workflows
-| Tool | When to use |
-|---|---|
-| `list_workflows()` | See what multi-step workflows are available. |
-| `prime_workspace(...)` | Prime a project for AI agent use. **Parameters** include `sync_subagents`, `sync_templates`, `bundle` (`core` default), `codex_agent_targets`, `patch_user_cursor_mcp`, `compact_mcp_response`. **First run**: auto-detects current IDE/CLI (`CURSOR_*` → `TERM_PROGRAM` → dotfolders → fallback `cursor`); response includes **`detect_method`**. Uses **`config/bundles/.yaml`** for bundle metadata. Always rewrites **minimal `.ruler/ruler.toml`** when targeting specific agents. Deploys Cursor/Codex subagent files from templates (**`subagents`**) and manages Codex **`BRAINDRAIN SUBAGENTS`** in `.codex/config.toml` when allowed (**`codex_subagent_config`**). After apply, syncs **`.cursor/rules/braindrain.mdc`** and **`project-rules.mdc`** from `.ruler/RULES.md` — see **`cursor_rules`**. When Cursor is in scope, copies **`config/templates/cursor/`** → **`.cursor/hooks.json`** and **`.cursor/hooks/*.sh`** — see **`cursor_hooks`** (create-only; **`sync_templates=true`** refreshes Ruler sources and hook templates). **`sync_subagents=true`** updates existing subagent files and managed Codex blocks (backup-first). Set **`all_agents=True`** for the full template. |
-| `init_project_memory(path, dry_run)` | Initialize project memory artifacts only (`.braindrain/AGENT_MEMORY.md` and `.cursor/hooks/state/continual-learning-index.json`). Migrates legacy `.devdocs/` on first call. Idempotent. |
-| `scriptlib_enable(path, scope, harvest, dry_run)` | Hard-opt-in project or global scriptlib. Project enable can immediately harvest reusable workspace scripts into `.scriptlib/`. |
-| `scriptlib_harvest_workspace(path, dry_run)` | Recursively copy script-like files from the workspace into the local project scriptlib catalog, honoring ignore rules. |
-| `scriptlib_search(query, ...)` | Search local and shared scriptlib entries before writing a new reusable helper script. Returns a `reuse|fork|new` recommendation. |
-| `scriptlib_describe(script_id, ...)` | Inspect metadata, scope, score, run mode, provenance, and pin/update status for one scriptlib entry. |
-| `scriptlib_run(script_id, ...)` | Execute a script through scriptlib with restored source context when paths are sensitive. |
-| `scriptlib_fork(script_id, new_variant_or_version, ...)` | Fork an existing scriptlib entry into a new version for safe edits. |
-| `scriptlib_promote(script_id, ...)` | Promote a validated project-local script into the shared personal scriptlib catalog. Requires approval. |
-| `scriptlib_list_updates(path)` | List pinned shared script artifacts with available updates for the current workspace. |
-| `scriptlib_apply_update(script_id, ...)` | Pin or upgrade a shared script artifact for the current workspace. Requires approval. |
-| `scriptlib_run_maintenance(path, scope, ...)` | Refresh indexes, surface duplicates/promotions/updates, and optionally persist new ignore dirs. |
-| `scriptlib_catalog_status(path, ...)` | Summarize project/shared roots, shared pins, promotion candidates, and update state. |
-| `scriptlib_record_result(script_id, outcome, ...)` | Update success score, mistakes, and validation state. |
-| `scriptlib_refresh_index(path, scope, dry_run)` | Rebuild project/global scriptlib indexes and generated catalogs. |
-| `plan_workflow(name, args)` | Generate a markdown execution plan and review it before committing to a run. Use before any destructive or long-running workflow. |
-| `run_workflow(name, args)` | Execute a workflow. Intermediate output is routed through the sandbox — only the final summary returns to the agent. |
+
+| Tool | When to use |
+| -------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| `list_workflows()` | See what multi-step workflows are available. |
+| `prime_workspace(...)`                                   | Prime a project for AI agent use. **Parameters** include `sync_subagents`, `sync_templates`, `bundle` (`core` default), `codex_agent_targets`, `patch_user_cursor_mcp`, `compact_mcp_response`. **First run**: auto-detects current IDE/CLI (`CURSOR_*` → `TERM_PROGRAM` → dotfolders → fallback `cursor`); response includes **`detect_method`**. Uses **`config/bundles/<bundle>.yaml`** for bundle metadata. Always rewrites **minimal `.ruler/ruler.toml`** when targeting specific agents. Deploys Cursor/Codex subagent files from templates (**`subagents`**) and manages Codex **`BRAINDRAIN SUBAGENTS`** in `.codex/config.toml` when allowed (**`codex_subagent_config`**). After apply, syncs **`.cursor/rules/braindrain.mdc`** and **`project-rules.mdc`** from `.ruler/RULES.md` — see **`cursor_rules`**. When Cursor is in scope, copies **`config/templates/cursor/`** → **`.cursor/hooks.json`** and **`.cursor/hooks/*.sh`** — see **`cursor_hooks`** (create-only; **`sync_templates=true`** refreshes Ruler sources and hook templates). **`sync_subagents=true`** updates existing subagent files and managed Codex blocks (backup-first). Set **`all_agents=True`** for the full template. |
+| `init_project_memory(path, dry_run)` | Initialize project memory artifacts only (`.braindrain/AGENT_MEMORY.md` and `.cursor/hooks/state/continual-learning-index.json`). Migrates legacy `.devdocs/` on first call. Idempotent. |
+| `scriptlib_enable(path, scope, harvest, dry_run)` | Hard-opt-in project or global scriptlib. Project enable can immediately harvest reusable workspace scripts into `.scriptlib/`. |
+| `scriptlib_harvest_workspace(path, dry_run)` | Recursively copy script-like files from the workspace into the local project scriptlib catalog, honoring ignore rules. |
+| `scriptlib_search(query, ...)`                            | Search local and shared scriptlib entries before writing a new reusable helper script. Returns a `reuse\|fork\|new` recommendation. |
+| `scriptlib_describe(script_id, ...)` | Inspect metadata, scope, score, run mode, provenance, and pin/update status for one scriptlib entry. |
+| `scriptlib_run(script_id, ...)` | Execute a script through scriptlib with restored source context when paths are sensitive. |
+| `scriptlib_fork(script_id, new_variant_or_version, ...)` | Fork an existing scriptlib entry into a new version for safe edits. |
+| `scriptlib_promote(script_id, ...)` | Promote a validated project-local script into the shared personal scriptlib catalog. Requires approval. |
+| `scriptlib_list_updates(path)` | List pinned shared script artifacts with available updates for the current workspace. |
+| `scriptlib_apply_update(script_id, ...)` | Pin or upgrade a shared script artifact for the current workspace. Requires approval. |
+| `scriptlib_run_maintenance(path, scope, ...)` | Refresh indexes, surface duplicates/promotions/updates, and optionally persist new ignore dirs. |
+| `scriptlib_catalog_status(path, ...)` | Summarize project/shared roots, shared pins, promotion candidates, and update state. |
+| `scriptlib_record_result(script_id, outcome, ...)` | Update success score, mistakes, and validation state. |
+| `scriptlib_refresh_index(path, scope, dry_run)` | Rebuild project/global scriptlib indexes and generated catalogs. |
+| `plan_workflow(name, args)` | Generate a markdown execution plan and review it before committing to a run. Use before any destructive or long-running workflow. |
+| `run_workflow(name, args)` | Execute a workflow. Intermediate output is routed through the sandbox — only the final summary returns to the agent. |
+
`list_workflows()` now includes `init_project_memory`, so agents can discover memory bootstrap as a first-class onboarding workflow.
### Telemetry
-| Tool | When to use |
-|---|---|
+
+| Tool | When to use |
+| ----------------------- | ---------------------------------------------------------------- |
| `get_token_dashboard()` | Quick snapshot of estimated tokens saved vs raw in this session. |
-| `get_token_stats()` | Full breakdown: per-tool savings, cache hits, cost avoided. |
+| `get_token_stats()` | Full breakdown: per-tool savings, cache hits, cost avoided. |
+
### Token Checkpoint Protocol
Use this cadence for consistent token observability:
-| Trigger | Required | Call |
-|---|---|---|
-| Task start | Yes | `get_token_dashboard()` |
-| Before high-cost operation | Yes | `get_token_dashboard()` |
-| After high-cost operation | Yes | `get_token_dashboard()` |
-| Milestone or phase close | Yes | `get_token_stats()` |
-| Task end | Yes | `get_token_dashboard()` then `get_token_stats()` |
-| Trivial/no-op action | Optional skip | none |
+
+| Trigger | Required | Call |
+| -------------------------- | ------------- | ------------------------------------------------ |
+| Task start | Yes | `get_token_dashboard()` |
+| Before high-cost operation | Yes | `get_token_dashboard()` |
+| After high-cost operation | Yes | `get_token_dashboard()` |
+| Milestone or phase close | Yes | `get_token_stats()` |
+| Task end | Yes | `get_token_dashboard()` then `get_token_stats()` |
+| Trivial/no-op action | Optional skip | none |
+
High-cost operations include broad searches, large-output reads, subagent batches, and long-running commands.
@@ -116,6 +128,7 @@ For large outputs, always use:
`route_output() -> search_index()`
Bad vs good large-output handling:
+
- Bad: paste a long tool dump directly into chat and then ask for analysis.
- Good: call `route_output()` on the dump, then query targeted chunks with `search_index()`.
@@ -138,6 +151,7 @@ Example line:
`{"schema_version":"1.0","timestamp":"2026-04-06T12:00:00Z","task":"token-stats-rule-system","phase":"post_high_cost","tool":"get_token_dashboard","totals":{"estimated_raw_tokens":6400,"actual_context_tokens":2100,"saved_tokens":4300},"context_tags":["docs","search"],"note":"Captured after cross-file wording audit."}`
Validation gates:
+
- PASS only if checkpoint cadence is consistent across `RULES.md`, `AGENTS.md.template`, and `.cursor/rules/agent-system.mdc`.
- PASS only if `route_output() -> search_index()` appears as the large-output path.
- FAIL if `schema_version` is omitted in JSONL examples.
@@ -145,10 +159,12 @@ Validation gates:
### Utility
-| Tool | When to use |
-|---|---|
+
+| Tool | When to use |
+| -------- | ------------------------------------------------------------- |
| `ping()` | Health check — confirms the server is running and responding. |
+
---
## Installation
@@ -222,7 +238,7 @@ Replace `/path/to/braindrain` with the absolute path to your clone. `install.sh`
### Cursor
-`.cursor/mcp.json` (project) or **`~/.cursor/mcp.json`** (global) via **Settings › Features › MCP**:
+`.cursor/mcp.json` (project) or **`~/.cursor/mcp.json`** (global) via **Settings › Features › MCP**:
```json
{
@@ -237,12 +253,14 @@ Replace `/path/to/braindrain` with the absolute path to your clone. `install.sh`
}
```
-If the MCP log shows **`[MCP Allowlist] No serverName provided for adapter`**, either add **`"serverName": "braindrain"`** on that server object in **`~/.cursor/mcp.json`**, or run **`prime_workspace(..., patch_user_cursor_mcp=true)`** once so braindrain patches the global file. `install.sh` / `configure_mcp.py` and project-level `prime_workspace` set this for generated configs; UI-created entries may omit it.
+If the MCP log shows **`[MCP Allowlist] No serverName provided for adapter`**, either add **`"serverName": "braindrain"`** on that server object in **`~/.cursor/mcp.json`**, or run **`prime_workspace(..., patch_user_cursor_mcp=true)`** once so braindrain patches the global file. `install.sh` / `configure_mcp.py` and project-level `prime_workspace` set this for generated configs; UI-created entries may omit it.
-**Large `prime_workspace` results:** the MCP tool defaults to **`compact_mcp_response=true`** (smaller JSON) to avoid **ClosedResourceError** / connection closed while returning the tool result. Set **`compact_mcp_response=false`** only if you need the full `templates.deployed` map and untruncated Ruler logs.
+**Large `prime_workspace` results:** the MCP tool defaults to **`compact_mcp_response=true`** (smaller JSON) to avoid **ClosedResourceError** / connection closed while returning the tool result. Set **`compact_mcp_response=false`** only if you need the full `templates.deployed` map and untruncated Ruler logs.
#### Multi-agent loop (Cursor)
+
This repo includes a 4-tier multi-agent system under `.cursor/`. Run:
+
- `/intake` (once per project) to generate `project-context.json`
- `/architect` to generate `PRD.md`, `TASK-GRAPH.md`, and `COORDINATOR-BRIEF.md`
- `/coordinate` to execute stages (Tier 3 `coordinator` uses `composer-2`)
@@ -279,7 +297,7 @@ This repo includes a 4-tier multi-agent system under `.cursor/`. Run:
}
```
-After saving, reload via the command palette: **`agent: reload context servers`**. braindrain will appear in the Agent panel's MCP section.
+After saving, reload via the command palette: **`agent: reload context servers`**. braindrain will appear in the Agent panel's MCP section.
### OpenCode
@@ -355,15 +373,17 @@ Main config: `config/hub_config.yaml`
Environment variables (copy `.env.example` to `.env.dev` to start):
-| Variable | Purpose |
-|---|---|
-| `BRAINDRAIN_CONFIG` | Override config file path |
-| `BRAINDRAIN_LAUNCHER_PATH` | Absolute path to the `config/braindrain` launcher. Set automatically by `install.sh`. Required by `prime_workspace()` and `configure_mcp.py`. |
-| `GITHUB_TOKEN` | Enables the deferred GitHub MCP tool |
-| `LMSTUDIO_BASE_URL` | LM Studio endpoint (default: `http://localhost:1234/v1`) |
-| `OLLAMA_HOST` | Ollama endpoint (default: `http://localhost:11434`) |
-| `OPENAI_API_KEY` | Optional — cloud embeddings / semantic search |
-| `BRAINDRAIN_DISABLE_DOCKER_SANDBOX` | Set to `1` to skip the Docker workflow sandbox |
+
+| Variable | Purpose |
+| ----------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------- |
+| `BRAINDRAIN_CONFIG` | Override config file path |
+| `BRAINDRAIN_LAUNCHER_PATH` | Absolute path to the `config/braindrain` launcher. Set automatically by `install.sh`. Required by `prime_workspace()` and `configure_mcp.py`. |
+| `GITHUB_TOKEN` | Enables the deferred GitHub MCP tool |
+| `LMSTUDIO_BASE_URL` | LM Studio endpoint (default: `http://localhost:1234/v1`) |
+| `OLLAMA_HOST` | Ollama endpoint (default: `http://localhost:11434`) |
+| `OPENAI_API_KEY` | Optional — cloud embeddings / semantic search |
+| `BRAINDRAIN_DISABLE_DOCKER_SANDBOX` | Set to `1` to skip the Docker workflow sandbox |
+
The server auto-loads `.env.dev` → `.env.prod` → `.env` (first found, non-overriding of existing env vars).
@@ -409,7 +429,7 @@ braindrain/
### Rule generation (AGENTS.md vs Ruler)
-- **`AGENTS.md`**: generated locally by `./install.sh` from `AGENTS.md.template` (and includes a machine-specific env block between `` / ``).
+- **`AGENTS.md`**: generated locally by `./install.sh` from `AGENTS.md.template` (and includes a machine-specific env block between `` / ``).
- **Ruler-generated dotfiles**: `./install.sh` (and the `prime_workspace()` tool) deploys `config/templates/ruler/` → `.ruler/` and runs `npx @intellectronica/ruler apply` to generate project-local agent rule files like `.cursor/rules/braindrain.mdc`, `.mcp.json`, `CLAUDE.md`, `.agent/rules/ruler.md`, etc.
- Source-of-truth for those generated rule files is `config/templates/ruler/RULES.md` (and `.ruler/ruler.toml`).
- **Important**: files like `CLAUDE.md` are **generated artifacts** (gitignored) and should be treated as **disposable**. Edit the templates instead, then re-run Ruler.
@@ -435,20 +455,22 @@ braindrain/
### Docs ownership map (token observability)
-| File/path | Ownership | Purpose |
-|---|---|---|
-| `config/templates/ruler/RULES.md` | Source-of-truth template | Canonical protocol language and trigger matrix |
-| `AGENTS.md.template` | Source template | Generated `AGENTS.md` content for protocol distribution |
-| `AGENTS.md` | Generated artifact | Do not edit directly |
-| `.cursor/rules/agent-system.mdc` | Cursor local enforcement | Immediate IDE-specific guardrails |
-| `~/.braindrain/costs/session.jsonl` | Machine-local telemetry | Runtime token telemetry source-of-truth |
-| `.braindrain/token-metrics.jsonl` | Optional machine-local artifact | Local checkpoint stream using schema `1.0` |
+
+| File/path | Ownership | Purpose |
+| ----------------------------------- | ------------------------------- | ------------------------------------------------------- |
+| `config/templates/ruler/RULES.md` | Source-of-truth template | Canonical protocol language and trigger matrix |
+| `AGENTS.md.template` | Source template | Generated `AGENTS.md` content for protocol distribution |
+| `AGENTS.md` | Generated artifact | Do not edit directly |
+| `.cursor/rules/agent-system.mdc` | Cursor local enforcement | Immediate IDE-specific guardrails |
+| `~/.braindrain/costs/session.jsonl` | Machine-local telemetry | Runtime token telemetry source-of-truth |
+| `.braindrain/token-metrics.jsonl` | Optional machine-local artifact | Local checkpoint stream using schema `1.0` |
+
---
## Memory layer status and roadmap
-**Roadmap and release TODOs** ship from the repo root as **`ROADMAP.md`** and **`TODOS.md`**. Use **`.devdocs/`** only on your machine for private drafts (that path is gitignored and must not be committed).
+**Roadmap and release TODOs** ship from the repo root as **`ROADMAP.md`** and **`TODOS.md`**. Use **`.devdocs/`** only on your machine for private drafts (that path is gitignored and must not be committed).
Implemented now (runtime behavior in this repo):
@@ -471,7 +493,7 @@ Memory artifacts and paths:
- Durable project memory path: `.braindrain/AGENT_MEMORY.md` (machine-local, gitignored).
- Incremental transcript index path: `.cursor/hooks/state/continual-learning-index.json`.
- Daily planning audit hook state path: `.cursor/hooks/state/daily-plan-audit.json`.
-- Daily planning audit reports path: `create-subagent/plan-audit-YYYY-MM-DD.md` (plus `create-subagent/latest.md`).
+- Daily planning audit reports path: `.braindrain/plan-reports/plan-audit-YYYY-MM-DD.md` (plus `.braindrain/plan-reports/latest.md` and regenerated table `.braindrain/plan-reports/plan-task-board.md`). Mark owners in plan bullets with `@name`, `owner:`, `assignee:`, or `dri:` so the audit can classify accountability.
- Dream artifacts path: `~/.braindrain/dreaming/` (`plans/`, `daily/`, `DREAMS.md`, `last_status.json`).
- `init_project_memory(path, dry_run)` bootstraps memory artifacts and is idempotent.
- `prime_workspace()` includes memory initialization in onboarding.
@@ -506,4 +528,4 @@ Braindrain is a community-first, research-driven orchestration layer focused on
Commercial use is permitted under AGPL-3.0.
However, the **Braindrain** name, branding, and project identity are governed separately by project trademark policy. Forks, research reuse, and community improvements are welcome; just avoid presenting modified or hosted versions as the official Braindrain project unless explicitly authorized.
-For full terms, see the [`LICENSE`](LICENSE) file and naming guidance in [`TRADEMARKS.md`](TRADEMARKS.md).
+For full terms, see the [`LICENSE`](LICENSE) file and naming guidance in [`TRADEMARKS.md`](TRADEMARKS.md).
\ No newline at end of file
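The README's large-output protocol (`route_output() -> search_index()`) reduces to a two-call pattern. A hedged sketch: `call_tool` is a hypothetical stand-in for whatever MCP dispatch the agent runtime exposes; only the tool names and parameters come from the README.

```python
def call_tool(name: str, **kwargs):
    """Hypothetical MCP dispatch; wire this to your client of choice."""
    raise NotImplementedError


def analyze_large_output(big_dump: str):
    # Good path: index the blob instead of pasting it into context.
    handle = call_tool(
        "route_output", text=big_dump, source="build-log", intent="find failures"
    )
    # `handle` is assumed to carry suggested follow-up queries per the README.
    return call_tool("search_index", query="error OR failed", limit=5)
```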
From e2049556f75ed29117fc634fe175e400656e3073 Mon Sep 17 00:00:00 2001
From: ettienne <12529192+jhweb@users.noreply.github.com>
Date: Sun, 3 May 2026 11:30:12 +0200
Subject: [PATCH 08/10] Plan auditor: multi-IDE discovery, ownership, master
mirror, triage queue
- Extend daily_plan_audit with plan frontmatter, env-based default owner,
IDE-tagged discovery, plan cards, master-plan mirror + drift, next-actions
- Document new report outputs in README
- Bump audit schema to 1.1 in tests
Co-authored-by: Cursor
---
.gitignore | 1 +
README.md | 9 +-
TRADEMARKS.md | 4 +-
scripts/daily_plan_audit.py | 998 ++++++++++++++++++++++++++-
tests/test_workspace_primer_hooks.py | 2 +-
5 files changed, 987 insertions(+), 27 deletions(-)
diff --git a/.gitignore b/.gitignore
index 9f576e9..ee216f8 100644
--- a/.gitignore
+++ b/.gitignore
@@ -16,6 +16,7 @@ venv/
*.log
*.logs
+.logs
# Docs
docs/
diff --git a/README.md b/README.md
index fa8c9d2..5cfbf87 100644
--- a/README.md
+++ b/README.md
@@ -493,7 +493,14 @@ Memory artifacts and paths:
- Durable project memory path: `.braindrain/AGENT_MEMORY.md` (machine-local, gitignored).
- Incremental transcript index path: `.cursor/hooks/state/continual-learning-index.json`.
- Daily planning audit hook state path: `.cursor/hooks/state/daily-plan-audit.json`.
-- Daily planning audit reports path: `.braindrain/plan-reports/plan-audit-YYYY-MM-DD.md` (plus `.braindrain/plan-reports/latest.md` and regenerated table `.braindrain/plan-reports/plan-task-board.md`). Mark owners in plan bullets with `@name`, `owner:`, `assignee:`, or `dri:` so the audit can classify accountability.
+- Daily planning audit reports path:
+  - `.braindrain/plan-reports/plan-audit-YYYY-MM-DD.md` (full report, now rendered as plan-centric cards grouped by IDE -> disposition)
+ - `.braindrain/plan-reports/latest.md` (latest mirror)
+ - `.braindrain/plan-reports/plan-task-board.md` (active item board with IDE + inherited owner)
+ - `.braindrain/plan-reports/master-plan.md` (generated master mirror + drift detection)
+ - `.braindrain/plan-reports/next-actions.md` (verb queue: `MERGE`, `FIX`, `REPLAN`, `RESEARCH`, `IMPLEMENT`, `BACKLOG`)
+ - Primary plan discovery now scans known IDE plan dirs (`.cursor/plans`, `.codex/plans`, `.kiro/plans`, `.windsurf/plans`, etc.), and each plan/action is tagged with its IDE source.
+  - Ownership defaults to `@<username>` resolved via `get_env_context()` when `owner:`/`dri:` are absent. Explicit item-level owner markers (`@name`, `owner:`, `assignee:`, `dri:`) still work and override inherited ownership.
- Dream artifacts path: `~/.braindrain/dreaming/` (`plans/`, `daily/`, `DREAMS.md`, `last_status.json`).
- `init_project_memory(path, dry_run)` bootstraps memory artifacts and is idempotent.
- `prime_workspace()` includes memory initialization in onboarding.
diff --git a/TRADEMARKS.md b/TRADEMARKS.md
index dc8855b..8e3a46a 100644
--- a/TRADEMARKS.md
+++ b/TRADEMARKS.md
@@ -4,7 +4,7 @@ This policy applies to the project name **Braindrain**, any official project log
### Code license vs. brand usage
-The source code is licensed under `AGPL-3.0` in [`LICENSE`](LICENSE).
+The source code is licensed under `AGPL-3.0` in [`LICENSE`](LICENSE).
That license governs code use, modification, and distribution.
This policy is only about avoiding confusion around the official project identity.
@@ -29,4 +29,4 @@ If you ship a fork or service, please use distinct branding unless you have expl
### Permission requests
-If you want to use the Braindrain name or branding in a way that may look official, open an issue in this repository with the intended use and context.
+If you want to use the Braindrain name or branding in a way that may look official, open an issue in this repository with the intended use and context.
\ No newline at end of file
diff --git a/scripts/daily_plan_audit.py b/scripts/daily_plan_audit.py
index ac8b628..32b8012 100644
--- a/scripts/daily_plan_audit.py
+++ b/scripts/daily_plan_audit.py
@@ -2,7 +2,7 @@
"""Generate a daily planning audit report in markdown format.
Priority source order:
-1) .cursor/plans/*.plan.md
+1) <ide>/plans/*.plan.md (cursor, codex, kiro, windsurf, ...)
2) Secondary markdown docs in repo
"""
@@ -11,13 +11,16 @@
import argparse
import dataclasses
import datetime as dt
+import getpass
+import os
import re
import shutil
+import sys
from collections import Counter, defaultdict
from pathlib import Path
-SCHEMA_VERSION = "1.0"
+SCHEMA_VERSION = "1.1"
STOP_WORDS = {
"a",
"an",
@@ -68,6 +71,197 @@
re.compile(r"\bdri\s*:\s*(\S+)", re.IGNORECASE),
)
+# Plans live under <dotfolder>/plans/*.plan.md. The leading dot is stripped to form
+# the `ide` tag (e.g. ".cursor" -> "cursor"). Auto-detected via filesystem
+# scan, but this list bounds discovery to known IDE conventions so the audit
+# does not pick up unrelated dotfolders.
+KNOWN_IDE_DOTFOLDERS = (
+ ".cursor",
+ ".codex",
+ ".kiro",
+ ".windsurf",
+ ".cline",
+ ".roo",
+ ".zed",
+ ".aider",
+ ".continue",
+)
+
+# Plan-level disposition vocabulary. Validated when reading frontmatter.
+VALID_DISPOSITIONS = (
+ "active",
+ "research-needed",
+ "replan-needed",
+ "merge-ready",
+ "needs-fix",
+ "backlogged",
+ "scratched",
+ "implemented",
+)
+DEFAULT_DISPOSITION = "active"
+
+# Map disposition -> action verb shown in next-actions queue. The `active`
+# disposition resolves to IMPLEMENT only when item-level signals say so;
+# otherwise it stays off the triage queue. `scratched` and `implemented`
+# never appear in the queue.
+DISPOSITION_VERB = {
+ "research-needed": "RESEARCH",
+ "replan-needed": "REPLAN",
+ "merge-ready": "MERGE",
+ "needs-fix": "FIX",
+ "backlogged": "BACKLOG",
+}
+
+# Frontmatter parser regexes (no PyYAML dependency — keeps script standalone).
+FRONTMATTER_BLOCK_RE = re.compile(
+ r"\A---\s*\n(.*?)\n---\s*(?:\n|$)", re.DOTALL
+)
+FRONTMATTER_KV_RE = re.compile(
+ r"^([A-Za-z_][A-Za-z0-9_-]*)\s*:\s*(.*?)\s*$"
+)
+
+
+def _strip_quotes(value: str) -> str:
+ if len(value) >= 2 and value[0] == value[-1] and value[0] in {'"', "'"}:
+ return value[1:-1]
+ return value
+
+
+def parse_plan_frontmatter(path_or_text: Path | str | None) -> dict[str, object]:
+ """Parse a leading YAML frontmatter block.
+
+ Supports the small subset we actually use: scalars, quoted scalars,
+ inline lists like `delegated_to: [a, b]`, and indented bullet lists:
+
+ delegated_to:
+ - gitops
+ - testops
+
+ Anything else (nested maps, anchors) is ignored — keeps the parser
+ dependency-free. Returns an empty dict when no frontmatter is present.
+ """
+ if isinstance(path_or_text, Path):
+ text = path_or_text.read_text(encoding="utf-8", errors="ignore")
+ else:
+ text = str(path_or_text or "")
+
+ match = FRONTMATTER_BLOCK_RE.match(text)
+ if not match:
+ return {}
+
+ body = match.group(1)
+ out: dict[str, object] = {}
+ current_list: list[str] | None = None
+
+ for raw in body.splitlines():
+ if not raw.strip():
+ continue
+ # Indented bullet for an active list key.
+ if current_list is not None and re.match(r"^\s+-\s+", raw):
+ value = re.sub(r"^\s+-\s+", "", raw).strip()
+ current_list.append(_strip_quotes(value))
+ continue
+ # New top-level key resets list capture.
+ kv = FRONTMATTER_KV_RE.match(raw)
+ if not kv:
+ current_list = None
+ continue
+ key = kv.group(1)
+ value = kv.group(2)
+ current_list = None
+ if not value:
+ current_list = []
+ out[key] = current_list
+ continue
+ # Inline list like `[a, b, c]`.
+ if value.startswith("[") and value.endswith("]"):
+ inner = value[1:-1].strip()
+ parts = [
+ _strip_quotes(p.strip())
+ for p in inner.split(",")
+ if p.strip()
+ ]
+ out[key] = parts
+ continue
+ out[key] = _strip_quotes(value)
+ return out
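+
+# Illustrative (comment only): a plan beginning with
+#   ---
+#   owner: "@alice"
+#   disposition: merge-ready
+#   delegated_to: [gitops, testops]
+#   ---
+# parses to {"owner": "@alice", "disposition": "merge-ready",
+# "delegated_to": ["gitops", "testops"]}.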
+
+
+_DEFAULT_OWNER_CACHE: str | None = None
+
+
+def resolve_default_owner(
+ repo_root: Path | None = None,
+ *,
+ refresh: bool = False,
+) -> str:
+ """Resolve the default plan owner.
+
+ Priority:
+ 1) `.braindrain/plan-config.yaml -> default_owner` (if file/key exists).
+ 2) `braindrain.env_probe.get_env_context()` -> `summary.identity.username`.
+ 3) `getpass.getuser()` / `$USER` env.
+ 4) Literal `@user` as last resort.
+
+ The result is always prefixed with `@` and cached for the run.
+ """
+ global _DEFAULT_OWNER_CACHE
+ if _DEFAULT_OWNER_CACHE and not refresh:
+ return _DEFAULT_OWNER_CACHE
+
+ handle = ""
+
+ # 1) Optional plan-config override.
+ if repo_root is not None:
+ cfg_path = repo_root / ".braindrain" / "plan-config.yaml"
+ if cfg_path.is_file():
+ try:
+ fm = parse_plan_frontmatter(
+ "---\n" + cfg_path.read_text(encoding="utf-8") + "\n---\n"
+ )
+ cfg_owner = fm.get("default_owner")
+ if isinstance(cfg_owner, str) and cfg_owner.strip():
+ handle = cfg_owner.strip().lstrip("@")
+ except Exception:
+ pass
+
+ # 2) Braindrain env_probe (sibling repo on path).
+ if not handle:
+ try:
+ repo = repo_root or Path(__file__).resolve().parent.parent
+ if str(repo) not in sys.path:
+ sys.path.insert(0, str(repo))
+ from braindrain.env_probe import get_env_context # type: ignore
+
+ ctx = get_env_context()
+ handle = (
+ ctx.get("summary", {})
+ .get("identity", {})
+ .get("username", "")
+ ) or ""
+ except Exception:
+ handle = ""
+
+ # 3) getpass / env.
+ if not handle:
+ try:
+ handle = getpass.getuser()
+ except Exception:
+ handle = os.environ.get("USER", "") or os.environ.get(
+ "LOGNAME", ""
+ )
+
+ # 4) Last resort.
+ if not handle:
+ handle = "user"
+
+ _DEFAULT_OWNER_CACHE = f"@{handle.lstrip('@')}"
+ return _DEFAULT_OWNER_CACHE
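+
+# e.g. with no .braindrain/plan-config.yaml and the env probe unavailable,
+# a machine user "jane" resolves to "@jane"; a `default_owner:` entry in
+# plan-config.yaml takes precedence over both fallbacks.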
+
def has_explicit_owner(text: str) -> bool:
if OWNER_AT_RE.search(text):
@@ -100,6 +294,134 @@ class PlanItem:
tokens: set[str]
+@dataclasses.dataclass
+class Action:
+ """A concrete next-action verb attached to a plan (and optionally an item).
+
+ Verbs match the disposition + signal table:
+    RESEARCH / REPLAN / MERGE / IMPLEMENT / BACKLOG / FIX.
+ Produced by ``detect_actions`` and consumed by both the per-plan cards
+ in the daily report and the next-actions triage queue.
+ """
+
+ verb: str # RESEARCH | REPLAN | MERGE | IMPLEMENT | BACKLOG | FIX
+ plan_slug: str
+ plan_source: str
+ ide: str
+ title: str # plan title for human display
+ reason: str # short human reason
+ hint: str # actionable hint
+ priority: str # P0..P3
+ item_excerpt: str = "" # optional — first item snippet that drove this
+
+
+@dataclasses.dataclass
+class PlanCard:
+ """Plan-level metadata + child item rollup.
+
+ A plan is the upper-level unit of triage. Items are rolled up into
+ `items` and a status histogram is precomputed in `counts`.
+ """
+
+ slug: str
+ title: str
+ source: str
+ ide: str
+ owner: str
+ dri: str
+ disposition: str
+ priority: str
+ parent: str
+ delegated_to: list[str]
+ is_master: bool
+ items: list[PlanItem] = dataclasses.field(default_factory=list)
+ counts: dict[str, int] = dataclasses.field(default_factory=dict)
+
+ @property
+ def is_active_for_triage(self) -> bool:
+ """True when the plan should produce next-actions output."""
+ return self.disposition not in {"scratched", "implemented"}
+
+
+def derive_ide_tag(rel_path: str) -> str:
+ """Infer the `ide` tag from a repo-relative plan path.
+
+ `.cursor/plans/x.plan.md` -> `cursor`
+ `.codex/plans/y.plan.md` -> `codex`
+    Anything else (secondary docs, legacy `.devdocs/`) -> `""`.
+ """
+ parts = rel_path.split("/", 2)
+ if len(parts) >= 2 and parts[0].startswith(".") and parts[1] == "plans":
+ return parts[0][1:]
+ return ""
+
+
+def build_plan_card(
+ path: Path,
+ repo_root: Path,
+ items: list[PlanItem] | None = None,
+ *,
+ default_owner: str | None = None,
+) -> PlanCard:
+ """Read frontmatter from a plan file and assemble a PlanCard.
+
+ Falls back to sensible defaults when frontmatter is absent so plans
+ without metadata are still surfaced (just under `disposition=active`
+ with the env-resolved owner). Item list is attached unmodified.
+ """
+ rel = path.relative_to(repo_root).as_posix()
+ fm = parse_plan_frontmatter(path)
+ owner_raw = (
+ fm.get("owner")
+ or fm.get("dri")
+ or default_owner
+ or resolve_default_owner(repo_root)
+ )
+ owner = str(owner_raw).strip() if owner_raw else "@user"
+ if owner and not owner.startswith("@") and ":" not in owner:
+ owner = f"@{owner.lstrip('@')}"
+ dri_raw = fm.get("dri") or owner
+ dri = str(dri_raw).strip() if dri_raw else owner
+
+ disposition = str(fm.get("disposition") or DEFAULT_DISPOSITION).strip()
+ if disposition not in VALID_DISPOSITIONS:
+ disposition = DEFAULT_DISPOSITION
+
+ delegated = fm.get("delegated_to") or []
+ if isinstance(delegated, str):
+ delegated = [delegated]
+
+ ide_tag = str(fm.get("ide") or derive_ide_tag(rel))
+
+ title = ""
+ for line in path.read_text(encoding="utf-8", errors="ignore").splitlines():
+ m = HEADING_RE.match(line)
+ if m:
+ title = m.group(1).strip()
+ break
+ if not title:
+ title = str(fm.get("name") or path.stem)
+
+ items = items or []
+ counts = Counter(i.status for i in items)
+
+ return PlanCard(
+ slug=path.stem.replace(".plan", ""),
+ title=title,
+ source=rel,
+ ide=ide_tag,
+ owner=owner,
+ dri=dri,
+ disposition=disposition,
+ priority=str(fm.get("priority") or "P2"),
+ parent=str(fm.get("parent") or "_master"),
+ delegated_to=[str(x) for x in delegated],
+ is_master=bool(fm.get("isMaster") or fm.get("is_master")),
+ items=items,
+ counts=dict(counts),
+ )
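+
+# Illustrative: a plan file with no frontmatter yields disposition="active",
+# priority="P2", parent="_master", and the env-resolved default owner.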
+
+
def parse_args() -> argparse.Namespace:
parser = argparse.ArgumentParser(description="Daily planning audit report generator")
parser.add_argument("--repo-root", default=".", help="Repository root")
@@ -110,6 +432,14 @@ def parse_args() -> argparse.Namespace:
)
parser.add_argument("--report-date", default=dt.date.today().isoformat())
parser.add_argument("--trigger", default="cursor-stop-daily-gated")
+ parser.add_argument(
+ "--master-plan",
+ default=None,
+ help=(
+ "Path to a hand-curated `_master.plan.md`. If omitted, the auditor "
+ "auto-discovers it under known IDE plan dirs."
+ ),
+ )
return parser.parse_args()
@@ -121,22 +451,53 @@ def is_secondary_doc(path: Path) -> bool:
return False
if "/.braindrain/plan-reports/" in lowered:
return False
- if "/.cursor/plans/" in lowered:
- return False
if "/.devdocs/" in lowered:
return False
if "/.cursor/agents/" in lowered:
return False
if "/config/templates/" in lowered:
return False
+ # Exclude IDE plan directories — they're discovered as primary sources.
+ for folder in KNOWN_IDE_DOTFOLDERS:
+ if f"/{folder}/plans/" in lowered or f"/{folder}/agents/" in lowered:
+ return False
if path.suffix.lower() != ".md":
return False
filename = path.name.lower()
return any(k in filename for k in ("plan", "roadmap", "todo", "task", "backlog", "milestone", "prd"))
-def discover_sources(repo_root: Path) -> tuple[list[Path], list[Path]]:
- primary = sorted((repo_root / ".cursor" / "plans").glob("*.plan.md"))
+def discover_sources(
+ repo_root: Path,
+ *,
+ ide_dotfolders: tuple[str, ...] = KNOWN_IDE_DOTFOLDERS,
+) -> tuple[list[Path], list[Path]]:
+ """Discover primary plan files across known IDE dotfolders + secondary docs.
+
+    Primary plans live under ``<ide>/plans/*.plan.md``. The auditor scans
+ every IDE dotfolder in ``ide_dotfolders`` so plans authored for Cursor,
+ Codex, Kiro, Windsurf, etc. are all captured.
+
+ Master plans (``_master.plan.md``) are excluded from the primary list
+ because they are an index, not a workstream. Renderer reads the master
+ separately via build_plan_card.
+ """
+ primary: list[Path] = []
+ seen: set[Path] = set()
+ for folder in ide_dotfolders:
+ plans_dir = repo_root / folder / "plans"
+ if not plans_dir.is_dir():
+ continue
+ for path in sorted(plans_dir.glob("*.plan.md")):
+ resolved = path.resolve()
+ if resolved in seen:
+ continue
+ if path.name.startswith("_master"):
+ continue
+ seen.add(resolved)
+ primary.append(path)
+ primary.sort(key=lambda p: p.relative_to(repo_root).as_posix())
+
secondary: list[Path] = []
for path in repo_root.rglob("*.md"):
if is_secondary_doc(path):
@@ -147,6 +508,22 @@ def discover_sources(repo_root: Path) -> tuple[list[Path], list[Path]]:
return primary, secondary
+def discover_master_plan(
+ repo_root: Path,
+ *,
+ ide_dotfolders: tuple[str, ...] = KNOWN_IDE_DOTFOLDERS,
+) -> Path | None:
+ """Find the hand-curated `_master.plan.md` if it exists.
+
+ Search order: every IDE dotfolder in `ide_dotfolders`. First match wins.
+ """
+ for folder in ide_dotfolders:
+ candidate = repo_root / folder / "plans" / "_master.plan.md"
+ if candidate.is_file():
+ return candidate
+ return None
+
+
def tokenize(text: str) -> set[str]:
words = re.findall(r"[a-z0-9]+", text.lower())
return {w for w in words if len(w) > 2 and w not in STOP_WORDS}
@@ -261,18 +638,57 @@ def detect_overlaps(items: list[PlanItem]) -> list[dict[str, str]]:
return overlaps
-def detect_gaps(items: list[PlanItem]) -> list[dict[str, str]]:
+# Item-level delegation marker: `delegate:` followed by a non-empty target.
+# Used to detect "agent says they're handing this off but didn't say to whom".
+DELEGATION_DECLARED_RE = re.compile(r"\bdelegate(?:d_to|s_to|s|d)?\s*:", re.IGNORECASE)
+DELEGATION_TARGETED_RE = re.compile(
+ r"\bdelegate(?:d_to|s_to|s|d)?\s*:\s*(?P[A-Za-z@][A-Za-z0-9@_.-]{1,64})",
+ re.IGNORECASE,
+)
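+# Examples: "delegate: gitops" is declared *and* targeted; "delegated:"
+# with nothing after the colon is declared but unresolved; prose with no
+# delegate marker is neither.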
+
+
+def has_unresolved_delegation(text: str) -> bool:
+ """True when an item declares `delegate:` without a non-empty target."""
+ if not DELEGATION_DECLARED_RE.search(text):
+ return False
+ match = DELEGATION_TARGETED_RE.search(text)
+ if not match:
+ return True
+ target = match.group("target").strip(" .,;:)]}")
+ return not target
+
+
+def detect_gaps(
+ items: list[PlanItem],
+ *,
+ cards_by_source: dict[str, "PlanCard"] | None = None,
+) -> list[dict[str, str]]:
+ """Detect missing-signal gaps for active items.
+
+ When ``cards_by_source`` is provided, items inherit ownership from their
+ parent plan: an item without an `@name` is *not* flagged as missing
+ `explicit_owner` if its plan declares a non-default owner. Items that
+ declare delegation (`delegate:`) without a target raise the new
+ ``delegation_unresolved`` signal regardless of plan ownership.
+ """
+ cards_by_source = cards_by_source or {}
gaps: list[dict[str, str]] = []
for item in items:
if item.status not in {"Outstanding", "In Progress", "Blocked"}:
continue
lowered = item.item.lower()
has_owner = has_explicit_owner(item.item)
+ plan_card = cards_by_source.get(item.source)
+ plan_has_owner = bool(
+ plan_card and plan_card.owner and plan_card.owner != "@user"
+ )
has_test_hint = "test" in lowered
has_path_evidence = any("/" in ev for ev in item.evidence)
- missing = []
- if not has_owner:
+ missing: list[str] = []
+ if not has_owner and not plan_has_owner:
missing.append("explicit_owner")
+ if has_unresolved_delegation(item.item):
+ missing.append("delegation_unresolved")
if not has_test_hint:
missing.append("test")
if not has_path_evidence:
@@ -291,8 +707,145 @@ def detect_gaps(items: list[PlanItem]) -> list[dict[str, str]]:
return gaps
-def render_task_board_markdown(report_date: str, items: list[PlanItem]) -> str:
- """Single markdown table of active work, regenerated each audit run."""
+# Priority ordering used by next-actions sort.
+_PRIORITY_RANK = {"P0": 0, "P1": 1, "P2": 2, "P3": 3}
+# Verbs in the order they appear in the triage queue.
+_VERB_ORDER = (
+ "MERGE", # ship now
+ "FIX", # broken regressions
+ "REPLAN", # needs rewrite before more work
+ "RESEARCH", # unblock with investigation
+ "IMPLEMENT", # active work missing tests/evidence
+ "BACKLOG", # surfaced only for high-priority deferred plans
+)
+
+
+def _first_active_item_excerpt(card: "PlanCard") -> str:
+ for it in card.items:
+ if it.status in {"Blocked", "In Progress", "Outstanding"}:
+ text = it.item.replace("\n", " ").strip()
+ if len(text) > 140:
+ text = text[:137] + "..."
+ return text
+ return ""
+
+
+def detect_actions(
+ cards: list["PlanCard"],
+ *,
+ backlog_priority_threshold: str = "P1",
+) -> list[Action]:
+ """Translate plan dispositions + item signals into concrete next-action verbs.
+
+ Rules:
+ - ``research-needed`` -> RESEARCH
+ - ``replan-needed`` -> REPLAN
+ - ``merge-ready`` -> MERGE
+ - ``needs-fix`` -> FIX
+ - ``backlogged`` -> BACKLOG (only when priority <= threshold; default P1)
+    - ``active`` + has any blocked/outstanding/in-progress item -> IMPLEMENT
+      (the first active item becomes ``item_excerpt``; the hint additionally
+      calls out items lacking a test hint)
+ - ``scratched`` / ``implemented`` -> excluded.
+ """
+ threshold = _PRIORITY_RANK.get(backlog_priority_threshold, 1)
+ actions: list[Action] = []
+ for card in cards:
+ if not card.is_active_for_triage:
+ continue
+ verb = DISPOSITION_VERB.get(card.disposition)
+ excerpt = _first_active_item_excerpt(card)
+
+ if card.disposition == "active":
+ has_active_item = any(
+ it.status in {"Blocked", "In Progress", "Outstanding"}
+ for it in card.items
+ )
+ if has_active_item:
+ missing_test = any(
+ "test" not in it.item.lower()
+ for it in card.items
+ if it.status in {"Blocked", "In Progress", "Outstanding"}
+ )
+ actions.append(
+ Action(
+ verb="IMPLEMENT",
+ plan_slug=card.slug,
+ plan_source=card.source,
+ ide=card.ide or "—",
+ title=card.title,
+ reason="active plan with open work",
+ hint=(
+ "add tests + evidence; ship the next item"
+ if missing_test
+ else "ship the next item"
+ ),
+ priority=card.priority,
+ item_excerpt=excerpt,
+ )
+ )
+ continue
+
+ if verb == "BACKLOG":
+ rank = _PRIORITY_RANK.get(card.priority, 3)
+ if rank > threshold:
+ continue
+
+ if not verb:
+ continue
+
+ reason = {
+ "RESEARCH": "needs investigation before commit",
+ "REPLAN": "design has drifted; rewrite required",
+ "MERGE": "implemented; ready to ship",
+ "FIX": "implemented but broken/regressed",
+ "BACKLOG": "deferred but priority-elevated",
+ }[verb]
+ hint = {
+ "RESEARCH": "answer the open question(s); update plan with findings",
+ "REPLAN": "rewrite the plan; archive the old version",
+ "MERGE": "open PR; pass CI; merge",
+ "FIX": "reproduce regression; ship fix; add regression test",
+ "BACKLOG": "decide: promote to active or scratch",
+ }[verb]
+
+ actions.append(
+ Action(
+ verb=verb,
+ plan_slug=card.slug,
+ plan_source=card.source,
+ ide=card.ide or "—",
+ title=card.title,
+ reason=reason,
+ hint=hint,
+ priority=card.priority,
+ item_excerpt=excerpt,
+ )
+ )
+
+ actions.sort(
+ key=lambda a: (
+ _VERB_ORDER.index(a.verb) if a.verb in _VERB_ORDER else 99,
+ _PRIORITY_RANK.get(a.priority, 3),
+ a.ide,
+ a.plan_slug,
+ )
+ )
+ return actions
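+
+# Illustrative outcomes: disposition="merge-ready" maps straight to
+# Action(verb="MERGE", hint="open PR; pass CI; merge"); an `active` card
+# with an open, untested item yields IMPLEMENT carrying that item's excerpt.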
+
+
+def render_task_board_markdown(
+ report_date: str,
+ items: list[PlanItem],
+ *,
+ cards_by_source: dict[str, "PlanCard"] | None = None,
+) -> str:
+ """Single markdown table of active work, regenerated each audit run.
+
+ When ``cards_by_source`` is provided, the Owner column inherits from the
+ parent plan when an item lacks its own ``@name`` marker, and the IDE tag
+ travels in a new ``IDE`` column.
+ """
+ cards_by_source = cards_by_source or {}
rows: list[PlanItem] = [
i
for i in items
@@ -309,30 +862,42 @@ def render_task_board_markdown(report_date: str, items: list[PlanItem]) -> str:
"# Plan task board",
"",
f"_Generated {report_date} by `scripts/daily_plan_audit.py` (daily hook). "
- "Do not edit by hand — ownership must use `@name`, `owner:`, `assignee:`, or `dri:`._",
+ "Do not edit by hand — ownership inherits from the parent plan's "
+ "frontmatter (`owner:` / `dri:`) unless an item explicitly delegates._",
"",
- "| Status | Owner | Item | Source | Gaps |",
- "|--------|-------|------|--------|------|",
+ "| IDE | Status | Owner | Item | Source | Gaps |",
+ "|-----|--------|-------|------|--------|------|",
]
for item in rows:
- gap_parts = []
- if not has_explicit_owner(item.item):
+ plan_card = cards_by_source.get(item.source)
+ plan_has_owner = bool(
+ plan_card and plan_card.owner and plan_card.owner != "@user"
+ )
+ gap_parts: list[str] = []
+ if not has_explicit_owner(item.item) and not plan_has_owner:
gap_parts.append("explicit_owner")
+ if has_unresolved_delegation(item.item):
+ gap_parts.append("delegation_unresolved")
if "test" not in item.item.lower():
gap_parts.append("test")
if not any("/" in ev for ev in item.evidence):
gap_parts.append("path_evidence")
gaps_cell = ", ".join(gap_parts) if gap_parts else "—"
- owner_cell = extract_owner_display(item.item)
+        owner_from_item = extract_owner_display(item.item)
+        if owner_from_item == "—" and plan_card:
+            owner_cell = plan_card.owner or "—"
+        else:
+            owner_cell = owner_from_item
item_cell = item.item.replace("|", "\\|").replace("\n", " ")
if len(item_cell) > 120:
item_cell = item_cell[:117] + "..."
src_cell = f"`{item.source}`"
+ ide_cell = (plan_card.ide if plan_card and plan_card.ide else "—")
lines.append(
- f"| {item.status} | {owner_cell} | {item_cell} | {src_cell} | {gaps_cell} |"
+ f"| {ide_cell} | {item.status} | {owner_cell} | {item_cell} | {src_cell} | {gaps_cell} |"
)
if not rows:
- lines.append("| — | — | _No blocked/in-progress/outstanding items parsed._ | — | — |")
+ lines.append("| — | — | — | _No blocked/in-progress/outstanding items parsed._ | — | — |")
lines.append("")
return "\n".join(lines)
@@ -382,6 +947,338 @@ def render_status_section(title: str, items: list[PlanItem]) -> list[str]:
return lines
+# Disposition display order in plan cards.
+_DISPOSITION_ORDER = (
+ "merge-ready",
+ "needs-fix",
+ "active",
+ "research-needed",
+ "replan-needed",
+ "backlogged",
+ "scratched",
+ "implemented",
+)
+
+
+def render_plan_cards(
+ cards: list["PlanCard"],
+ actions: list[Action],
+) -> list[str]:
+ """Render per-plan cards grouped by IDE then disposition.
+
+ One card per plan with: title, slug, owner, disposition, priority,
+ item rollup counts (Implemented/Active/Blocked/Outstanding/Unknown), and
+ the top action verb (if any) so the daily report becomes plan-centric.
+ """
+ if not cards:
+ return ["## Plan Cards (by IDE)", "", "- None", ""]
+
+ actions_by_source: dict[str, list[Action]] = defaultdict(list)
+ for action in actions:
+ actions_by_source[action.plan_source].append(action)
+
+ by_ide: dict[str, list[PlanCard]] = defaultdict(list)
+ for card in cards:
+ by_ide[card.ide or "—"].append(card)
+
+ lines: list[str] = ["## Plan Cards (by IDE)", ""]
+ for ide in sorted(by_ide.keys()):
+ ide_cards = by_ide[ide]
+ lines.append(f"### IDE: `{ide}` ({len(ide_cards)} plans)")
+ lines.append("")
+ ide_cards.sort(
+ key=lambda c: (
+ _DISPOSITION_ORDER.index(c.disposition)
+ if c.disposition in _DISPOSITION_ORDER
+ else 99,
+ _PRIORITY_RANK.get(c.priority, 3),
+ c.slug,
+ )
+ )
+ current_disp = ""
+ for card in ide_cards:
+ if card.disposition != current_disp:
+ current_disp = card.disposition
+ lines.append(f"#### Disposition: `{current_disp}`")
+ lines.append("")
+ counts = card.counts or {}
+ rollup_parts = [
+ f"Implemented={counts.get('Implemented', 0)}",
+ f"InProgress={counts.get('In Progress', 0)}",
+ f"Blocked={counts.get('Blocked', 0)}",
+ f"Outstanding={counts.get('Outstanding', 0)}",
+ f"Unknown={counts.get('Unknown', 0)}",
+ ]
+ top_actions = actions_by_source.get(card.source, [])
+ top_verb = (
+ f"`{top_actions[0].verb}` -> {top_actions[0].hint}"
+ if top_actions
+ else "—"
+ )
+ delegated = (
+ ", ".join(card.delegated_to) if card.delegated_to else "none"
+ )
+ lines.append(f"- **{card.title}** (`{card.slug}`)")
+ lines.append(f" - Source: [`{card.source}`]({card.source})")
+ lines.append(
+ f" - Owner: {card.owner} (DRI: {card.dri}) — Priority: `{card.priority}`"
+ )
+ lines.append(f" - Delegated to: {delegated}")
+ lines.append(f" - Items: {' / '.join(rollup_parts)}")
+ lines.append(f" - Next action: {top_verb}")
+ if top_actions and top_actions[0].item_excerpt:
+ lines.append(
+ f" - Excerpt: _{top_actions[0].item_excerpt}_"
+ )
+ lines.append("")
+ return lines
+
+
+def build_cards_index(
+ repo_root: Path,
+ primary: list[Path],
+ items: list[PlanItem],
+ *,
+ default_owner: str | None = None,
+) -> dict[str, "PlanCard"]:
+ """Build a map of plan source path -> PlanCard with item rollup attached."""
+ items_by_source: dict[str, list[PlanItem]] = defaultdict(list)
+ for item in items:
+ items_by_source[item.source].append(item)
+ cards: dict[str, PlanCard] = {}
+ for plan_path in primary:
+ rel = plan_path.relative_to(repo_root).as_posix()
+ card = build_plan_card(
+ plan_path,
+ repo_root,
+ items=items_by_source.get(rel, []),
+ default_owner=default_owner,
+ )
+ cards[rel] = card
+ return cards
+
+
+# Markdown link extractor used for parsing the curated master plan body.
+_MD_LINK_RE = re.compile(r"\[([^\]]+)\]\(([^)]+)\)")
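+# e.g. _MD_LINK_RE.findall("[Auth](./auth.plan.md) and [Docs](https://x)")
+#      -> [("Auth", "./auth.plan.md"), ("Docs", "https://x")]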
+
+
+def parse_master_plan(master_path: Path, repo_root: Path) -> dict[str, object]:
+ """Extract the list of child plans referenced by `_master.plan.md`.
+
+ Returns a dict with:
+ - ``frontmatter``: parsed YAML
+ - ``children``: list of repo-relative paths in declaration order
+ """
+ if not master_path.is_file():
+ return {"frontmatter": {}, "children": []}
+
+ fm = parse_plan_frontmatter(master_path)
+ text = master_path.read_text(encoding="utf-8", errors="ignore")
+ # Strip frontmatter so we don't pull links out of it.
+ body = FRONTMATTER_BLOCK_RE.sub("", text, count=1)
+ master_dir = master_path.parent
+ seen: set[str] = set()
+ children: list[str] = []
+    for _label, target in _MD_LINK_RE.findall(body):
+ target = target.strip()
+ if not target or target.startswith(("http://", "https://", "#", "mailto:")):
+ continue
+ if not target.endswith(".plan.md"):
+ continue
+ # Resolve the link relative to the master file, then make repo-relative.
+ candidate = (master_dir / target).resolve()
+ try:
+ rel = candidate.relative_to(repo_root.resolve()).as_posix()
+ except ValueError:
+ continue
+ if rel in seen:
+ continue
+ seen.add(rel)
+ children.append(rel)
+ return {"frontmatter": fm, "children": children}
+
+
+def render_next_actions(
+ actions: list[Action],
+ *,
+ report_date: str = "",
+) -> str:
+ """Render the triage queue grouped by verb.
+
+    Lines look like:
+        - [cursor:plan-slug] `(P1)` reason — hint
+          - Source: [`path/to.plan.md`](path/to.plan.md)
+          - Excerpt: _item excerpt (if any)_
+
+ Order: verbs follow ``_VERB_ORDER`` (MERGE first), inside each verb
+ actions sort by priority then ide then slug. Empty queue still emits a
+ valid file with a friendly message so consumers can tail it safely.
+ """
+ lines: list[str] = ["# Plan triage queue (next actions)"]
+ if report_date:
+ lines.append("")
+ lines.append(
+ f"_Generated {report_date} by `scripts/daily_plan_audit.py`. "
+ "Do not edit by hand. Action a verb by editing the parent plan's "
+ "`disposition:` and re-running the audit._"
+ )
+ lines.append("")
+
+ if not actions:
+ lines.append("- _No active triage actions today._")
+ lines.append("")
+ return "\n".join(lines)
+
+ grouped: dict[str, list[Action]] = defaultdict(list)
+ for action in actions:
+ grouped[action.verb].append(action)
+
+ for verb in _VERB_ORDER:
+ bucket = grouped.get(verb, [])
+ if not bucket:
+ continue
+ lines.append(f"## {verb} ({len(bucket)})")
+ lines.append("")
+ for action in bucket:
+ tag = f"[{action.ide or '—'}:{action.plan_slug}]"
+ lines.append(
+ f"- {tag} `({action.priority})` {action.reason} — {action.hint}"
+ )
+ link = f" - Source: [`{action.plan_source}`]({action.plan_source})"
+ lines.append(link)
+ if action.item_excerpt:
+ lines.append(f" - Excerpt: _{action.item_excerpt}_")
+ lines.append("")
+ return "\n".join(lines)
+
+
+def render_master_mirror(
+ cards: list["PlanCard"],
+ master_doc: dict[str, object] | None = None,
+ *,
+ report_date: str = "",
+) -> str:
+ """Generated mirror of the master plan with rollup + drift detection.
+
+ Layout:
+ # Master plan (mirror)
+        _Generated <date>; do not edit by hand. Source: <_master.plan.md or "auto">_
+
+ ## IDE: cursor
+ ### Disposition: merge-ready
+ | Plan | Owner | Priority | Items | Source |
+ ### Disposition: active
+ ...
+
+ ## Drift
+ - on disk but missing from curated master: ...
+ - in curated master but missing from disk: ...
+ """
+ master_doc = master_doc or {"frontmatter": {}, "children": []}
+ children: list[str] = list(master_doc.get("children", [])) # type: ignore[arg-type]
+
+ lines: list[str] = ["# Master plan (mirror)", ""]
+ src_note = (
+ ".cursor/plans/_master.plan.md"
+ if children
+ else "auto (no curated master found)"
+ )
+ if report_date:
+ lines.append(
+ f"_Generated {report_date} by `scripts/daily_plan_audit.py`. "
+ f"Do not edit by hand. Source: {src_note}._"
+ )
+ else:
+ lines.append(
+ "_Generated by `scripts/daily_plan_audit.py`. "
+ f"Do not edit by hand. Source: {src_note}._"
+ )
+ lines.append("")
+
+ if not cards:
+ lines.append("- _No plans discovered._")
+ lines.append("")
+ return "\n".join(lines)
+
+ by_ide: dict[str, list[PlanCard]] = defaultdict(list)
+ for card in cards:
+ by_ide[card.ide or "—"].append(card)
+
+ for ide in sorted(by_ide.keys()):
+ ide_cards = by_ide[ide]
+ lines.append(f"## IDE: `{ide}` ({len(ide_cards)} plans)")
+ lines.append("")
+ ide_cards.sort(
+ key=lambda c: (
+ _DISPOSITION_ORDER.index(c.disposition)
+ if c.disposition in _DISPOSITION_ORDER
+ else 99,
+ _PRIORITY_RANK.get(c.priority, 3),
+ c.slug,
+ )
+ )
+ current_disp = ""
+ for card in ide_cards:
+ if card.disposition != current_disp:
+ if current_disp:
+ lines.append("")
+ current_disp = card.disposition
+ lines.append(f"### Disposition: `{current_disp}`")
+ lines.append("")
+ lines.append(
+ "| Plan | Owner | Priority | Items (Impl/Active/Blocked/Out/Unk) | Source |"
+ )
+ lines.append(
+ "|------|-------|----------|--------------------------------------|--------|"
+ )
+ counts = card.counts or {}
+ items_cell = (
+ f"{counts.get('Implemented', 0)}/"
+ f"{counts.get('In Progress', 0)}/"
+ f"{counts.get('Blocked', 0)}/"
+ f"{counts.get('Outstanding', 0)}/"
+ f"{counts.get('Unknown', 0)}"
+ )
+ title_cell = card.title.replace("|", "\\|")
+ if len(title_cell) > 80:
+ title_cell = title_cell[:77] + "..."
+ lines.append(
+ f"| [{title_cell}]({card.source}) "
+ f"| {card.owner} "
+ f"| {card.priority} "
+ f"| {items_cell} "
+ f"| `{card.source}` |"
+ )
+ lines.append("")
+
+ # Drift detection.
+ on_disk = {c.source for c in cards}
+ in_master = set(children)
+ only_disk = sorted(on_disk - in_master)
+ only_master = sorted(in_master - on_disk)
+
+ lines.append("## Drift")
+ lines.append("")
+ if not children:
+ lines.append("- _No curated master found at `.cursor/plans/_master.plan.md`._")
+ lines.append(
+ f"- {len(only_disk)} plans discovered on disk; "
+ "create `_master.plan.md` to formalize the index."
+ )
+ elif not only_disk and not only_master:
+ lines.append("- _No drift: curated master matches discovered plans._")
+ else:
+ if only_disk:
+ lines.append("### On disk but missing from curated master:")
+ for src in only_disk:
+ lines.append(f"- `{src}`")
+ if only_master:
+ lines.append("### In curated master but missing from disk:")
+ for src in only_master:
+ lines.append(f"- `{src}`")
+ lines.append("")
+ return "\n".join(lines)
+
+
def build_report(
report_date: str,
trigger: str,
@@ -389,16 +1286,26 @@ def build_report(
primary: list[Path],
secondary: list[Path],
items: list[PlanItem],
+ *,
+ cards_by_source: dict[str, "PlanCard"] | None = None,
) -> str:
overlaps = detect_overlaps(items)
- gaps = detect_gaps(items)
+ cards_by_source = cards_by_source or {}
+ gaps = detect_gaps(items, cards_by_source=cards_by_source)
scores = score_report(items, overlaps, gaps)
summary_counts = Counter(item.status for item in items)
mem = memory_context(repo_root)
+
+    def _item_has_inherited_owner(it: PlanItem) -> bool:
+ if has_explicit_owner(it.item):
+ return True
+ c = cards_by_source.get(it.source)
+ return bool(c and c.owner and c.owner != "@user")
+
top_risks: list[str] = []
blocked_no_owner = [
- i for i in items if i.status == "Blocked" and not has_explicit_owner(i.item)
+ i for i in items
+ if i.status == "Blocked" and not _item_has_inherited_owner(i)
]
if blocked_no_owner:
top_risks.append(
@@ -412,6 +1319,10 @@ def build_report(
top_risks.append(
"Some active items lack explicit owner markers (@, owner:, assignee:, or dri:)."
)
+ if any("delegation_unresolved" in g["missing"] for g in gaps):
+ top_risks.append(
+ "Some items declare `delegate:` without naming a target sub-agent."
+ )
gap_test_or_path = [
g
for g in gaps
@@ -492,6 +1403,10 @@ def build_report(
body.append(f"| {status} | {summary_counts[status]} |")
body.append("")
+ # Plan-centric cards (new in schema 1.1) — grouped by IDE then disposition.
+ actions = detect_actions(list(cards_by_source.values()))
+ body.extend(render_plan_cards(list(cards_by_source.values()), actions))
+
grouped = defaultdict(list)
for item in items:
grouped[item.status].append(item)
@@ -567,6 +1482,7 @@ def main() -> int:
out_dir = repo_root / out_dir
out_dir.mkdir(parents=True, exist_ok=True)
+ default_owner = resolve_default_owner(repo_root)
primary, secondary = discover_sources(repo_root)
items: list[PlanItem] = []
@@ -575,16 +1491,52 @@ def main() -> int:
for path in secondary:
items.extend(collect_items(path, repo_root))
- report = build_report(args.report_date, args.trigger, repo_root, primary, secondary, items)
+ cards_by_source = build_cards_index(
+ repo_root, primary, items, default_owner=default_owner
+ )
+
+ report = build_report(
+ args.report_date,
+ args.trigger,
+ repo_root,
+ primary,
+ secondary,
+ items,
+ cards_by_source=cards_by_source,
+ )
dated_path = out_dir / f"plan-audit-{args.report_date}.md"
dated_path.write_text(report, encoding="utf-8")
latest_path = out_dir / "latest.md"
shutil.copyfile(dated_path, latest_path)
- board = render_task_board_markdown(args.report_date, items)
+ board = render_task_board_markdown(
+ args.report_date, items, cards_by_source=cards_by_source
+ )
(out_dir / "plan-task-board.md").write_text(board, encoding="utf-8")
+ # Master mirror (drift-aware).
+ if args.master_plan:
+ master_candidate = Path(args.master_plan)
+ if not master_candidate.is_absolute():
+ master_candidate = repo_root / master_candidate
+ master_path = master_candidate if master_candidate.is_file() else None
+ else:
+ master_path = discover_master_plan(repo_root)
+
+ master_doc = parse_master_plan(master_path, repo_root) if master_path else None
+ mirror = render_master_mirror(
+ list(cards_by_source.values()),
+ master_doc,
+ report_date=args.report_date,
+ )
+ (out_dir / "master-plan.md").write_text(mirror, encoding="utf-8")
+
+ # Triage queue (`next-actions.md`).
+ actions = detect_actions(list(cards_by_source.values()))
+ next_actions = render_next_actions(actions, report_date=args.report_date)
+ (out_dir / "next-actions.md").write_text(next_actions, encoding="utf-8")
+
print(str(dated_path))
return 0
diff --git a/tests/test_workspace_primer_hooks.py b/tests/test_workspace_primer_hooks.py
index 2a79852..fade32c 100644
--- a/tests/test_workspace_primer_hooks.py
+++ b/tests/test_workspace_primer_hooks.py
@@ -197,7 +197,7 @@ def test_daily_plan_audit_report_contract(tmp_project_dir: Path) -> None:
items=items,
)
- assert 'schema_version: "1.0"' in report
+ assert 'schema_version: "1.1"' in report
assert "## Status Matrix (5-State)" in report
assert "## Overlap Analysis" in report
assert "## Gap Analysis" in report
From bbbde5d5ddc613fb8369215a389237fd9047460f Mon Sep 17 00:00:00 2001
From: ettienne <12529192+jhweb@users.noreply.github.com>
Date: Mon, 4 May 2026 12:45:49 +0200
Subject: [PATCH 09/10] unify subagent templates and tighten planning audit
workflow
Co-authored-by: Cursor
---
.gitignore | 3 +-
CHANGELOG.md | 4 +-
README.md | 8 +-
braindrain/server.py | 6 +-
braindrain/workspace_primer.py | 114 ++++++++++++++----
config/templates/agents/architect.md | 1 +
config/templates/agents/coordinator.md | 3 +
config/templates/agents/daily-plan-auditor.md | 59 +++++++++
config/templates/codex-subagents/architect.md | 14 ---
.../templates/codex-subagents/coordinator.md | 14 ---
config/templates/codex-subagents/embedding.md | 14 ---
config/templates/codex-subagents/gitops.md | 19 ---
config/templates/codex-subagents/intake.md | 14 ---
config/templates/codex-subagents/research.md | 14 ---
config/templates/codex-subagents/testops.md | 14 ---
config/templates/codex-subagents/toolcall.md | 14 ---
.../templates/cursor-subagents/architect.md | 14 ---
.../templates/cursor-subagents/coordinator.md | 15 ---
.../templates/cursor-subagents/embedding.md | 14 ---
config/templates/cursor-subagents/gitops.md | 19 ---
config/templates/cursor-subagents/intake.md | 14 ---
.../templates/cursor-subagents/librarian.md | 58 ---------
config/templates/cursor-subagents/research.md | 14 ---
config/templates/cursor-subagents/testops.md | 14 ---
config/templates/cursor-subagents/toolcall.md | 14 ---
config/templates/ruler/RULES.md | 13 ++
scripts/daily_plan_audit.py | 108 ++++++++++++++++-
tests/test_plan_auditor_master.py | 90 ++++++++++++++
tests/test_scriptlib.py | 4 -
tests/test_workspace_primer_hooks.py | 15 +++
30 files changed, 391 insertions(+), 330 deletions(-)
create mode 100644 config/templates/agents/daily-plan-auditor.md
delete mode 100644 config/templates/codex-subagents/architect.md
delete mode 100644 config/templates/codex-subagents/coordinator.md
delete mode 100644 config/templates/codex-subagents/embedding.md
delete mode 100644 config/templates/codex-subagents/gitops.md
delete mode 100644 config/templates/codex-subagents/intake.md
delete mode 100644 config/templates/codex-subagents/research.md
delete mode 100644 config/templates/codex-subagents/testops.md
delete mode 100644 config/templates/codex-subagents/toolcall.md
delete mode 100644 config/templates/cursor-subagents/architect.md
delete mode 100644 config/templates/cursor-subagents/coordinator.md
delete mode 100644 config/templates/cursor-subagents/embedding.md
delete mode 100644 config/templates/cursor-subagents/gitops.md
delete mode 100644 config/templates/cursor-subagents/intake.md
delete mode 100644 config/templates/cursor-subagents/librarian.md
delete mode 100644 config/templates/cursor-subagents/research.md
delete mode 100644 config/templates/cursor-subagents/testops.md
delete mode 100644 config/templates/cursor-subagents/toolcall.md
create mode 100644 tests/test_plan_auditor_master.py
diff --git a/.gitignore b/.gitignore
index ee216f8..527d586 100644
--- a/.gitignore
+++ b/.gitignore
@@ -56,6 +56,7 @@ tests/*
!tests/test_workspace_primer_hooks.py
!tests/test_scriptlib.py
!tests/test_env_probe_codex.py
+!tests/test_plan_auditor_master.py
# Ruler outputs at repo root (no leading dot)
/AGENTS.md
@@ -70,7 +71,7 @@ tests/*
!/.gitattributes
!/.gitmodules
!/.env.example
-# `.cursor/` stays fully ignored; ship agents/skills via config/templates/cursor-* (deploy: prime_workspace).
+# `.cursor/` stays fully ignored; ship subagents from config/templates/agents/ and skills from config/templates/cursor-skills/ (deploy: prime_workspace).
# If your team commits other root dotdirs (e.g. .husky), add: !/.husky/
# `.devdocs/` is covered by `/.*` above — local-only; never add ! exceptions for it.
# END BRAINDRAIN GITIGNORE PROTOCOL
diff --git a/CHANGELOG.md b/CHANGELOG.md
index d620465..c0f6420 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -14,14 +14,14 @@ The format is based on keeping a clear, user-facing history. Version in `VERSION
### For contributors
-- **Cursor agents/skills**: librarian and `scriptlib-librarian` live under `config/templates/cursor-subagents/` and `config/templates/cursor-skills/`; `prime_workspace()` deploys both to `.cursor/agents/` and `.cursor/skills/`. The repo root gitignore no longer whitelists paths under `.cursor/` (Braindrain protocol: ship via templates only).
+- **Subagent templates**: single source tree `config/templates/agents/` deploys to `.cursor/agents/` and/or `.codex/agents/` depending on the resolved IDE set; duplicate `cursor-subagents/` and `codex-subagents/` template dirs were removed. Skills remain under `config/templates/cursor-skills/`. Added `daily-plan-auditor` agent and planning close-out guidance in Ruler `RULES.md`. Planning audit script moves archived plans into `.plan.archives/` under the owning IDE `plans/` directory. Tests: `tests/test_plan_auditor_master.py`.
## [1.0.3] — 2026-04-10
### For users
- You can opt into **scriptlib**: harvest and search reusable scripts, run them through MCP, and keep guidance in agent rules when scriptlib is enabled for a workspace.
-- **prime_workspace** now deploys **Cursor and Codex subagent templates** from `config/templates/cursor-subagents/` and `config/templates/codex-subagents/`, with optional `sync_subagents` and `codex_agent_targets` for Codex layout.
+- **prime_workspace** deploys Cursor/Codex subagent markdown into `.cursor/agents/` and `.codex/agents/` with optional `sync_subagents` and `codex_agent_targets`. (Older docs referred to split template trees; the repo now uses a single canonical tree — see Unreleased.)
- **Token checkpoint protocol** and optional `.braindrain/token-metrics.jsonl` schema `1.0` are documented in templates and README for consistent observability.
### For contributors
diff --git a/README.md b/README.md
index 5cfbf87..56b4944 100644
--- a/README.md
+++ b/README.md
@@ -439,10 +439,10 @@ braindrain/
- `.cursor/hooks/on-stop-gitops.sh` (TASK-GRAPH branch queueing)
- `.cursor/hooks/on-stop-daily-plan-audit.sh` (daily-gated planning audit report)
Edit templates under `config/templates/cursor/` in this repo, then re-prime consumer projects to roll out hook changes.
-- **Subagent templates**: `prime_workspace()` deploys:
- - `config/templates/cursor-subagents/` -> `.cursor/agents/`
- - `config/templates/cursor-skills/` -> `.cursor/skills/` (e.g. scriptlib-librarian)
- - `config/templates/codex-subagents/` -> `.codex/agents/` (or `codex_agent_targets`)
+- **Subagent templates**: canonical source is `config/templates/agents/*.md`. `prime_workspace()` copies that tree to:
+ - `.cursor/agents/` when Cursor is in the resolved agent set, and
+ - `.codex/agents/` when Codex is in the resolved agent set (same files; IDE-specific layout only).
+ Skills still deploy from `config/templates/cursor-skills/` -> `.cursor/skills/` (e.g. scriptlib-librarian).
Existing files are create-only by default; set `sync_subagents=true` to update with backups. `.cursor/` is gitignored at repo root; do not commit generated agent/skill files—edit templates and re-run `prime_workspace`.
- **Codex config merge**: `prime_workspace()` appends/updates a managed `BRAINDRAIN SUBAGENTS` block in `.codex/config.toml` only when allowed by policy (`sync_subagents=true` for existing files). Existing MCP server entries remain intact.
- **Project memory artifacts**: initialized by `prime_workspace()` (or `init_project_memory()`) and kept separate from generated protocol files:
diff --git a/braindrain/server.py b/braindrain/server.py
index 0844b39..9094f40 100644
--- a/braindrain/server.py
+++ b/braindrain/server.py
@@ -1173,9 +1173,9 @@ async def prime_workspace(
agents: Explicit agent ids (e.g. ["cursor", "claude"]).
dry_run: Preview changes without writing files.
sync_templates: Update existing .ruler files with timestamped backups.
- sync_subagents: Update existing .cursor/agents/*.md from
- config/templates/agents with timestamped backups (create-only by default);
- when Codex is in scope, also updates the managed block in .codex/config.toml.
+ sync_subagents: Update existing ``.cursor/agents/*.md`` and ``.codex/agents/*.md``
+ from ``config/templates/agents/`` with timestamped backups (create-only by default);
+ when Codex is in scope, also updates the managed block in ``.codex/config.toml``.
all_agents: Deploy full template and apply all configured agents.
local_only: Pass --local-only to ruler apply (default True).
patch_user_cursor_mcp: If True, also patch ~/.cursor/mcp.json with
diff --git a/braindrain/workspace_primer.py b/braindrain/workspace_primer.py
index 2bbb45b..3ab5a96 100644
--- a/braindrain/workspace_primer.py
+++ b/braindrain/workspace_primer.py
@@ -331,30 +331,25 @@ def deploy_templates(
return written
-def deploy_subagent_templates(
- target_dir: Path,
- *,
- sync_subagents: bool = False,
-) -> dict[str, dict[str, str | bool]]:
- """
- Deploy subagent markdown templates from config/templates/agents -> .cursor/agents.
+_SUBAGENT_ACTION_RANK = {
+ "updated": 4,
+ "created": 3,
+ "created_from_empty": 3,
+ "skipped_existing": 2,
+ "dry_run": 1,
+}
- Default mode is create-only:
- - missing files are created
- - existing non-empty files are preserved
- - existing empty files are filled
- When sync_subagents=True:
- - existing files are backed up to .bak. and overwritten
- """
- agents_dir = target_dir / ".cursor" / "agents"
+def _deploy_subagent_templates_to_dir(
+ *,
+ agents_dir: Path,
+ sync_subagents: bool,
+) -> dict[str, dict[str, str | bool]]:
+ """Copy ``config/templates/agents/*.md`` into ``agents_dir``."""
agents_dir.mkdir(parents=True, exist_ok=True)
written: dict[str, dict[str, str | bool]] = {}
ts = datetime.now().strftime("%Y%m%d-%H%M%S")
- if not AGENT_TEMPLATES_DIR.exists():
- return written
-
for src in sorted(AGENT_TEMPLATES_DIR.glob("*.md")):
dst = agents_dir / src.name
content = src.read_text(encoding="utf-8")
@@ -377,6 +372,79 @@ def deploy_subagent_templates(
return written
+def _merge_subagent_deploy_results(
+ left: dict[str, dict[str, str | bool]],
+ right: dict[str, dict[str, str | bool]],
+) -> dict[str, dict[str, str | bool]]:
+ """Merge per-file deploy metadata from two destination trees."""
+ merged: dict[str, dict[str, str | bool]] = dict(left)
+ for fname, meta in right.items():
+ if fname not in merged:
+ merged[fname] = dict(meta)
+ continue
+ a = str((merged[fname] or {}).get("action") or "")
+ b = str((meta or {}).get("action") or "")
+ pick = (
+ a
+ if _SUBAGENT_ACTION_RANK.get(a, 0) >= _SUBAGENT_ACTION_RANK.get(b, 0)
+ else b
+ )
+ ba = str((merged[fname] or {}).get("backup") or "")
+ bb = str((meta or {}).get("backup") or "")
+ backup = bb if pick == b and bb else ba
+ merged[fname] = {"action": pick, "backup": backup}
+ return merged
+
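+# Hedged example: merging {"a.md": {"action": "skipped_existing", "backup": ""}}
+# with {"a.md": {"action": "updated", "backup": "a.md.bak.1"}} keeps the
+# "updated" record (rank 4 beats rank 2) together with its backup path.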
+
+def deploy_subagent_templates(
+ target_dir: Path,
+ *,
+ sync_subagents: bool = False,
+ to_cursor: bool = True,
+ to_codex: bool = False,
+) -> dict[str, dict[str, str | bool]]:
+ """
+ Deploy subagent markdown from ``config/templates/agents/`` into IDE agent dirs.
+
+ When ``to_cursor`` is True, writes ``.cursor/agents/*.md``.
+ When ``to_codex`` is True, writes ``.codex/agents/*.md``.
+ Both can be True so a single canonical template tree serves every IDE.
+
+ Default mode is create-only:
+ - missing files are created
+ - existing non-empty files are preserved
+ - existing empty files are filled
+
+ When sync_subagents=True:
+    - existing files are backed up to `.bak.<timestamp>` copies and overwritten
+ """
+ if not AGENT_TEMPLATES_DIR.exists():
+ return {}
+
+ partials: list[dict[str, dict[str, str | bool]]] = []
+ if to_cursor:
+ partials.append(
+ _deploy_subagent_templates_to_dir(
+ agents_dir=target_dir / ".cursor" / "agents",
+ sync_subagents=sync_subagents,
+ )
+ )
+ if to_codex:
+ partials.append(
+ _deploy_subagent_templates_to_dir(
+ agents_dir=target_dir / ".codex" / "agents",
+ sync_subagents=sync_subagents,
+ )
+ )
+ if not partials:
+ return {}
+
+ out = partials[0]
+ for p in partials[1:]:
+ out = _merge_subagent_deploy_results(out, p)
+ return out
+
+
def deploy_cursor_hook_templates(
target_dir: Path,
*,
@@ -1264,6 +1332,9 @@ def prime(
cursor_in_scope = bool(
all_agents or apply_agents is None or "cursor" in (apply_agents or [])
)
+ codex_in_scope = bool(
+ all_agents or apply_agents is None or "codex" in (apply_agents or [])
+ )
launcher_path = _get_launcher_path()
try:
@@ -1286,6 +1357,8 @@ def prime(
subagent_results = deploy_subagent_templates(
target_dir,
sync_subagents=sync_subagents,
+ to_cursor=cursor_in_scope,
+ to_codex=codex_in_scope,
)
cursor_hook_results = (
deploy_cursor_hook_templates(
@@ -1304,7 +1377,7 @@ def prime(
subagent_results = {
str(f.name): {"action": "dry_run", "backup": ""}
for f in AGENT_TEMPLATES_DIR.glob("*.md")
- } if AGENT_TEMPLATES_DIR.exists() else {}
+ } if (cursor_in_scope or codex_in_scope) and AGENT_TEMPLATES_DIR.exists() else {}
cursor_hook_results = (
deploy_cursor_hook_templates(
target_dir,
@@ -1363,9 +1436,6 @@ def prime(
cursor_mcp_json = proj_mcp
# Step 6: codex subagent config policy (after ruler apply to avoid overwrite).
- codex_in_scope = bool(
- all_agents or apply_agents is None or "codex" in (apply_agents or [])
- )
codex_subagent_config: dict[str, str | bool] = {"skipped": True}
if codex_in_scope:
codex_subagent_config = ensure_codex_subagent_config(
diff --git a/config/templates/agents/architect.md b/config/templates/agents/architect.md
index ca69e9b..d7f102b 100644
--- a/config/templates/agents/architect.md
+++ b/config/templates/agents/architect.md
@@ -73,5 +73,6 @@ Instructions specifically for the Sonnet coordinator:
- Prefer explicit over implicit. The coordinator and sub-agents are not you — they need precise instructions.
- Flag any ambiguities from intake as `[ASSUMPTION: ...]` inline.
- If AI compatibility was requested, include full LLMs.txt schema in PRD.
+- If you create or materially update any `*.plan.md` under an IDE `plans/` directory (for example `.cursor/plans/`), follow **Planning session close-out** in Ruler `RULES.md` (link new plans in `_master.plan.md`, then run `daily-plan-auditor` or `scripts/daily_plan_audit.py`).
After writing all files: "Architecture complete. Run /coordinate to begin execution."
diff --git a/config/templates/agents/coordinator.md b/config/templates/agents/coordinator.md
index 799786f..ffd4377 100644
--- a/config/templates/agents/coordinator.md
+++ b/config/templates/agents/coordinator.md
@@ -27,6 +27,7 @@ For each stage:
- `[GITOPS]` → delegate to `gitops` subagent
- `[TESTOPS]` → delegate to `testops` subagent (after build tasks)
- `[RESEARCH]` → delegate to `research` subagent
+ - `[PLAN AUDIT]` → delegate to `daily-plan-auditor` subagent (after planning sessions or TASK-GRAPH plan churn)
- `[SCRIPTLIB]` → delegate to `librarian` subagent
- `[EMBED]` → delegate to `embedding` subagent
- `[BUILD]` → handle directly or delegate to `toolcall` subagent
@@ -36,6 +37,8 @@ For each stage:
5. **Checkpoint** — update `.cursor/PROGRESS.md` after each task
6. **Advance** — when all stage tasks pass, move to next stage
+When you **write or materially edit** any `*.plan.md` under an IDE `plans/` tree, finish with planning close-out per Ruler `RULES.md`: update `_master.plan.md` links if needed, then invoke `daily-plan-auditor` or run `scripts/daily_plan_audit.py` (do not rely only on the daily-gated stop hook).
+
## Progress Tracking
Maintain `.cursor/PROGRESS.md`:
diff --git a/config/templates/agents/daily-plan-auditor.md b/config/templates/agents/daily-plan-auditor.md
new file mode 100644
index 0000000..7f105c7
--- /dev/null
+++ b/config/templates/agents/daily-plan-auditor.md
@@ -0,0 +1,59 @@
+---
+name: daily-plan-auditor
+description: Daily planning audit specialist. Use after planning sessions when new or updated `*.plan.md` files exist under an IDE `plans/` directory, or when the coordinator tags `[PLAN AUDIT]`. Runs `scripts/daily_plan_audit.py`, interprets drift vs `_master.plan.md`, and returns structured triage — not feature code.
+model: composer-2
+readonly: false
+is_background: true
+---
+
+# Daily Plan Auditor
+
+You maintain honest, machine-assisted visibility into planning artifacts. You do not replace human curation of `_master.plan.md`; you run the auditor, read its outputs, and report JSON.
+
+## When to run
+
+- After creating or materially editing a plan under `.cursor/plans/`, `.codex/plans/`, or another known IDE `plans/` tree (same beat as a “planning session” close-out).
+- When the coordinator (or user) invokes you with `[PLAN AUDIT]` or `/plan-audit`.
+- Optionally on a schedule via the Cursor stop hook (`on-stop-daily-plan-audit.sh`); that path is rate-limited — session-close runs are still valuable.
+
+## Commands (repo root)
+
+```bash
+python3 scripts/daily_plan_audit.py --repo-root . --report-date "$(date +%Y-%m-%d)" --trigger "manual-plan-audit"
+```
+
+For automation/tests only, `--skip-archive` avoids moving archived plans.
+
+## Archive protocol
+
+Plans move to `.plan.archives/` under their IDE `plans/` directory when:
+
+- Frontmatter includes `archived: true`, or `status: archived`, or `disposition: archived`, or
+- `_master.plan.md` lists them under `archived_plans:` or `archive:` (paths relative to that `plans/` dir).
+
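+For example, a plan can opt in through its own frontmatter (illustrative):
+
+```yaml
+---
+title: Old experiment
+disposition: archived
+---
+```
+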
+After a move, update links in `_master.plan.md` on the next edit so drift reports stay clean.
+
+## Response format
+
+Return JSON only:
+
+```json
+{
+ "taskId": "",
+ "reports": {
+ "dated": ".braindrain/plan-reports/plan-audit-YYYY-MM-DD.md",
+ "taskBoard": ".braindrain/plan-reports/plan-task-board.md",
+ "masterMirror": ".braindrain/plan-reports/master-plan.md",
+ "nextActions": ".braindrain/plan-reports/next-actions.md"
+ },
+ "summary": "one paragraph: coverage, top risk, drift vs master if any",
+ "recommendedVerbs": ["MERGE", "REPLAN", "RESEARCH", "IMPLEMENT", "FIX", "BACKLOG"],
+ "archiveMoves": [".cursor/plans/.plan.archives/example.plan.md"]
+}
+```
+
+## Rules
+
+- Prefer the generated reports under `.braindrain/plan-reports/` over re-deriving scores in chat.
+- Replan work: prefer a **new** plan file and link supersession in `_master.plan.md` rather than silently overwriting history (see coordinator / architect guidance).
+- Research-heavy follow-ups: delegate to the `research` subagent, then fold findings back into the parent plan.
diff --git a/config/templates/codex-subagents/architect.md b/config/templates/codex-subagents/architect.md
deleted file mode 100644
index 0113275..0000000
--- a/config/templates/codex-subagents/architect.md
+++ /dev/null
@@ -1,14 +0,0 @@
----
-name: architect
-description: Architecture planner. Use for PRD drafting, file structure design, and ordered implementation plans.
-model: inherit
-readonly: true
----
-
-You are a software architect focused on execution-ready plans.
-
-When invoked:
-1. Clarify requirements, constraints, and assumptions.
-2. Produce a concrete architecture with components and data flow.
-3. Propose file-level changes and phased implementation steps.
-4. Identify risks, edge cases, and test strategy before coding starts.
diff --git a/config/templates/codex-subagents/coordinator.md b/config/templates/codex-subagents/coordinator.md
deleted file mode 100644
index 0c85d3c..0000000
--- a/config/templates/codex-subagents/coordinator.md
+++ /dev/null
@@ -1,14 +0,0 @@
----
-name: coordinator
-description: Workflow orchestrator. Use to break work into stages, delegate to specialists, and track completion order.
-model: inherit
-readonly: false
----
-
-You are a coordination specialist for multi-step engineering tasks.
-
-When invoked:
-1. Convert goals into ordered milestones.
-2. Delegate each milestone to the right specialist behavior.
-3. Track dependencies and unblock the next step quickly.
-4. Return a concise progress report with next actions.
diff --git a/config/templates/codex-subagents/embedding.md b/config/templates/codex-subagents/embedding.md
deleted file mode 100644
index 278c8b9..0000000
--- a/config/templates/codex-subagents/embedding.md
+++ /dev/null
@@ -1,14 +0,0 @@
----
-name: embedding
-description: Semantic indexing helper. Use for embedding pipelines, retrieval tuning, and vector-search quality checks.
-model: fast
-readonly: false
----
-
-You are an embedding and retrieval specialist.
-
-When invoked:
-1. Review current chunking, embedding, and retrieval setup.
-2. Suggest measurable quality improvements (recall/precision/latency).
-3. Implement minimal safe changes when requested.
-4. Report expected impact and verification steps.
diff --git a/config/templates/codex-subagents/gitops.md b/config/templates/codex-subagents/gitops.md
deleted file mode 100644
index a8276f1..0000000
--- a/config/templates/codex-subagents/gitops.md
+++ /dev/null
@@ -1,19 +0,0 @@
----
-name: gitops
-description: Git workflow specialist. Use proactively for branch setup, commit prep, PR creation, and merge-readiness checks.
-model: inherit
-readonly: false
----
-
-You are a Git workflow specialist for this repository.
-
-When invoked:
-1. Inspect current git state (status, staged/unstaged diff, branch tracking).
-2. Propose the safest next git action based on user intent.
-3. Execute requested git operations carefully (no destructive commands unless explicitly requested).
-4. Summarize exactly what changed and what still needs manual confirmation.
-
-Guardrails:
-- Never rewrite history unless the user explicitly asks.
-- Never force-push to protected branches.
-- Confirm commit scope matches the user request.
diff --git a/config/templates/codex-subagents/intake.md b/config/templates/codex-subagents/intake.md
deleted file mode 100644
index e33057e..0000000
--- a/config/templates/codex-subagents/intake.md
+++ /dev/null
@@ -1,14 +0,0 @@
----
-name: intake
-description: Discovery specialist. Use first to gather scope, constraints, acceptance criteria, and environment assumptions.
-model: fast
-readonly: true
----
-
-You are an intake specialist for engineering tasks.
-
-When invoked:
-1. Gather problem statement, constraints, and expected outcome.
-2. Identify missing details and explicit assumptions.
-3. Produce a clear, testable scope definition.
-4. Hand off structured context for planning or implementation.
diff --git a/config/templates/codex-subagents/research.md b/config/templates/codex-subagents/research.md
deleted file mode 100644
index febd725..0000000
--- a/config/templates/codex-subagents/research.md
+++ /dev/null
@@ -1,14 +0,0 @@
----
-name: research
-description: Investigation specialist. Use for deep codebase exploration, dependency behavior checks, and evidence-backed findings.
-model: fast
-readonly: true
----
-
-You are a research specialist focused on evidence and clarity.
-
-When invoked:
-1. Gather relevant code and docs quickly.
-2. Compare alternatives and trade-offs.
-3. Cite concrete evidence for conclusions.
-4. Return actionable recommendations, not just observations.
diff --git a/config/templates/codex-subagents/testops.md b/config/templates/codex-subagents/testops.md
deleted file mode 100644
index 77db423..0000000
--- a/config/templates/codex-subagents/testops.md
+++ /dev/null
@@ -1,14 +0,0 @@
----
-name: testops
-description: Test execution specialist. Use proactively to run focused tests, analyze failures, and report verification evidence.
-model: fast
-readonly: false
----
-
-You are a testing and verification specialist.
-
-When invoked:
-1. Select the smallest test scope that validates the requested change.
-2. Run tests and capture failures with root-cause hints.
-3. Apply safe fixes when requested and re-run verification.
-4. Report pass/fail status with exact commands used.
diff --git a/config/templates/codex-subagents/toolcall.md b/config/templates/codex-subagents/toolcall.md
deleted file mode 100644
index c6f3d39..0000000
--- a/config/templates/codex-subagents/toolcall.md
+++ /dev/null
@@ -1,14 +0,0 @@
----
-name: toolcall
-description: Tool-invocation specialist. Use when MCP or external tools need strict schema validation and safe execution.
-model: inherit
-readonly: false
----
-
-You are a precise tool invocation specialist.
-
-When invoked:
-1. Read tool schemas before calling.
-2. Validate required parameters and safe defaults.
-3. Execute tool calls with minimal, auditable inputs.
-4. Return concise results and any follow-up actions.
diff --git a/config/templates/cursor-subagents/architect.md b/config/templates/cursor-subagents/architect.md
deleted file mode 100644
index 0113275..0000000
--- a/config/templates/cursor-subagents/architect.md
+++ /dev/null
@@ -1,14 +0,0 @@
----
-name: architect
-description: Architecture planner. Use for PRD drafting, file structure design, and ordered implementation plans.
-model: inherit
-readonly: true
----
-
-You are a software architect focused on execution-ready plans.
-
-When invoked:
-1. Clarify requirements, constraints, and assumptions.
-2. Produce a concrete architecture with components and data flow.
-3. Propose file-level changes and phased implementation steps.
-4. Identify risks, edge cases, and test strategy before coding starts.
diff --git a/config/templates/cursor-subagents/coordinator.md b/config/templates/cursor-subagents/coordinator.md
deleted file mode 100644
index 0e21cd1..0000000
--- a/config/templates/cursor-subagents/coordinator.md
+++ /dev/null
@@ -1,15 +0,0 @@
----
-name: coordinator
-description: Workflow orchestrator. Use to break work into stages, delegate to specialists, and track completion order.
-model: inherit
-readonly: false
----
-
-You are a coordination specialist for multi-step engineering tasks.
-
-When invoked:
-1. Convert goals into ordered milestones.
-2. Delegate each milestone to the right specialist behavior.
-3. Route any new freestanding reusable ops, test-helper, or command script request through librarian before build work starts.
-4. Track dependencies and unblock the next step quickly.
-5. Return a concise progress report with next actions.
diff --git a/config/templates/cursor-subagents/embedding.md b/config/templates/cursor-subagents/embedding.md
deleted file mode 100644
index 278c8b9..0000000
--- a/config/templates/cursor-subagents/embedding.md
+++ /dev/null
@@ -1,14 +0,0 @@
----
-name: embedding
-description: Semantic indexing helper. Use for embedding pipelines, retrieval tuning, and vector-search quality checks.
-model: fast
-readonly: false
----
-
-You are an embedding and retrieval specialist.
-
-When invoked:
-1. Review current chunking, embedding, and retrieval setup.
-2. Suggest measurable quality improvements (recall/precision/latency).
-3. Implement minimal safe changes when requested.
-4. Report expected impact and verification steps.
diff --git a/config/templates/cursor-subagents/gitops.md b/config/templates/cursor-subagents/gitops.md
deleted file mode 100644
index a8276f1..0000000
--- a/config/templates/cursor-subagents/gitops.md
+++ /dev/null
@@ -1,19 +0,0 @@
----
-name: gitops
-description: Git workflow specialist. Use proactively for branch setup, commit prep, PR creation, and merge-readiness checks.
-model: inherit
-readonly: false
----
-
-You are a Git workflow specialist for this repository.
-
-When invoked:
-1. Inspect current git state (status, staged/unstaged diff, branch tracking).
-2. Propose the safest next git action based on user intent.
-3. Execute requested git operations carefully (no destructive commands unless explicitly requested).
-4. Summarize exactly what changed and what still needs manual confirmation.
-
-Guardrails:
-- Never rewrite history unless the user explicitly asks.
-- Never force-push to protected branches.
-- Confirm commit scope matches the user request.
diff --git a/config/templates/cursor-subagents/intake.md b/config/templates/cursor-subagents/intake.md
deleted file mode 100644
index e33057e..0000000
--- a/config/templates/cursor-subagents/intake.md
+++ /dev/null
@@ -1,14 +0,0 @@
----
-name: intake
-description: Discovery specialist. Use first to gather scope, constraints, acceptance criteria, and environment assumptions.
-model: fast
-readonly: true
----
-
-You are an intake specialist for engineering tasks.
-
-When invoked:
-1. Gather problem statement, constraints, and expected outcome.
-2. Identify missing details and explicit assumptions.
-3. Produce a clear, testable scope definition.
-4. Hand off structured context for planning or implementation.
diff --git a/config/templates/cursor-subagents/librarian.md b/config/templates/cursor-subagents/librarian.md
deleted file mode 100644
index 8e204ea..0000000
--- a/config/templates/cursor-subagents/librarian.md
+++ /dev/null
@@ -1,58 +0,0 @@
----
-name: librarian
-description: Script library keeper. Uses scriptlib to harvest, catalog, explain, score, and run reusable scripts before new ones are written. Invoke for [SCRIPTLIB] tasks or whenever an agent needs a reusable test-helper or ops script.
-model: composer-2
-readonly: false
-is_background: false
----
-
-# Librarian Agent
-
-You are the script library keeper. Your job is to make agents reuse and curate scripts instead of rewriting them from scratch.
-
-## Startup Sequence
-
-1. Read `.cursor/skills/scriptlib-librarian/SKILL.md`
-2. Check whether scriptlib is enabled for the current workspace
-3. If disabled and the task depends on scriptlib, tell the coordinator or user exactly that
-4. Search scriptlib before proposing any new script creation
-5. Refuse to approve a new freestanding reusable script unless scriptlib has returned `reuse`, `fork`, or `new`
-
-## Core Modes
-
-- `harvest` — scan the workspace and copy useful scripts into scriptlib
-- `find` — search and rank existing script entries
-- `explain` — describe a script, why it exists, and when to use it
-- `run` — execute through scriptlib with source-context safety
-- `fork` — create a new version when reuse is close but not exact
-- `promote` — publish a validated local script into the shared personal catalog
-- `update` — pin or upgrade a shared artifact for the workspace
-- `catalog` — refresh and summarize the catalog
-- `score` — record outcomes and adjust validation state
-- `curate` — group, normalize, and maintain the library without mutating shared trust surfaces silently
-
-## Rules
-
-- Always search scriptlib before suggesting a new script
-- Prefer fork over rewrite when an existing script is within one edit of the goal
-- Treat copied test scripts as path-sensitive unless scriptlib has validated `native_copy`
-- Shared catalog mutations require explicit approval
-- Maintenance routines may update local ignore rules and surface promotion or update candidates
-- Return structured JSON, not freeform status text
-
-## Response Format
-
-```json
-{
- "taskId": "",
- "mode": "find|harvest|run|fork|promote|update|catalog|score|curate|explain",
- "status": "success|failure|disabled",
- "scriptId": "",
- "recommendation": "",
- "reuseDecision": "reuse|fork|new",
- "approvalRequired": [],
- "actions": [],
- "notes": [],
- "nextAction": ""
-}
-```
diff --git a/config/templates/cursor-subagents/research.md b/config/templates/cursor-subagents/research.md
deleted file mode 100644
index febd725..0000000
--- a/config/templates/cursor-subagents/research.md
+++ /dev/null
@@ -1,14 +0,0 @@
----
-name: research
-description: Investigation specialist. Use for deep codebase exploration, dependency behavior checks, and evidence-backed findings.
-model: fast
-readonly: true
----
-
-You are a research specialist focused on evidence and clarity.
-
-When invoked:
-1. Gather relevant code and docs quickly.
-2. Compare alternatives and trade-offs.
-3. Cite concrete evidence for conclusions.
-4. Return actionable recommendations, not just observations.
diff --git a/config/templates/cursor-subagents/testops.md b/config/templates/cursor-subagents/testops.md
deleted file mode 100644
index 77db423..0000000
--- a/config/templates/cursor-subagents/testops.md
+++ /dev/null
@@ -1,14 +0,0 @@
----
-name: testops
-description: Test execution specialist. Use proactively to run focused tests, analyze failures, and report verification evidence.
-model: fast
-readonly: false
----
-
-You are a testing and verification specialist.
-
-When invoked:
-1. Select the smallest test scope that validates the requested change.
-2. Run tests and capture failures with root-cause hints.
-3. Apply safe fixes when requested and re-run verification.
-4. Report pass/fail status with exact commands used.
diff --git a/config/templates/cursor-subagents/toolcall.md b/config/templates/cursor-subagents/toolcall.md
deleted file mode 100644
index c6f3d39..0000000
--- a/config/templates/cursor-subagents/toolcall.md
+++ /dev/null
@@ -1,14 +0,0 @@
----
-name: toolcall
-description: Tool-invocation specialist. Use when MCP or external tools need strict schema validation and safe execution.
-model: inherit
-readonly: false
----
-
-You are a precise tool invocation specialist.
-
-When invoked:
-1. Read tool schemas before calling.
-2. Validate required parameters and safe defaults.
-3. Execute tool calls with minimal, auditable inputs.
-4. Return concise results and any follow-up actions.
diff --git a/config/templates/ruler/RULES.md b/config/templates/ruler/RULES.md
index 1daf044..7cb555b 100644
--- a/config/templates/ruler/RULES.md
+++ b/config/templates/ruler/RULES.md
@@ -90,6 +90,19 @@ Example JSONL row:
+### Planning session close-out (Cursor / Codex)
+
+When you **create or finish editing** a tracked plan file under an IDE `plans/` directory (for example `.cursor/plans/*.plan.md`), **planning-owned agents** (for example `architect`, `coordinator`, or any agent authoring `*.plan.md` in a `plans/` tree) **must** complete this close-out **before ending the turn**:
+
+1. Ensure `_master.plan.md` links any new active plans (markdown links to `*.plan.md` files).
+2. Run the planning auditor so reports stay current — either invoke the `daily-plan-auditor` subagent or run:
+ `python3 scripts/daily_plan_audit.py --repo-root . --trigger "post-planning-session"`
+ (The Cursor stop hook may also run the auditor, but it is daily-gated; session close-out should not rely on the hook alone.)
+3. For **replan** work, prefer a **new** plan file and record supersession in the master index rather than overwriting the old file in place.
+4. Mark abandoned plans `disposition: archived` (or `archived: true` / `status: archived`), or list them under `archived_plans:` / `archive:` in `_master.plan.md` frontmatter (see the sketch below); the next auditor run moves them to `.plan.archives/` under the same `plans/` directory.
+
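+A minimal `_master.plan.md` frontmatter sketch for step 4 (illustrative file names):
+
+```yaml
+---
+archived_plans:
+  - old-idea.plan.md
+  - spike-2025.plan.md
+---
+```
+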
### Ops/docs to keep current (when behaviour/run paths/tools change)
- `.braindrain/SESSION_PROGRESS.md`
diff --git a/scripts/daily_plan_audit.py b/scripts/daily_plan_audit.py
index 32b8012..8b1e03e 100644
--- a/scripts/daily_plan_audit.py
+++ b/scripts/daily_plan_audit.py
@@ -97,6 +97,7 @@
"backlogged",
"scratched",
"implemented",
+ "archived",
)
DEFAULT_DISPOSITION = "active"
@@ -340,7 +341,7 @@ class PlanCard:
@property
def is_active_for_triage(self) -> bool:
"""True when the plan should produce next-actions output."""
- return self.disposition not in {"scratched", "implemented"}
+ return self.disposition not in {"scratched", "implemented", "archived"}
def derive_ide_tag(rel_path: str) -> str:
@@ -440,13 +441,107 @@ def parse_args() -> argparse.Namespace:
"auto-discovers it under known IDE plan dirs."
),
)
+ parser.add_argument(
+ "--skip-archive",
+ action="store_true",
+ help="Do not move plans marked archived into .plan.archives/ (for tests).",
+ )
return parser.parse_args()
+def plan_marked_archived(fm: dict[str, object]) -> bool:
+ """True when frontmatter says this plan should live under ``.plan.archives/``."""
+ disp = str(fm.get("disposition") or "").strip().lower()
+ if disp == "archived":
+ return True
+ st = str(fm.get("status") or "").strip().lower()
+ if st == "archived":
+ return True
+ arch = fm.get("archived")
+ if isinstance(arch, bool):
+ return arch
+ if str(arch).strip().lower() in ("true", "yes", "1"):
+ return True
+ return False
+
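+# Hedged examples: plan_marked_archived({"archived": "yes"}) is True, and
+# matching is case-insensitive, so {"status": "ARCHIVED"} is True while
+# {"disposition": "Active"} is False.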
+
+def relocate_archived_plans(repo_root: Path) -> list[str]:
+ """Move archived ``*.plan.md`` files into ``/plans/.plan.archives/``.
+
+ A plan is archived when its own frontmatter matches `plan_marked_archived`, or
+ when ``_master.plan.md`` lists it under ``archived_plans:`` or ``archive:``
+ (YAML list of paths relative to that ``plans/`` directory).
+
+ Returns repo-relative paths of files **after** the move (under ``.plan.archives/``).
+ """
+ moved_to: list[str] = []
+ ts = dt.datetime.now().strftime("%Y%m%d-%H%M%S")
+
+ for folder in KNOWN_IDE_DOTFOLDERS:
+ plans_dir = repo_root / folder / "plans"
+ if not plans_dir.is_dir():
+ continue
+ archive_dir = plans_dir / ".plan.archives"
+ to_move: set[Path] = set()
+
+ master_path = plans_dir / "_master.plan.md"
+ if master_path.is_file():
+ mfm = parse_plan_frontmatter(master_path)
+ raw = mfm.get("archived_plans")
+ if raw is None:
+ raw = mfm.get("archive")
+ if isinstance(raw, str):
+ raw = [raw]
+ if isinstance(raw, list):
+ for entry in raw:
+ rel = str(entry).strip().strip('"').strip("'")
+ if not rel or rel.startswith(("/", "http://", "https://")):
+ continue
+ candidate = (plans_dir / rel).resolve()
+ try:
+ candidate.relative_to(repo_root.resolve())
+ except ValueError:
+ continue
+ if candidate.is_file() and candidate.suffix.lower() == ".md":
+ to_move.add(candidate)
+
+ for path in sorted(plans_dir.glob("*.plan.md")):
+ if path.name.startswith("_master"):
+ continue
+ if plan_marked_archived(parse_plan_frontmatter(path)):
+ to_move.add(path.resolve())
+
+ for src in sorted(to_move, key=lambda p: p.as_posix()):
+ if not src.is_file():
+ continue
+ if src.name.startswith("_master"):
+ continue
+ try:
+ rel_check = src.resolve().relative_to(plans_dir.resolve())
+ except ValueError:
+ continue
+ if rel_check.parts[:1] == (".plan.archives",):
+ continue
+
+ archive_dir.mkdir(parents=True, exist_ok=True)
+ dest = archive_dir / src.name
+ if dest.exists():
+ dest = archive_dir / f"{src.stem}.bak.{ts}{src.suffix}"
+ shutil.move(str(src), str(dest))
+ try:
+ moved_to.append(dest.resolve().relative_to(repo_root.resolve()).as_posix())
+ except ValueError:
+ moved_to.append(dest.as_posix())
+
+ return moved_to
+
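+# Hedged note: a name collision inside .plan.archives/ gets a timestamped
+# suffix, e.g. a second "x.plan.md" lands as "x.plan.bak.<YYYYMMDD-HHMMSS>.md".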
+
def is_secondary_doc(path: Path) -> bool:
lowered = path.as_posix().lower()
if "/.git/" in lowered:
return False
+ if "/.plan.archives/" in lowered:
+ return False
if "/create-subagent/" in lowered:
return False
if "/.braindrain/plan-reports/" in lowered:
@@ -1482,6 +1577,10 @@ def main() -> int:
out_dir = repo_root / out_dir
out_dir.mkdir(parents=True, exist_ok=True)
+ archive_moves: list[str] = []
+ if not getattr(args, "skip_archive", False):
+ archive_moves = relocate_archived_plans(repo_root)
+
default_owner = resolve_default_owner(repo_root)
primary, secondary = discover_sources(repo_root)
@@ -1504,6 +1603,13 @@ def main() -> int:
items,
cards_by_source=cards_by_source,
)
+ if archive_moves:
+ report = (
+ report
+ + "\n\n## Archived plan files (this run)\n\n"
+ + "\n".join(f"- `{p}`" for p in archive_moves)
+ + "\n"
+ )
dated_path = out_dir / f"plan-audit-{args.report_date}.md"
dated_path.write_text(report, encoding="utf-8")
diff --git a/tests/test_plan_auditor_master.py b/tests/test_plan_auditor_master.py
new file mode 100644
index 0000000..b6e5ae8
--- /dev/null
+++ b/tests/test_plan_auditor_master.py
@@ -0,0 +1,90 @@
+"""Tests for plan archive relocation and master-list archive hints."""
+
+from __future__ import annotations
+
+import importlib.util
+import shutil
+import sys
+import uuid
+from collections.abc import Iterator
+from pathlib import Path
+
+import pytest
+
+_REPO_ROOT = Path(__file__).resolve().parent.parent
+_AUDIT_SCRIPT_PATH = _REPO_ROOT / "scripts" / "daily_plan_audit.py"
+
+
+@pytest.fixture
+def tmp_project_dir() -> Iterator[Path]:
+ d = _REPO_ROOT / ".pytest_tmp" / f"audit-{uuid.uuid4().hex[:12]}"
+ d.mkdir(parents=True, exist_ok=True)
+ try:
+ yield d
+ finally:
+        shutil.rmtree(d, ignore_errors=True)
+
+
+def _load_audit_module():
+ spec = importlib.util.spec_from_file_location("daily_plan_audit", _AUDIT_SCRIPT_PATH)
+ assert spec and spec.loader
+ module = importlib.util.module_from_spec(spec)
+ sys.modules[spec.name] = module
+ spec.loader.exec_module(module)
+ return module
+
+
+def test_relocate_archived_plan_frontmatter(tmp_project_dir: Path) -> None:
+ m = _load_audit_module()
+ plans = tmp_project_dir / ".cursor" / "plans"
+ plans.mkdir(parents=True)
+ (plans / "stale.plan.md").write_text(
+ "---\narchived: true\n---\n\n# Stale\n- [ ] done\n",
+ encoding="utf-8",
+ )
+ moved = m.relocate_archived_plans(tmp_project_dir)
+ assert moved
+ assert any("stale.plan.md" in p for p in moved)
+ assert not (plans / "stale.plan.md").is_file()
+ assert (plans / ".plan.archives" / "stale.plan.md").is_file()
+
+
+def test_relocate_archived_via_master_list(tmp_project_dir: Path) -> None:
+ m = _load_audit_module()
+ plans = tmp_project_dir / ".cursor" / "plans"
+ plans.mkdir(parents=True)
+ (plans / "gone.plan.md").write_text("# Gone\n- [ ] x\n", encoding="utf-8")
+ (plans / "_master.plan.md").write_text(
+ "---\n"
+ "archived_plans:\n"
+ " - gone.plan.md\n"
+ "---\n\n"
+ "# Master\n",
+ encoding="utf-8",
+ )
+ moved = m.relocate_archived_plans(tmp_project_dir)
+ assert moved
+ assert not (plans / "gone.plan.md").is_file()
+ assert (plans / ".plan.archives" / "gone.plan.md").is_file()
+
+
+def test_relocate_archived_via_master_scalar_archive_key(tmp_project_dir: Path) -> None:
+ m = _load_audit_module()
+ plans = tmp_project_dir / ".cursor" / "plans"
+ plans.mkdir(parents=True)
+ (plans / "solo.plan.md").write_text("# Solo\n", encoding="utf-8")
+ (plans / "_master.plan.md").write_text(
+ "---\narchive: solo.plan.md\n---\n\n# Master\n",
+ encoding="utf-8",
+ )
+ moved = m.relocate_archived_plans(tmp_project_dir)
+ assert moved
+ assert not (plans / "solo.plan.md").is_file()
+ assert (plans / ".plan.archives" / "solo.plan.md").is_file()
+
+
+def test_plan_marked_archived_disposition() -> None:
+ m = _load_audit_module()
+ assert m.plan_marked_archived({"disposition": "archived"})
+ assert m.plan_marked_archived({"status": "archived"})
+ assert not m.plan_marked_archived({"disposition": "active"})
diff --git a/tests/test_scriptlib.py b/tests/test_scriptlib.py
index 1b65fab..8d761c7 100644
--- a/tests/test_scriptlib.py
+++ b/tests/test_scriptlib.py
@@ -211,14 +211,10 @@ def test_librarian_templates_require_reuse_decision():
agent = Path("/Volumes/devnvme/Development/BRAIN_MCP_HUB/config/templates/agents/librarian.md").read_text(
encoding="utf-8"
)
- cursor_agent = Path(
- "/Volumes/devnvme/Development/BRAIN_MCP_HUB/config/templates/cursor-subagents/librarian.md"
- ).read_text(encoding="utf-8")
assert "reuseDecision" in skill
assert "must not be written until" in skill
assert "approvalRequired" in agent
- assert "approvalRequired" in cursor_agent
def test_deploy_templates_includes_guidance_only_when_enabled(tmp_path, monkeypatch):
diff --git a/tests/test_workspace_primer_hooks.py b/tests/test_workspace_primer_hooks.py
index fade32c..c72a9c0 100644
--- a/tests/test_workspace_primer_hooks.py
+++ b/tests/test_workspace_primer_hooks.py
@@ -17,6 +17,7 @@
CURSOR_HOOK_TEMPLATES_DIR,
compact_prime_result_for_mcp,
deploy_cursor_hook_templates,
+ deploy_subagent_templates,
)
_REPO_ROOT = Path(__file__).resolve().parent.parent
@@ -78,6 +79,20 @@ def test_deploy_cursor_hook_templates_skips_existing_without_sync(tmp_project_di
assert all(v.get("action") == "skipped_existing" for v in out2.values())
+def test_deploy_subagent_templates_writes_codex_agents(tmp_project_dir: Path) -> None:
+ out = deploy_subagent_templates(
+ tmp_project_dir,
+ sync_subagents=False,
+ to_cursor=False,
+ to_codex=True,
+ )
+ assert out
+ codex_agents = tmp_project_dir / ".codex" / "agents"
+ assert codex_agents.is_dir()
+ assert (codex_agents / "coordinator.md").is_file()
+ assert (codex_agents / "daily-plan-auditor.md").is_file()
+
+
def test_compact_prime_result_includes_cursor_hooks_summary() -> None:
prime_like = {
"ok": True,
From 86ecfcdb3088ef60b0d4aa5bfdd713e302d8de7c Mon Sep 17 00:00:00 2001
From: ettienne <12529192+jhweb@users.noreply.github.com>
Date: Wed, 6 May 2026 17:41:03 +0200
Subject: [PATCH 10/10] feat: add model provenance tracking for planning
workflows
Add configurable provenance settings and propagate model/date metadata through plan audit outputs and agent templates so planning runs can be traced by model and cursor mode. This also hardens the observe stop-hook output contract to avoid hook JSON parsing regressions.
Co-authored-by: Cursor
---
CHANGELOG.md | 3 +
README.md | 32 ++-
braindrain/config.py | 1 +
braindrain/server.py | 105 ++++++++
braindrain/types.py | 1 +
config/hub_config.yaml | 14 ++
config/templates/agents/coordinator.md | 13 +
config/templates/agents/daily-plan-auditor.md | 6 +
.../templates/cursor/hooks/on-stop-observe.sh | 3 +-
config/templates/ruler/RULES.md | 18 ++
scripts/daily_plan_audit.py | 231 +++++++++++++++++-
tests/test_plan_auditor_master.py | 46 ++++
tests/test_workspace_primer_hooks.py | 42 ++++
13 files changed, 506 insertions(+), 9 deletions(-)
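Before the file-by-file diffs, a sketch of the trace record this patch writes: one JSON object per line of `model-trace.jsonl`. Field names follow `record_model_trace_event` in `braindrain/server.py` below; the values here are examples only.

```python
# Illustrative model-trace event; one such JSON object is appended per line
# to .braindrain/plan-reports/model-trace.jsonl (values are examples).
import json
from datetime import datetime

now = datetime.now()
event = {
    "timestamp": now.isoformat(),
    "date": now.strftime("%Y-%m-%d"),  # provenance.date_format
    "actor": "research",               # sub-agent or tool name
    "event": "run",                    # e.g. start / finish / run
    "source": "coordinator",
    "model_name": "composer-2",        # example model slug
    "cursor_mode": "auto",             # auto | manual
    "metadata": {},
}
print(json.dumps(event, ensure_ascii=False))
```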
diff --git a/CHANGELOG.md b/CHANGELOG.md
index c0f6420..5c452da 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -11,10 +11,13 @@ The format is based on keeping a clear, user-facing history. Version in `VERSION
- **Scriptlib modularization**: scriptlib now treats project-local `.scriptlib/` and shared `~/.braindrain/scriptlib` as distinct layers, with promotion-only flow into the shared personal catalog.
- **New scriptlib MCP tools**: added promote, update discovery/application, maintenance, and catalog status flows for local/shared script operations.
- **Librarian-first routing**: freestanding reusable scripts are now expected to go through librarian decision flow (`reuse`, `fork`, or `new`) before a fresh script is created.
+- **Model provenance controls**: added `provenance` config toggles for chat footer scope, plan metadata stamping, and subagent model tracing (`.braindrain/plan-reports/model-trace.jsonl`), plus audit report frontmatter fields for model/date/cursor mode attribution.
+- **Cursor stop hook stability**: `on-stop-observe.sh` is now output-silent by default so Cursor stop-hook JSON parsing is not broken by plain-text stdout.
### For contributors
- **Subagent templates**: single source tree `config/templates/agents/` deploys to `.cursor/agents/` and/or `.codex/agents/` depending on the resolved IDE set; duplicate `cursor-subagents/` and `codex-subagents/` template dirs were removed. Skills remain under `config/templates/cursor-skills/`. Added `daily-plan-auditor` agent and planning close-out guidance in Ruler `RULES.md`. Planning audit script moves `archived` plans into `/plans/.plan.archives/`. Tests: `tests/test_plan_auditor_master.py`.
+- Added provenance-aware runtime/tooling paths in `braindrain/server.py`, `braindrain/config.py`, `braindrain/types.py`, and `scripts/daily_plan_audit.py`, with tests in `tests/test_plan_auditor_master.py` and `tests/test_provenance_config.py`.
## [1.0.3] — 2026-04-10
diff --git a/README.md b/README.md
index 56b4944..f3196a2 100644
--- a/README.md
+++ b/README.md
@@ -77,7 +77,7 @@ This keeps passwords/session secrets out of shareable dashboard scaffold paths a
| Tool | When to use |
| -------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `list_workflows()` | See what multi-step workflows are available. |
-| `prime_workspace(...)` | Prime a project for AI agent use. **Parameters** include `sync_subagents`, `sync_templates`, `bundle` (`core` default), `codex_agent_targets`, `patch_user_cursor_mcp`, `compact_mcp_response`. **First run**: auto-detects current IDE/CLI (`CURSOR_*` → `TERM_PROGRAM` → dotfolders → fallback `cursor`); response includes `**detect_method`**. Uses `**config/bundles/.yaml**` for bundle metadata. Always rewrites **minimal `.ruler/ruler.toml`** when targeting specific agents. Deploys Cursor/Codex subagent files from templates (`**subagents**`) and manages Codex `**BRAINDRAIN SUBAGENTS**` in `.codex/config.toml` when allowed (`**codex_subagent_config**`). After apply, syncs `**.cursor/rules/braindrain.mdc**` and `**project-rules.mdc**` from `.ruler/RULES.md` — see `**cursor_rules**`. When Cursor is in scope, copies `**config/templates/cursor/**` → `**.cursor/hooks.json**` and `**.cursor/hooks/*.sh**` — see `**cursor_hooks**` (create-only; `**sync_templates=true**` refreshes Ruler sources and hook templates). `**sync_subagents=true**` updates existing subagent files and managed Codex blocks (backup-first). Set `**all_agents=True**` for the full template. |
+| `prime_workspace(...)` | Prime a project for AI agent use. **Parameters** include `sync_subagents`, `sync_templates`, `bundle` (`core` default), `codex_agent_targets`, `patch_user_cursor_mcp`, `compact_mcp_response`. **First run**: auto-detects current IDE/CLI (`CURSOR_*` → `TERM_PROGRAM` → dotfolders → fallback `cursor`); response includes `**detect_method`**. Uses `**config/bundles/.yaml`** for bundle metadata. Always rewrites **minimal `.ruler/ruler.toml`** when targeting specific agents. Deploys Cursor/Codex subagent files from templates (`**subagents**`) and manages Codex `**BRAINDRAIN SUBAGENTS**` in `.codex/config.toml` when allowed (`**codex_subagent_config**`). After apply, syncs `**.cursor/rules/braindrain.mdc**` and `**project-rules.mdc**` from `.ruler/RULES.md` — see `**cursor_rules**`. When Cursor is in scope, copies `**config/templates/cursor/**` → `**.cursor/hooks.json**` and `**.cursor/hooks/*.sh**` — see `**cursor_hooks**` (create-only; `**sync_templates=true**` refreshes Ruler sources and hook templates). `**sync_subagents=true**` updates existing subagent files and managed Codex blocks (backup-first). Set `**all_agents=True**` for the full template. |
| `init_project_memory(path, dry_run)` | Initialize project memory artifacts only (`.braindrain/AGENT_MEMORY.md` and `.cursor/hooks/state/continual-learning-index.json`). Migrates legacy `.devdocs/` on first call. Idempotent. |
| `scriptlib_enable(path, scope, harvest, dry_run)` | Hard-opt-in project or global scriptlib. Project enable can immediately harvest reusable workspace scripts into `.scriptlib/`. |
| `scriptlib_harvest_workspace(path, dry_run)` | Recursively copy script-like files from the workspace into the local project scriptlib catalog, honoring ignore rules. |
@@ -214,7 +214,7 @@ Install logs are written to `.braindrain/install-logs/install-.log`.
### Arch / Linux dev machines
-On Arch/rolling-release distros (e.g. EndeavourOS) where `python3` often points at Python 3.14:
+On Arch/rolling-release distros where `python3` often points at Python 3.14:
- The installer supports Python 3.11–3.14 and prefers `python3.14` when available.
- Make sure system `python`/`python3` and `pip` are installed via your package manager (e.g. `pacman -S python python-pip`).
@@ -226,7 +226,7 @@ cd braindrain
./install.sh
```
-is expected to succeed; if it doesn’t, capture the full log from `.braindrain/install-logs/` and append a new section to `QA-Logs/bdqadebug.md` (Lenovo/Arch debug log) before iterating.
+is expected to succeed; if it doesn’t, capture the full log from `.braindrain/install-logs/` and append a new section to `QA-Logs/bdqadebug.md` (debug log) before iterating.
---
@@ -387,6 +387,29 @@ Environment variables (copy `.env.example` to `.env.dev` to start):
The server auto-loads `.env.dev` → `.env.prod` → `.env` (first found, non-overriding of existing env vars).
+### Model provenance toggles
+
+`config/hub_config.yaml` now includes a `provenance` block used by planning/report tooling:
+
+- `provenance.chat_footer.enabled` and `provenance.chat_footer.scope` (`all_agents`, `planning_only`, `off`)
+- `provenance.plan_metadata.enabled`
+- `provenance.subagent_trace.enabled`
+- `provenance.subagent_trace.path` (default: `.braindrain/plan-reports/model-trace.jsonl`)
+- `provenance.date_format` (default: `%Y-%m-%d`)
+
+The planning auditor writes model metadata into report frontmatter:
+`created_by_model`, `created_at`, `last_modified_by_model`, `last_modified_at`, `cursor_mode`, and `subagent_models_used`.
+
+To force explicit provenance values during audit runs:
+
+```bash
+python3 scripts/daily_plan_audit.py \
+ --repo-root . \
+ --report-date "$(date +%Y-%m-%d)" \
+ --model-name "Codex 5.3" \
+ --cursor-mode auto
+```
+
---
## Repo structure
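A hedged companion to the README section above: one way a consumer could read the stamped provenance back out of a generated report. The splitting helper is illustrative, not part of the codebase; it assumes PyYAML and the `---`-fenced frontmatter the auditor emits.

```python
# Illustrative reader for the provenance frontmatter in
# .braindrain/plan-reports/plan-audit-<date>.md (helper not in the codebase).
from pathlib import Path

import yaml


def read_report_provenance(report_path: Path) -> dict:
    text = report_path.read_text(encoding="utf-8")
    if not text.startswith("---"):
        return {}
    # Frontmatter sits between the first two --- fences.
    _, frontmatter, _ = text.split("---", 2)
    data = yaml.safe_load(frontmatter) or {}
    return data.get("provenance", {})


# prov = read_report_provenance(Path(".braindrain/plan-reports/plan-audit-2026-06-02.md"))
# prov["created_by_model"], prov["subagent_models_used"]
```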
@@ -438,6 +461,7 @@ braindrain/
- `.cursor/hooks/on-stop-observe.sh` (lightweight stop-event observation)
- `.cursor/hooks/on-stop-gitops.sh` (TASK-GRAPH branch queueing)
- `.cursor/hooks/on-stop-daily-plan-audit.sh` (daily-gated planning audit report)
+ - Hook output contract: stop-hook scripts should be silent unless they intentionally emit valid JSON. Plain text output can cause Cursor hook-response JSON parse failures.
Edit templates under `config/templates/cursor/` in this repo, then re-prime consumer projects to roll out hook changes.
- **Subagent templates**: canonical source is `config/templates/agents/*.md`. `prime_workspace()` copies that tree to:
- `.cursor/agents/` when Cursor is in the resolved agent set, and
@@ -470,7 +494,7 @@ braindrain/
## Memory layer status and roadmap
-**Roadmap and release TODOs** ship from the repo root as `**ROADMAP.md`** and `**TODOS.md**`. Use `**.devdocs/**` only on your machine for private drafts (that path is gitignored and must not be committed).
+**Roadmap and release TODOs** ship from the repo root as **`ROADMAP.md`** and **`TODOS.md`**. Use **`.devdocs/`** only on your machine for private drafts (that path is gitignored and must not be committed).
Implemented now (runtime behavior in this repo):
diff --git a/braindrain/config.py b/braindrain/config.py
index a9ba5e9..99603c4 100644
--- a/braindrain/config.py
+++ b/braindrain/config.py
@@ -107,6 +107,7 @@ def _parse_config(self, raw: dict) -> ConfigData:
lessons=raw.get("lessons", {}),
dreaming=raw.get("dreaming", {}),
provider_context=raw.get("provider_context", {}),
+ provenance=raw.get("provenance", {}),
)
def reload(self) -> None:
diff --git a/braindrain/server.py b/braindrain/server.py
index 9094f40..0235883 100644
--- a/braindrain/server.py
+++ b/braindrain/server.py
@@ -85,6 +85,60 @@
_dream_engine: Optional[DreamEngine] = None
+def _provenance_settings() -> dict:
+ defaults = {
+ "enabled": True,
+ "date_format": "%Y-%m-%d",
+ "chat_footer": {"enabled": True, "scope": "all_agents"},
+ "plan_metadata": {"enabled": True},
+ "subagent_trace": {
+ "enabled": True,
+ "path": ".braindrain/plan-reports/model-trace.jsonl",
+ },
+ }
+ configured = config.get("provenance", {}) or {}
+ merged = dict(defaults)
+ merged.update(configured)
+ merged["chat_footer"] = {
+ **defaults["chat_footer"],
+ **(configured.get("chat_footer", {}) if isinstance(configured, dict) else {}),
+ }
+ merged["plan_metadata"] = {
+ **defaults["plan_metadata"],
+ **(configured.get("plan_metadata", {}) if isinstance(configured, dict) else {}),
+ }
+ merged["subagent_trace"] = {
+ **defaults["subagent_trace"],
+ **(configured.get("subagent_trace", {}) if isinstance(configured, dict) else {}),
+ }
+ return merged
+
+
+def _effective_model_name(explicit_model: str | None = None) -> str:
+ if explicit_model and explicit_model.strip():
+ return explicit_model.strip()
+ for env_key in (
+ "BRAINDRAIN_ACTIVE_MODEL",
+ "CURSOR_ACTIVE_MODEL",
+ "CURSOR_MODEL",
+ "MODEL_NAME",
+ ):
+ value = os.environ.get(env_key, "").strip()
+ if value:
+ return value
+ return "auto"
+
+
+def _effective_cursor_mode() -> str:
+ mode = (
+ os.environ.get("CURSOR_MODEL_SELECTION", "")
+ or os.environ.get("BRAINDRAIN_CURSOR_MODE", "")
+ ).strip().lower()
+ if mode in {"manual", "auto"}:
+ return mode
+ return "auto"
+
+
def _get_context_mode_client() -> Optional[ContextModeClient]:
global _context_mode_client
if _context_mode_client is not None:
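The nested re-merge in `_provenance_settings` above is deliberate: a bare `dict.update` would let a partial `chat_footer:` override drop the sibling defaults. A standalone sketch of the same pattern, independent of the server module:

```python
# Why _provenance_settings re-merges nested blocks: a partial override
# keeps the sibling defaults instead of replacing the whole sub-dict.
defaults = {
    "enabled": True,
    "chat_footer": {"enabled": True, "scope": "all_agents"},
}
configured = {"chat_footer": {"scope": "planning_only"}}  # partial user config

merged = dict(defaults)
merged.update(configured)  # naive update: chat_footer would lose "enabled"
merged["chat_footer"] = {**defaults["chat_footer"], **configured.get("chat_footer", {})}

assert merged["chat_footer"] == {"enabled": True, "scope": "planning_only"}
```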
@@ -500,6 +554,57 @@ async def get_token_dashboard() -> dict:
return telemetry.snapshot()
+@mcp.tool()
+def get_provenance_settings() -> dict:
+ """Return current model provenance settings and effective defaults."""
+ return {
+ "provenance": _provenance_settings(),
+ "effective_model": _effective_model_name(),
+ "cursor_mode": _effective_cursor_mode(),
+ "timestamp": datetime.now().isoformat(),
+ }
+
+
+@mcp.tool()
+def record_model_trace_event(
+ actor: str,
+ model_name: str = "",
+ event: str = "run",
+ source: str = "manual",
+ metadata: dict | None = None,
+) -> dict:
+ """Append a machine-local model provenance event for audits and plan reports."""
+ settings = _provenance_settings()
+ trace_cfg = settings.get("subagent_trace", {}) if isinstance(settings, dict) else {}
+ enabled = bool(trace_cfg.get("enabled", True)) and bool(settings.get("enabled", True))
+ if not enabled:
+ return {
+ "ok": True,
+ "status": "disabled",
+ "message": "provenance.subagent_trace is disabled",
+ }
+
+ trace_path = Path(str(trace_cfg.get("path") or ".braindrain/plan-reports/model-trace.jsonl"))
+ if not trace_path.is_absolute():
+ trace_path = Path.cwd() / trace_path
+ trace_path.parent.mkdir(parents=True, exist_ok=True)
+
+ now = datetime.now()
+ payload = {
+ "timestamp": now.isoformat(),
+ "date": now.strftime(str(settings.get("date_format", "%Y-%m-%d"))),
+ "actor": actor,
+ "event": event,
+ "source": source,
+ "model_name": _effective_model_name(model_name),
+ "cursor_mode": _effective_cursor_mode(),
+ "metadata": metadata or {},
+ }
+ with open(trace_path, "a", encoding="utf-8") as handle:
+ handle.write(json.dumps(payload, ensure_ascii=False) + "\n")
+ return {"ok": True, "trace_path": str(trace_path), "event": payload}
+
+
@mcp.tool()
def evaluate_memory_candidate(candidate: str) -> dict:
"""Evaluate whether a memory candidate can be promoted safely."""
diff --git a/braindrain/types.py b/braindrain/types.py
index be729e4..e911f0b 100644
--- a/braindrain/types.py
+++ b/braindrain/types.py
@@ -68,3 +68,4 @@ class ConfigData:
lessons: dict = field(default_factory=dict)
dreaming: dict = field(default_factory=dict)
provider_context: dict = field(default_factory=dict)
+ provenance: dict = field(default_factory=dict)
diff --git a/config/hub_config.yaml b/config/hub_config.yaml
index f2d548e..e680ae9 100644
--- a/config/hub_config.yaml
+++ b/config/hub_config.yaml
@@ -252,3 +252,17 @@ cost_tracking:
- module_attribution
alert_threshold_usd_per_session: 0.50
dashboard: true
+
+# ─── MODEL PROVENANCE ───────────────────────────────────────────
+# Controls model/date attribution in chat, plans, and subagent traces.
+provenance:
+ enabled: true
+ date_format: "%Y-%m-%d"
+ chat_footer:
+ enabled: true
+ scope: all_agents # all_agents | planning_only | off
+ plan_metadata:
+ enabled: true
+ subagent_trace:
+ enabled: true
+ path: ".braindrain/plan-reports/model-trace.jsonl"
diff --git a/config/templates/agents/coordinator.md b/config/templates/agents/coordinator.md
index ffd4377..217d727 100644
--- a/config/templates/agents/coordinator.md
+++ b/config/templates/agents/coordinator.md
@@ -37,6 +37,19 @@ For each stage:
5. **Checkpoint** — update `.cursor/PROGRESS.md` after each task
6. **Advance** — when all stage tasks pass, move to next stage
+## Model provenance requirements
+
+- Respect `config/hub_config.yaml` `provenance.*` toggles.
+- When dispatching a sub-agent, include the explicit model slug when available and log the run by calling `record_model_trace_event` (sketched after this file's diff) with:
+  - `actor`: the sub-agent name
+  - `model_name`: the resolved model (or `auto` when the host does not expose one)
+ - `event`: `start` and `finish`
+ - `source`: `coordinator`
+- For plan edits and planning close-out responses, include frontmatter provenance fields:
+ `created_by_model`, `created_at`, `last_modified_by_model`, `last_modified_at`, `cursor_mode`.
+- If `provenance.chat_footer.enabled=true` and scope allows, append:
+  `model: <model> | date: <date>`.
+
When you **write or materially edit** any `*.plan.md` under an IDE `plans/` tree, finish with planning close-out per Ruler `RULES.md`: update `_master.plan.md` links if needed, then invoke `daily-plan-auditor` or run `scripts/daily_plan_audit.py` (do not rely only on the daily-gated stop hook).
## Progress Tracking
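A minimal sketch of the start/finish logging the coordinator checklist above asks for. Only the `record_model_trace_event` keyword arguments (`actor`, `model_name`, `event`, `source`) come from this patch; the wrapper and the `run_subagent` callable are hypothetical.

```python
# Hypothetical coordinator-side wrapper; only the record_model_trace_event
# keywords (actor/model_name/event/source) come from this patch.
from typing import Callable


def dispatch_with_trace(
    record_model_trace_event: Callable[..., dict],
    run_subagent: Callable[[str, str], object],  # hypothetical dispatcher
    name: str,
    model: str,
):
    record_model_trace_event(actor=name, model_name=model, event="start", source="coordinator")
    try:
        return run_subagent(name, model)
    finally:
        record_model_trace_event(actor=name, model_name=model, event="finish", source="coordinator")
```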
diff --git a/config/templates/agents/daily-plan-auditor.md b/config/templates/agents/daily-plan-auditor.md
index 7f105c7..a95bdf6 100644
--- a/config/templates/agents/daily-plan-auditor.md
+++ b/config/templates/agents/daily-plan-auditor.md
@@ -57,3 +57,9 @@ Return JSON only:
- Prefer the generated reports under `.braindrain/plan-reports/` over re-deriving scores in chat.
- Replan work: prefer a **new** plan file and link supersession in `_master.plan.md` rather than silently overwriting history (see coordinator / architect guidance).
- Research-heavy follow-ups: delegate to the `research` subagent, then fold findings back into the parent plan.
+- Ensure plan/report frontmatter contains model provenance fields:
+ `created_by_model`, `created_at`, `last_modified_by_model`, `last_modified_at`, `cursor_mode`.
+- Include sub-agent model rollup (`subagent_models_used`) when trace data exists at
+ `.braindrain/plan-reports/model-trace.jsonl`.
+- If chat footer policy is enabled for planning scope, append:
+  `model: <model> | date: <date>`.
diff --git a/config/templates/cursor/hooks/on-stop-observe.sh b/config/templates/cursor/hooks/on-stop-observe.sh
index ff1677b..ef5be65 100644
--- a/config/templates/cursor/hooks/on-stop-observe.sh
+++ b/config/templates/cursor/hooks/on-stop-observe.sh
@@ -55,7 +55,7 @@ METADATA_SQL="$(sql_escape "${METADATA_JSON}")"
SESSION_SQL="$(sql_escape "${SESSION_ID}")"
TIMESTAMP="$(date +%s)"
-sqlite3 "${DB_PATH}" </dev/null 2>/dev/null < | date: ` using `provenance.date_format`.
+- When model identity is not available from the host, use `model: auto` (never invent model names).
+- In `planning_only` scope, when creating or materially rewriting any `*.plan.md`, include YAML frontmatter metadata at first write (not later patch-up), including:
+ - plan metadata: `name`, `owner`, `dri`, `disposition`, `priority`, `parent`, `ide`, `isProject`, `todos`
+ - provenance metadata: `created_by_model`, `created_at`, `last_modified_by_model`, `last_modified_at`, `cursor_mode`
+- For plan files and generated plan reports, include provenance in YAML frontmatter:
+ - `created_by_model`, `created_at`, `last_modified_by_model`, `last_modified_at`, `cursor_mode`.
+- For sub-agent operations, record model provenance events to
+ `.braindrain/plan-reports/model-trace.jsonl` when `provenance.subagent_trace.enabled=true`.
+
### Ops/docs to keep current (when behaviour/run paths/tools change)
- `.braindrain/SESSION_PROGRESS.md`
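The footer those rules mandate is mechanical to render; a minimal sketch, assuming the `provenance` block shape from `config/hub_config.yaml` and a host that may not expose a model name:

```python
# Minimal chat-footer rendering per the RULES.md policy above; the settings
# shape mirrors the provenance block in config/hub_config.yaml.
from datetime import date


def chat_footer(settings: dict, model: str | None, planning: bool) -> str:
    footer = settings.get("chat_footer", {})
    if not footer.get("enabled", True) or footer.get("scope") == "off":
        return ""
    if footer.get("scope") == "planning_only" and not planning:
        return ""
    today = date.today().strftime(settings.get("date_format", "%Y-%m-%d"))
    # Never invent a model name: fall back to "auto" when the host hides it.
    return f"model: {model or 'auto'} | date: {today}"


print(chat_footer({"chat_footer": {"enabled": True, "scope": "all_agents"}}, None, False))
# -> model: auto | date: <today>
```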
diff --git a/scripts/daily_plan_audit.py b/scripts/daily_plan_audit.py
index 8b1e03e..a1f07a6 100644
--- a/scripts/daily_plan_audit.py
+++ b/scripts/daily_plan_audit.py
@@ -12,6 +12,7 @@
import dataclasses
import datetime as dt
import getpass
+import json
import os
import re
import shutil
@@ -122,6 +123,50 @@
)
+def resolve_model_name(model_name: str | None = None) -> str:
+ if model_name and model_name.strip():
+ return model_name.strip()
+ for key in (
+ "BRAINDRAIN_ACTIVE_MODEL",
+ "CURSOR_ACTIVE_MODEL",
+ "CURSOR_MODEL",
+ "MODEL_NAME",
+ ):
+ value = os.environ.get(key, "").strip()
+ if value:
+ return value
+ return "auto"
+
+
+def resolve_cursor_mode(cursor_mode: str | None = None) -> str:
+ mode = (
+ cursor_mode
+ or os.environ.get("CURSOR_MODEL_SELECTION", "")
+ or os.environ.get("BRAINDRAIN_CURSOR_MODE", "")
+ ).strip().lower()
+ if mode in {"auto", "manual"}:
+ return mode
+ return "auto"
+
+
+def load_trace_models(trace_path: Path, limit: int = 1000) -> list[str]:
+ if not trace_path.is_file():
+ return []
+ models: list[str] = []
+ for raw in trace_path.read_text(encoding="utf-8", errors="ignore").splitlines()[-limit:]:
+ line = raw.strip()
+ if not line:
+ continue
+ try:
+ payload = json.loads(line)
+ except json.JSONDecodeError:
+ continue
+ model_name = str(payload.get("model_name") or "").strip()
+ if model_name:
+ models.append(model_name)
+ return sorted(set(models))
+
+
def _strip_quotes(value: str) -> str:
if len(value) >= 2 and value[0] == value[-1] and value[0] in {'"', "'"}:
return value[1:-1]
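A quick demonstration of the tolerance and de-duplication in `load_trace_models` above; the trace content is illustrative, and the commented call assumes the module is loaded as in the tests.

```python
# load_trace_models skips blank/malformed lines and returns a sorted,
# de-duplicated model rollup (trace content illustrative).
import tempfile
from pathlib import Path

with tempfile.TemporaryDirectory() as tmp:
    trace = Path(tmp) / "model-trace.jsonl"
    trace.write_text(
        '{"model_name": "composer-2"}\n'
        "not json\n"
        "\n"
        '{"model_name": "composer-2"}\n'
        '{"model_name": "gpt-5.4-medium"}\n',
        encoding="utf-8",
    )
    # With the audit module loaded as in the tests:
    # load_trace_models(trace) == ["composer-2", "gpt-5.4-medium"]
```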
@@ -433,6 +478,21 @@ def parse_args() -> argparse.Namespace:
)
parser.add_argument("--report-date", default=dt.date.today().isoformat())
parser.add_argument("--trigger", default="cursor-stop-daily-gated")
+ parser.add_argument(
+ "--model-name",
+ default="",
+ help="Model name for provenance metadata (defaults to env lookup or auto).",
+ )
+ parser.add_argument(
+ "--cursor-mode",
+ default="",
+ help="Cursor model mode for provenance metadata (auto/manual).",
+ )
+ parser.add_argument(
+ "--trace-path",
+ default=".braindrain/plan-reports/model-trace.jsonl",
+ help="JSONL path for model trace events used to populate subagent model rollups.",
+ )
parser.add_argument(
"--master-plan",
default=None,
@@ -933,6 +993,7 @@ def render_task_board_markdown(
items: list[PlanItem],
*,
cards_by_source: dict[str, "PlanCard"] | None = None,
+ provenance: dict[str, object] | None = None,
) -> str:
"""Single markdown table of active work, regenerated each audit run.
@@ -960,9 +1021,22 @@ def render_task_board_markdown(
"Do not edit by hand — ownership inherits from the parent plan's "
"frontmatter (`owner:` / `dri:`) unless an item explicitly delegates._",
"",
+ ]
+ provenance = provenance or {}
+ lines.extend(
+ [
+ f"_model: {provenance.get('last_modified_by_model', 'auto')} | "
+ f"cursor_mode: {provenance.get('cursor_mode', 'auto')} | "
+ f"date: {provenance.get('last_modified_at', report_date)}_",
+ "",
+ ]
+ )
+ lines.extend(
+ [
"| IDE | Status | Owner | Item | Source | Gaps |",
"|-----|--------|-------|------|--------|------|",
- ]
+ ]
+ )
for item in rows:
plan_card = cards_by_source.get(item.source)
plan_has_owner = bool(
@@ -1193,10 +1267,93 @@ def parse_master_plan(master_path: Path, repo_root: Path) -> dict[str, object]:
return {"frontmatter": fm, "children": children}
+def sync_master_plan(
+ master_path: Path | None,
+ repo_root: Path,
+ cards: list["PlanCard"],
+) -> list[str]:
+ """Auto-add missing discovered plans into curated `_master.plan.md`.
+
+ Missing plans are grouped under their disposition heading (e.g. `## active`).
+ Returns the repo-relative source paths that were inserted.
+ """
+ if not master_path or not master_path.is_file():
+ return []
+
+ master_doc = parse_master_plan(master_path, repo_root)
+ in_master: set[str] = set(master_doc.get("children", [])) # type: ignore[arg-type]
+ by_source = {card.source: card for card in cards if not card.is_master}
+ missing = sorted(set(by_source.keys()) - in_master)
+ if not missing:
+ return []
+
+ text = master_path.read_text(encoding="utf-8", errors="ignore")
+ fm_match = FRONTMATTER_BLOCK_RE.match(text)
+ fm_block = fm_match.group(0) if fm_match else ""
+ body = text[len(fm_block):]
+ body_lines = body.splitlines()
+
+ def section_bounds(section_name: str) -> tuple[int, int] | None:
+ header = f"## {section_name}"
+ start = None
+ for idx, line in enumerate(body_lines):
+ if line.strip().lower() == header.lower():
+ start = idx
+ break
+ if start is None:
+ return None
+ end = len(body_lines)
+ for idx in range(start + 1, len(body_lines)):
+ if body_lines[idx].startswith("## "):
+ end = idx
+ break
+ return start, end
+
+ inserted: list[str] = []
+ # Keep insertion stable by disposition order, then source path.
+ ordered_missing = sorted(
+ missing,
+ key=lambda src: (
+ _DISPOSITION_ORDER.index(by_source[src].disposition)
+ if by_source[src].disposition in _DISPOSITION_ORDER
+ else 99,
+ src,
+ ),
+ )
+ for src in ordered_missing:
+ card = by_source[src]
+ target_abs = (repo_root / src).resolve()
+ rel_link = os.path.relpath(target_abs, start=master_path.parent.resolve()).replace("\\", "/")
+ bullet = f"- [{card.title}]({rel_link}) — DRI: {card.dri}"
+
+ bounds = section_bounds(card.disposition)
+ if bounds is None:
+ if body_lines and body_lines[-1].strip():
+ body_lines.append("")
+ body_lines.append(f"## {card.disposition}")
+ body_lines.append("")
+ body_lines.append(bullet)
+ inserted.append(src)
+ continue
+
+ start, end = bounds
+ insert_at = end
+ # Keep a blank line between section content and next heading.
+ while insert_at > start + 1 and not body_lines[insert_at - 1].strip():
+ insert_at -= 1
+ body_lines.insert(insert_at, bullet)
+ inserted.append(src)
+
+ new_body = "\n".join(body_lines).rstrip() + "\n"
+ master_path.write_text(fm_block + new_body, encoding="utf-8")
+ return inserted
+
+
def render_next_actions(
actions: list[Action],
*,
report_date: str = "",
+ provenance: dict[str, object] | None = None,
) -> str:
"""Render the triage queue grouped by verb.
@@ -1216,6 +1373,12 @@ def render_next_actions(
"Do not edit by hand. Action a verb by editing the parent plan's "
"`disposition:` and re-running the audit._"
)
+ provenance = provenance or {}
+ lines.append(
+ f"_model: {provenance.get('last_modified_by_model', 'auto')} | "
+ f"cursor_mode: {provenance.get('cursor_mode', 'auto')} | "
+ f"date: {provenance.get('last_modified_at', report_date)}_"
+ )
lines.append("")
if not actions:
@@ -1251,6 +1414,7 @@ def render_master_mirror(
master_doc: dict[str, object] | None = None,
*,
report_date: str = "",
+ provenance: dict[str, object] | None = None,
) -> str:
"""Generated mirror of the master plan with rollup + drift detection.
@@ -1288,6 +1452,13 @@ def render_master_mirror(
f"Do not edit by hand. Source: {src_note}._"
)
lines.append("")
+ provenance = provenance or {}
+ lines.append(
+ f"_model: {provenance.get('last_modified_by_model', 'auto')} | "
+ f"cursor_mode: {provenance.get('cursor_mode', 'auto')} | "
+ f"date: {provenance.get('last_modified_at', report_date or '')}_"
+ )
+ lines.append("")
if not cards:
lines.append("- _No plans discovered._")
@@ -1383,6 +1554,7 @@ def build_report(
items: list[PlanItem],
*,
cards_by_source: dict[str, "PlanCard"] | None = None,
+ provenance: dict[str, object] | None = None,
) -> str:
overlaps = detect_overlaps(items)
cards_by_source = cards_by_source or {}
@@ -1449,6 +1621,7 @@ def _item_has_inherited_owner(it: PlanItem) -> bool:
"top_risks": top_risks[:5],
"memory_context": mem,
}
+ provenance = provenance or {}
body: list[str] = []
body.append("---")
@@ -1476,6 +1649,25 @@ def _item_has_inherited_owner(it: PlanItem) -> bool:
body.append(f" - \"{source}\"")
if not frontmatter["memory_context"]["sources"]:
body.append(" - \"none\"")
+ body.append("provenance:")
+ body.append(
+ f" created_by_model: \"{str(provenance.get('created_by_model', 'auto'))}\""
+ )
+ body.append(
+ f" created_at: \"{str(provenance.get('created_at', report_date))}\""
+ )
+ body.append(
+ f" last_modified_by_model: \"{str(provenance.get('last_modified_by_model', 'auto'))}\""
+ )
+ body.append(
+ f" last_modified_at: \"{str(provenance.get('last_modified_at', report_date))}\""
+ )
+ body.append(
+ f" cursor_mode: \"{str(provenance.get('cursor_mode', 'auto'))}\""
+ )
+ body.append(" subagent_models_used:")
+ for model in provenance.get("subagent_models_used", []) or ["auto"]:
+ body.append(f" - \"{model}\"")
body.append("---")
body.append("")
body.append("# Daily Plan Audit Report")
@@ -1572,6 +1764,19 @@ def _item_has_inherited_owner(it: PlanItem) -> bool:
def main() -> int:
args = parse_args()
repo_root = Path(args.repo_root).resolve()
+ trace_path = Path(args.trace_path)
+ if not trace_path.is_absolute():
+ trace_path = repo_root / trace_path
+ model_name = resolve_model_name(args.model_name)
+ cursor_mode = resolve_cursor_mode(args.cursor_mode)
+ provenance = {
+ "created_by_model": model_name,
+ "created_at": args.report_date,
+ "last_modified_by_model": model_name,
+ "last_modified_at": args.report_date,
+ "cursor_mode": cursor_mode,
+ "subagent_models_used": load_trace_models(trace_path),
+ }
out_dir = Path(args.output_dir)
if not out_dir.is_absolute():
out_dir = repo_root / out_dir
@@ -1602,6 +1807,7 @@ def main() -> int:
secondary,
items,
cards_by_source=cards_by_source,
+ provenance=provenance,
)
if archive_moves:
report = (
@@ -1617,7 +1823,10 @@ def main() -> int:
shutil.copyfile(dated_path, latest_path)
board = render_task_board_markdown(
- args.report_date, items, cards_by_source=cards_by_source
+ args.report_date,
+ items,
+ cards_by_source=cards_by_source,
+ provenance=provenance,
)
(out_dir / "plan-task-board.md").write_text(board, encoding="utf-8")
@@ -1630,17 +1839,33 @@ def main() -> int:
else:
master_path = discover_master_plan(repo_root)
+ synced_master_entries = sync_master_plan(
+ master_path,
+ repo_root,
+ list(cards_by_source.values()),
+ )
master_doc = parse_master_plan(master_path, repo_root) if master_path else None
mirror = render_master_mirror(
list(cards_by_source.values()),
master_doc,
report_date=args.report_date,
+ provenance=provenance,
)
(out_dir / "master-plan.md").write_text(mirror, encoding="utf-8")
# Triage queue (`next-actions.md`).
actions = detect_actions(list(cards_by_source.values()))
- next_actions = render_next_actions(actions, report_date=args.report_date)
+ next_actions = render_next_actions(
+ actions,
+ report_date=args.report_date,
+ provenance=provenance,
+ )
+ if synced_master_entries:
+ next_actions += (
+ "\n## Master plan sync (this run)\n\n"
+ + "\n".join(f"- added to `_master.plan.md`: `{src}`" for src in synced_master_entries)
+ + "\n"
+ )
(out_dir / "next-actions.md").write_text(next_actions, encoding="utf-8")
print(str(dated_path))
diff --git a/tests/test_plan_auditor_master.py b/tests/test_plan_auditor_master.py
index b6e5ae8..70c13fb 100644
--- a/tests/test_plan_auditor_master.py
+++ b/tests/test_plan_auditor_master.py
@@ -88,3 +88,49 @@ def test_plan_marked_archived_disposition() -> None:
assert m.plan_marked_archived({"disposition": "archived"})
assert m.plan_marked_archived({"status": "archived"})
assert not m.plan_marked_archived({"disposition": "active"})
+
+
+def test_report_includes_model_provenance_frontmatter(tmp_project_dir: Path) -> None:
+ m = _load_audit_module()
+ (tmp_project_dir / ".cursor" / "plans").mkdir(parents=True)
+ (tmp_project_dir / ".cursor" / "plans" / "p.plan.md").write_text(
+ "# Plan\n- [ ] owner: test item\n",
+ encoding="utf-8",
+ )
+ trace_path = tmp_project_dir / ".braindrain" / "plan-reports" / "model-trace.jsonl"
+ trace_path.parent.mkdir(parents=True, exist_ok=True)
+ trace_path.write_text(
+ '{"model_name":"composer-2","actor":"coordinator"}\n'
+ '{"model_name":"gpt-5.4-medium","actor":"research"}\n',
+ encoding="utf-8",
+ )
+
+ argv = [
+ "daily_plan_audit.py",
+ "--repo-root",
+ str(tmp_project_dir),
+ "--report-date",
+ "2026-06-02",
+ "--model-name",
+ "Codex 5.3",
+ "--cursor-mode",
+ "auto",
+ "--trace-path",
+ str(trace_path),
+ ]
+ from unittest.mock import patch
+
+ with patch.object(sys, "argv", argv):
+ assert m.main() == 0
+
+ report = (
+ tmp_project_dir
+ / ".braindrain"
+ / "plan-reports"
+ / "plan-audit-2026-06-02.md"
+ ).read_text(encoding="utf-8")
+ assert 'created_by_model: "Codex 5.3"' in report
+ assert 'cursor_mode: "auto"' in report
+ assert 'subagent_models_used:' in report
+ assert ' - "composer-2"' in report
+ assert ' - "gpt-5.4-medium"' in report
diff --git a/tests/test_workspace_primer_hooks.py b/tests/test_workspace_primer_hooks.py
index c72a9c0..8e22645 100644
--- a/tests/test_workspace_primer_hooks.py
+++ b/tests/test_workspace_primer_hooks.py
@@ -4,8 +4,10 @@
import importlib.util
import json
+import os
import shutil
import stat
+import subprocess
import sys
import uuid
from pathlib import Path
@@ -128,6 +130,46 @@ def test_daily_plan_hook_contains_once_per_day_gate() -> None:
assert 'if [ "${LAST_RUN_DATE}" = "${TODAY}" ]; then' in content
+def test_observe_hook_template_suppresses_known_stdout_sources() -> None:
+ """Guard against regressions that break Cursor JSON hook parsing."""
+ hook_path = CURSOR_HOOK_TEMPLATES_DIR / "hooks" / "on-stop-observe.sh"
+ content = hook_path.read_text(encoding="utf-8")
+ assert "PRAGMA journal_mode=WAL;" in content
+ assert 'sqlite3 "${DB_PATH}" >/dev/null 2>/dev/null <<SQL' in content
+
+
+def test_observe_hook_runtime_is_silent(tmp_project_dir: Path) -> None:
+ hook_path = _REPO_ROOT / ".cursor" / "hooks" / "on-stop-observe.sh"
+ if not hook_path.is_file():
+ pytest.skip("workspace observe hook not present")
+ for dep in ("sqlite3", "jq", "git"):
+ if shutil.which(dep) is None:
+ pytest.skip(f"{dep} is required for observe hook runtime test")
+
+ fake_home = tmp_project_dir / "home"
+ fake_home.mkdir(parents=True, exist_ok=True)
+ payload = json.dumps(
+ {
+ "session_id": "hook-test-session",
+ "hook_event_name": "stop",
+ "workspace_roots": [str(_REPO_ROOT)],
+ }
+ )
+ result = subprocess.run(
+ [str(hook_path)],
+ input=payload,
+ text=True,
+ capture_output=True,
+ cwd=_REPO_ROOT,
+ env={**os.environ, "HOME": str(fake_home)},
+ check=False,
+ )
+ assert result.returncode == 0
+ assert result.stdout.strip() == ""
+ assert result.stderr.strip() == ""
+
+
def test_daily_plan_audit_prioritizes_cursor_plan_files(tmp_project_dir: Path) -> None:
module = _load_audit_module()
(tmp_project_dir / ".cursor" / "plans").mkdir(parents=True, exist_ok=True)