diff --git a/.changelog/v0.11.17.md b/.changelog/v0.11.17.md index 2b830a64..f84516f1 100644 --- a/.changelog/v0.11.17.md +++ b/.changelog/v0.11.17.md @@ -1,6 +1,6 @@ # Release v0.11.17 - Changelog -Released: YYYY-MM-DD +Released: 2026-02-07 ## Overview diff --git a/.changelog/v0.12.48.md b/.changelog/v0.12.48.md index 0a2e9bff..fcdcc748 100644 --- a/.changelog/v0.12.48.md +++ b/.changelog/v0.12.48.md @@ -1,6 +1,6 @@ # Release v0.12.48 - Changelog -Released: YYYY-MM-DD +Released: 2026-02-14 ## Overview diff --git a/.changelog/v0.13.20.md b/.changelog/v0.13.20.md index 6cb16815..50d36f83 100644 --- a/.changelog/v0.13.20.md +++ b/.changelog/v0.13.20.md @@ -1,6 +1,6 @@ # Release v0.13.20 - Changelog -Released: YYYY-MM-DD +Released: 2026-02-17 ## Overview diff --git a/.changelog/v0.14.21.md b/.changelog/v0.14.21.md index 88d22981..b4f64068 100644 --- a/.changelog/v0.14.21.md +++ b/.changelog/v0.14.21.md @@ -1,6 +1,6 @@ # Release v0.14.21 - Changelog -Released: YYYY-MM-DD +Released: 2026-02-18 ## Overview diff --git a/.changelog/v0.15.x.md b/.changelog/v0.15.x.md new file mode 100644 index 00000000..004b0921 --- /dev/null +++ b/.changelog/v0.15.x.md @@ -0,0 +1,59 @@ +# Release v0.15.x - Changelog + +Released: YYYY-MM-DD + +## Overview + +Plan cleanup and documentation reorganization. + +## Fixes + +### CI: Release date placeholder not substituted +- **Fixed `YYYY-MM-DD` never replaced** in release notes -- the `release.yml` workflow substituted version placeholders but not the date placeholder +- **Added date substitution** to both the release body generation and the archived changelog file +- **Backfilled 4 releases** (v0.11.17, v0.12.48, v0.13.20, v0.14.21) with correct dates + +### CI: Recurring changelog merge conflicts on every release +- **Fixed cherry-pick divergence** in `release.yml` -- the workflow was archiving the changelog on `dev` then cherry-picking to `main`, creating parallel commit histories that caused merge conflicts on the next PR +- **Changed to archive on `main` first**, then merge main into dev -- this creates a shared commit ancestor, eliminating the divergent histories + +### Hardening: Safe JSON parsing & output redaction +- **Replaced raw `JSON.parse` with `readJSONFile`** in `apps.js`, `socialAccounts.js`, `scriptRunner.js` -- corrupted data files no longer crash the server +- **Added null guard** on `data?.apps?.[id]` in `apps.js` to prevent undefined access +- **Added `.catch()` handler** on fire-and-forget `cloneRepoInBackground()` in `brain.js` to prevent unhandled promise rejections +- **Added sensitive output redaction** in `scriptRunner.js` -- env vars containing keys, tokens, passwords, and secrets are now `[REDACTED]` before persisting to `scripts-state.json` + +### Bugs Fixed +- **Fixed index out of bounds** in `agents.js` Windows process parsing -- checked `parts.length >= 6` but accessed `parts[6]` (needs `>= 7`) +- **Fixed non-unique React keys** in `UpcomingTasksWidget` -- `task.taskType` isn't unique, now uses compound keys +- **Fixed missing optional chaining** in `agentTools.js` -- `commentsResponse.comments` would throw if null +- **Fixed falsy value bug** in `apps.js` routes -- used `||` instead of `??` for PM2 process fallback +- **Fixed unsafe `JSON.parse`** in `pm2.js` `listProcesses` -- now uses `safeJSONParse` with fallback +- **Fixed silent error swallowing** in `cos.js` actionable insights -- `Promise.all` catches now log errors +- **Fixed missing date validation** in `formatters.js` -- `formatTime()` now guards against null/invalid 
timestamps +- **Fixed hardcoded localhost** in `vite.config.js` proxy -- now configurable via `VITE_API_HOST` env var +- **Fixed dependency version mismatches** -- aligned Express, PM2, and portos-ai-toolkit between root and server `package.json` +- **Removed forbidden `pm2:kill`** npm script that violated project rules + +### DRY Refactors +- **Extracted `spawnPm2Cli` helper** in `pm2.js` -- eliminated 3x duplicated spawn+stderr+close blocks for stop/restart/delete +- **Extracted `loadApp` middleware** in `apps.js` routes -- eliminated 11 repeated `getAppById` + null check patterns +- **Used existing `extractJSONArray`/`safeJSONParse`** in `pm2.js` -- removed duplicated JSON extraction logic +- **Created `useAutoRefetch` hook** -- consolidated identical fetch+interval `useEffect` pattern across 4 dashboard widgets +- **Extracted `pickScheduleSettings`** in `cos.js` -- eliminated duplicated `if (X !== undefined)` settings pattern +- **Removed duplicate `.pm2` entry** from `.gitignore` + +### YAGNI Cleanup +- **Deleted 3 dead `.old.js` route files** -- `prompts.old.js`, `providers.old.js`, `runs.old.js` +- **Removed unused `addAllowedCommand`/`removeAllowedCommand`** from `commands.js` -- never called from any route + +## Improvements + +### Plan & Documentation Cleanup +- **Extracted M42 spec** to `docs/features/identity-system.md` -- removed ~600 lines from PLAN.md +- **Extracted M40 summary** to `docs/features/agent-skills.md` -- completed milestone moved to feature docs +- **Moved M40 to Completed** -- all 4 phases (skill templates, context compaction, negative routing, deterministic workflows) were done +- **Removed pump.fun content** (M44) -- belongs in pump.funner repo, not PortOS +- **Removed orphaned research docs** -- `pumpfun-data-sources.md` and `kalshibot-health-check-2026-02-17.md` were for other projects +- **Added Scope Boundary rule** to CLAUDE.md -- CoS agents must write research/plans/docs for managed apps in those apps' repos, not PortOS +- **Updated Next Actions** -- focused on M42 Identity System, M7 App Templates, M34 behavioral feedback diff --git a/.changelog/v0.9.19.md b/.changelog/v0.9.19.md index ecd5b3bf..fc5928a4 100644 --- a/.changelog/v0.9.19.md +++ b/.changelog/v0.9.19.md @@ -1,6 +1,6 @@ # Release Notes - v0.9.19 -**Release Date:** YYYY-MM-DD +**Release Date:** 2026-02-01 ## πŸŽ‰ Features diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 0c8b36ad..8a98e72f 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -60,7 +60,7 @@ jobs: # Use minor version pattern changelog (e.g., v0.10.x.md) CHANGELOG=$(cat "$CHANGELOG_FILE_PATTERN") # Replace version placeholder with actual version - CHANGELOG=$(echo "$CHANGELOG" | sed "s/v${MAJOR_MINOR}.x/v${VERSION}/g" | sed "s/${MAJOR_MINOR}.x/${VERSION}/g") + CHANGELOG=$(echo "$CHANGELOG" | sed "s/v${MAJOR_MINOR}.x/v${VERSION}/g" | sed "s/${MAJOR_MINOR}.x/${VERSION}/g" | sed "s/YYYY-MM-DD/$(date +%Y-%m-%d)/g") else # Fallback: Generate changelog from commits (exclude [skip ci] commits) PREV_TAG=$(git describe --tags --abbrev=0 2>/dev/null || git rev-list --max-parents=0 HEAD) @@ -113,37 +113,30 @@ jobs: NEW_MINOR=$((MINOR + 1)) NEW_VERSION="$MAJOR.$NEW_MINOR.0" - # Checkout dev branch - git fetch origin dev - git checkout dev - - # Rename and version the changelog file + # Archive changelog on main first (single source of truth) + # This avoids cherry-pick divergence that causes merge conflicts on the next PR PATTERN_FILE=".changelog/v${MAJOR_MINOR}.x.md" 
VERSIONED_FILE=".changelog/v${CURRENT_VERSION}.md" if [ -f "$PATTERN_FILE" ]; then - # Rename the file (preserves git history) + # Rename and version the changelog file on main git mv "$PATTERN_FILE" "$VERSIONED_FILE" # Replace version placeholders in the renamed file - sed -i.bak "s/v${MAJOR_MINOR}\.x/v${CURRENT_VERSION}/g; s/${MAJOR_MINOR}\.x/${CURRENT_VERSION}/g" "$VERSIONED_FILE" + sed -i.bak "s/v${MAJOR_MINOR}\.x/v${CURRENT_VERSION}/g; s/${MAJOR_MINOR}\.x/${CURRENT_VERSION}/g; s/YYYY-MM-DD/$(date +%Y-%m-%d)/g" "$VERSIONED_FILE" rm "${VERSIONED_FILE}.bak" - # Commit the renamed and updated changelog git add "$VERSIONED_FILE" git commit -m "docs: archive changelog for v${CURRENT_VERSION} [skip ci]" - - # Capture the changelog commit hash before switching branches - CHANGELOG_COMMIT=$(git rev-parse HEAD) - git push origin dev - - # Merge changelog back to main (without triggering CI) - git checkout main - git cherry-pick "$CHANGELOG_COMMIT" git push origin main - git checkout dev fi + # Checkout dev and merge main to share the changelog commit history + git fetch origin dev + git checkout dev + git fetch origin main + git merge origin/main -m "merge: incorporate v${CURRENT_VERSION} changelog archive [skip ci]" + # Update package.json files for next version npm version $NEW_VERSION --no-git-tag-version cd client && npm version $NEW_VERSION --no-git-tag-version && cd .. diff --git a/.gitignore b/.gitignore index 61c66341..b389bbc1 100644 --- a/.gitignore +++ b/.gitignore @@ -39,4 +39,3 @@ Thumbs.db # Browser service node_modules (code is committed) browser/node_modules/ -.pm2 diff --git a/CLAUDE.md b/CLAUDE.md index 24871c96..e1773987 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -60,6 +60,10 @@ PortOS depends on `portos-ai-toolkit` as an npm module for AI provider managemen - The toolkit uses spread in `updateProvider()` so existing providers preserve custom fields, but `createProvider()` has an explicit field list - After updating the toolkit, run `npm update portos-ai-toolkit` in PortOS to pull changes +## Scope Boundary + +When CoS agents or AI tools work on managed apps outside PortOS, all research, plans, docs, and code for those apps must be written to the target app's own repository/directory -- never to this repo. PortOS stores only its own features, plans, and documentation. If an agent generates a PLAN.md, research doc, or feature spec for another app, it goes in that app's directory. + ## Code Conventions - **No try/catch** - errors bubble to centralized middleware @@ -133,14 +137,12 @@ When you merge to `main`, the GitHub Actions workflow automatically: 1. Reads `.changelog/v0.10.x.md` 2. Replaces all instances of `0.10.x` with actual version (e.g., `0.10.5`) 3. Creates the GitHub release with substituted changelog -4. Checks out dev branch -5. Renames `v0.10.x.md` β†’ `v0.10.5.md` using `git mv` (preserves git history) -6. Commits the renamed file to dev: `"docs: archive changelog for v0.10.5 [skip ci]"` -7. Cherry-picks that commit to main -8. Bumps dev to next minor version (e.g., 0.11.0) +4. Archives the changelog on `main`: renames `v0.10.x.md` β†’ `v0.10.5.md` using `git mv` +5. Checks out dev branch and merges main into dev (shared commit history avoids future conflicts) +6. 
Bumps dev to next minor version (e.g., 0.11.0) **Result:** -- Both `dev` and `main` have `.changelog/v0.10.5.md` matching the tagged release +- Both `dev` and `main` have `.changelog/v0.10.5.md` via shared commit ancestry - Git history shows: `v0.10.x.md` β†’ `v0.10.5.md` (rename) - You create `.changelog/v0.11.x.md` to start the next development cycle diff --git a/PLAN.md b/PLAN.md index b4aae25b..d2689a9d 100644 --- a/PLAN.md +++ b/PLAN.md @@ -66,896 +66,49 @@ pm2 logs - [x] **M37**: Autonomous Jobs - Recurring scheduled jobs that the CoS executes proactively using digital twin identity - [x] **M38**: Agent Tools - AI content generation, feed browsing, and autonomous engagement for Moltbook agents - [x] **M39**: Agent-Centric Drill-Down - Redesigned Agents section with agent-first hierarchy, deep-linkable URLs, and scoped sub-tabs -- [x] **M41**: CyberCity Immersive Overhaul - Procedural synthwave audio, enhanced post-processing (chromatic aberration, film grain, color grading), reflective wet-street ground, settings system, and atmosphere enhancements -- [x] **M43**: Moltworld Platform Support - Second platform integration for AI agents in a shared voxel world with movement, building, thinking, messaging, and SIM token economy +- [x] **M40**: Agent Skill System - Task-type-specific prompts, context compaction, negative routing examples, deterministic workflow skills. See [Agent Skills](./docs/features/agent-skills.md) +- [x] **M41**: CyberCity Immersive Overhaul - Procedural synthwave audio, enhanced post-processing, reflective wet-street ground, settings system +- [x] **M43**: Moltworld Platform Support - Second platform integration for AI agents in a shared voxel world ### Planned - [ ] **M7**: App Templates - Template management and app scaffolding from templates - [ ] **M34 P3,P5-P7**: Digital Twin - Behavioral feedback loop, multi-modal capture, advanced testing, personas -- [ ] **M40**: Agent Skill System - Task-type-specific prompt templates with routing logic, negative examples, and embedded workflows for improved agent accuracy and reliability -- [ ] **M42**: Unified Digital Twin Identity System - Connect Genome (117 markers, 32 categories), Chronotype (5 sleep markers + behavioral), Aesthetic Taste (P2 complete, P2.5 adds twin-aware prompting), and Mortality-Aware Goals into a single coherent Identity architecture with cross-insights engine -- [ ] **M44**: Pump.fun Launch Tracking Engine - Real-time pump.fun token detection via Helius webhooks, token enrichment via Birdeye, sniper account tracking, and launch analytics dashboard +- [ ] **M42**: Unified Digital Twin Identity System - See [Identity System](./docs/features/identity-system.md) --- -## M44: Pump.fun Launch Tracking Engine - -### Motivation - -The Solana memecoin ecosystem on pump.fun generates thousands of token launches daily. A small percentage become high-performers (10x+ returns). Sniper accounts β€” wallets that consistently buy into winning tokens within seconds of launch β€” represent a detectable signal for launch quality. This engine detects new launches in real-time, tracks token performance, inventories sniper accounts, and builds a data foundation for predicting upcoming high-performing launches. 
- -**Brain Project**: 467fbe07 β€” research complete (see `docs/research/pumpfun-data-sources.md`) - -### Data Source Selection - -| Source | Role | Tier | Cost/mo | -|--------|------|------|---------| -| **Helius** | Primary: real-time token detection, transaction monitoring | Developer | $49 | -| **Birdeye** | Enrichment: market cap, volume, security scores, OHLCV | Starter | $99 | -| **pump.fun Direct** | Supplement: creator metadata (sparingly, no SLA) | Free | $0 | - -**Total**: $148/mo for Phase 1+2 - -### Data Model - -All data persists to `data/pumpfun/` following PortOS conventions (entity stores with `records` keyed by ID, JSONL for append-heavy logs, 2s cache TTL). - -#### `data/pumpfun/meta.json` β€” Configuration - -```json -{ - "version": "1.0.0", - "helius": { - "apiKey": null, - "webhookId": null, - "webhookUrl": null, - "programId": "6EF8rrecthR5Dkzon8Nwu78hRvfCKubJ14M5uBEwF6P", - "tier": "developer", - "rpsLimit": 10 - }, - "birdeye": { - "apiKey": null, - "tier": "starter", - "rpsLimit": 15 - }, - "tracking": { - "enabled": false, - "enrichmentIntervalMs": 60000, - "snapshotIntervalMs": 300000, - "retentionDays": 90, - "autoEnrich": true - }, - "filters": { - "minHolders": 10, - "minVolume24h": 1000, - "minLiquiditySol": 5, - "excludeRugPull": true - }, - "alerts": { - "volumeSpikeThreshold": 5, - "holderSpikeThreshold": 3, - "sniperOverlapThreshold": 3 - } -} -``` - -#### `data/pumpfun/tokens.json` β€” Tracked Tokens (Entity Store) - -```json -{ - "records": { - "TokenMintAddress44chars": { - "mint": "TokenMintAddress44chars", - "symbol": "PUMP", - "name": "Pump Token", - "creator": "CreatorWalletAddress", - "launchSignature": "txSignature", - "launchSlot": 123456789, - "launchAt": "2026-02-17T12:00:00.000Z", - "bondingCurve": { - "address": "BondingCurveAccountAddress", - "graduated": false, - "graduatedAt": null - }, - "status": "active", - "performance": { - "athMultiple": null, - "athPrice": null, - "athAt": null, - "currentPrice": null, - "priceAtLaunch": null - }, - "metrics": { - "holders": 0, - "volume24h": 0, - "marketCap": 0, - "liquidity": 0, - "securityScore": null - }, - "snipers": [], - "tags": [], - "enrichedAt": null, - "createdAt": "2026-02-17T12:00:00.000Z", - "updatedAt": "2026-02-17T12:00:00.000Z" - } - } -} -``` - -Key: mint address (not UUID) since tokens are uniquely identified by their on-chain mint. - -#### `data/pumpfun/snipers.json` β€” Sniper Account Inventory (Entity Store) - -```json -{ - "records": { - "WalletAddress": { - "wallet": "WalletAddress", - "label": null, - "stats": { - "totalSnipes": 0, - "successRate": 0, - "avgEntryDelaySec": 0, - "avgReturnMultiple": 0, - "bestReturn": null, - "worstReturn": null, - "activeSince": null - }, - "recentTokens": [], - "reputation": "unknown", - "tags": [], - "createdAt": "2026-02-17T12:00:00.000Z", - "updatedAt": "2026-02-17T12:00:00.000Z" - } - } -} -``` - -Reputation levels: `unknown` β†’ `newcomer` β†’ `consistent` β†’ `elite` (based on success rate + volume). 
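For illustration, here is a minimal sketch of how a sniper record's `stats` could be mapped onto these reputation tiers. The numeric thresholds are assumptions for the sketch only; the spec fixes just the tier names and that promotion depends on success rate plus snipe volume.

```js
// Hypothetical tier derivation -- thresholds are illustrative, not specified by the spec.
function deriveReputation({ totalSnipes = 0, successRate = 0 } = {}) {
  if (totalSnipes === 0) return 'unknown';
  if (totalSnipes < 5) return 'newcomer';                       // too little volume to judge
  if (totalSnipes >= 20 && successRate >= 0.6) return 'elite';  // high volume + high hit rate
  if (successRate >= 0.4) return 'consistent';
  return 'newcomer';
}

// e.g. a wallet with 25 snipes at a 65% hit rate would be tagged 'elite'
```

The service would presumably recompute this tier whenever enrichment updates a token's performance and the sniper's `stats` change.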
- -#### `data/pumpfun/events.jsonl` β€” Trade & Price Events (Append Log) - -```jsonl -{"id":"evt-uuid","mint":"TokenMint","type":"launch","creator":"Wallet","signature":"txSig","slot":123456789,"timestamp":"2026-02-17T12:00:00.000Z"} -{"id":"evt-uuid","mint":"TokenMint","type":"trade","side":"buy","wallet":"Wallet","amountSol":1.5,"amountTokens":1000000,"signature":"txSig","slot":123456790,"timestamp":"2026-02-17T12:00:01.000Z"} -{"id":"evt-uuid","mint":"TokenMint","type":"enrichment","holders":250,"volume24h":50000,"marketCap":120000,"liquidity":5000,"securityScore":85,"source":"birdeye","timestamp":"2026-02-17T12:01:00.000Z"} -{"id":"evt-uuid","mint":"TokenMint","type":"graduation","bondingCurve":"Address","signature":"txSig","timestamp":"2026-02-17T14:00:00.000Z"} -``` - -Event types: `launch`, `trade`, `enrichment`, `graduation`, `sniper_detected`, `alert`. - -### MVP Architecture - -``` - β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” - β”‚ Helius Webhooks β”‚ - β”‚ (pump.fun program monitoring) β”‚ - β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”¬β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ - β”‚ - POST /api/pumpfun/webhook - (new token + trade events) - β”‚ - β–Ό -β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” -β”‚ PortOS Server β”‚ -β”‚ β”‚ -β”‚ β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” β”‚ -β”‚ β”‚ Webhook Route │──▢│ PumpFun Service β”‚ β”‚ -β”‚ β”‚ (validates + β”‚ β”‚ - detectToken() β”‚ β”‚ -β”‚ β”‚ parses events) β”‚ β”‚ - recordTrade() β”‚ β”‚ -β”‚ β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ β”‚ - detectSniper() β”‚ β”‚ -β”‚ β”‚ - enrichToken() β”‚ β”‚ -β”‚ β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” β”‚ - getStats() β”‚ β”‚ -β”‚ β”‚ REST Routes │──▢│ - alertCheck() β”‚ β”‚ -β”‚ β”‚ GET /tokens β”‚ β””β”€β”€β”€β”€β”€β”€β”€β”€β”¬β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ β”‚ -β”‚ β”‚ GET /snipers β”‚ β”‚ β”‚ -β”‚ β”‚ GET /stats β”‚ β–Ό β”‚ -β”‚ β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” β”‚ -β”‚ β”‚ Data Layer β”‚ β”‚ -β”‚ β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” β”‚ tokens.json β”‚ β”‚ -β”‚ β”‚ Enrichment β”‚ β”‚ snipers.json β”‚ β”‚ -β”‚ β”‚ Scheduler │──▢│ events.jsonl β”‚ β”‚ -β”‚ β”‚ (polls Birdeye β”‚ β”‚ meta.json β”‚ β”‚ -β”‚ β”‚ for active β”‚ β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ β”‚ -β”‚ β”‚ tokens) β”‚ β”‚ -β”‚ β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ β”‚ -β”‚ β”‚ -β”‚ β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” β”‚ -β”‚ β”‚ Socket.IO │──▢│ Client UI β”‚ β”‚ -β”‚ β”‚ pumpfun:token β”‚ β”‚ /pumpfun β”‚ β”‚ -β”‚ β”‚ pumpfun:trade β”‚ β”‚ /pumpfun/tokens β”‚ β”‚ -β”‚ β”‚ pumpfun:alert β”‚ β”‚ /pumpfun/snipers β”‚ β”‚ -β”‚ β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ β”‚ -β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ - β”‚ - Token enrichment - (market data, security) - β”‚ - β–Ό - 
β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” - β”‚ Birdeye REST API β”‚ - β”‚ token_overview, token_security, β”‚ - β”‚ OHLCV, price history β”‚ - β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ -``` - -### Implementation Phases - -#### P1: Token Detection (Helius webhook receiver) -- Create `data/pumpfun/` directory with `meta.json`, `tokens.json`, `snipers.json`, `events.jsonl` -- Create `server/services/pumpfun.js` β€” core service with token detection, event logging, file I/O with caching -- Create `server/routes/pumpfun.js` β€” webhook endpoint (`POST /api/pumpfun/webhook`) + REST endpoints -- Add Zod schemas for webhook payload validation and API inputs in `server/lib/validation.js` -- Parse Helius enhanced transaction events: extract mint address, creator, initial supply from `TOKEN_MINT` type events where `source` is `PUMP_FUN` -- Persist detected tokens to `tokens.json`, log launch events to `events.jsonl` -- Mount routes in `server/index.js` -- Emit Socket.IO `pumpfun:token` events for real-time UI updates - -#### P2: Token Enrichment (Birdeye integration) -- Add enrichment scheduler to `pumpfun.js` β€” polls Birdeye `/defi/token_overview` and `/defi/token_security` for active tokens -- Update token records with market cap, volume, holder count, security score, liquidity -- Track ATH (all-time high) price and multiple for each token -- Log enrichment snapshots to `events.jsonl` for historical tracking -- Detect bonding curve graduation events -- Filter out rug-pulls using Birdeye security endpoint (mint authority, freeze authority checks) - -#### P3: Sniper Detection & Analytics -- Parse early buy transactions (within first 60s of launch) from Helius trade events -- Cross-reference buyer wallets across multiple launches to identify repeat snipers -- Build sniper reputation scores: success rate, avg entry delay, avg return multiple -- Track sniper overlap β€” when 3+ known snipers enter the same token, flag as high signal -- Create `/pumpfun/snipers` REST endpoint for sniper leaderboard data -- Emit `pumpfun:alert` Socket.IO events when sniper overlap threshold exceeded - -#### P4: Dashboard UI -- Create `client/src/pages/PumpFun.jsx` β€” main page with tab navigation -- `/pumpfun/tokens` β€” live token feed with status, metrics, performance columns, sortable -- `/pumpfun/snipers` β€” sniper leaderboard with wallet, stats, recent tokens -- `/pumpfun/stats` β€” aggregate dashboard: launches/day, avg performer, top tokens, sniper activity -- `/pumpfun/settings` β€” API key management, filter config, alert thresholds -- Real-time updates via Socket.IO subscription -- Deep-linkable routes per CLAUDE.md conventions - -### Files to Create - -**New files:** -- `data/pumpfun/meta.json` β€” configuration -- `data/pumpfun/tokens.json` β€” token entity store -- `data/pumpfun/snipers.json` β€” sniper entity store -- `data/pumpfun/events.jsonl` β€” event log -- `server/services/pumpfun.js` β€” core service -- `server/routes/pumpfun.js` β€” API routes -- `client/src/pages/PumpFun.jsx` β€” dashboard page - -**Modified files:** -- `server/lib/validation.js` β€” add pumpfun Zod schemas -- `server/index.js` β€” mount pumpfun routes -- `client/src/App.jsx` β€” add PumpFun route -- `client/src/components/Sidebar.jsx` β€” add PumpFun nav item - -### Design Decisions - -1. 
**Mint address as entity key** (not UUID) β€” tokens are uniquely identified by their on-chain mint address, avoiding a mapping layer -2. **Webhook-first** β€” push model from Helius eliminates polling overhead and gives ~1s detection latency -3. **Enrichment on schedule, not inline** β€” Birdeye calls happen on a timer (60s default) for active tokens rather than blocking the webhook handler -4. **JSONL for events** β€” trade/price events are high-volume, append-only; JSONL avoids rewriting large files -5. **Sniper detection is cross-launch** β€” individual trades are meaningless; the value is in correlating the same wallet appearing in multiple successful early entries -6. **No external DB** β€” consistent with PortOS's JSON file persistence pattern; suitable for the expected data volume (hundreds of tokens/day, not millions) -7. **Security filtering built-in** β€” Birdeye's token_security endpoint flags rug-pull indicators early, preventing noise in the tracking data - ---- - -## M42: Unified Digital Twin Identity System - -### Motivation - -Four separate workstreams converge on the same vision: a personal digital twin that knows *who you are* biologically, temporally, aesthetically, and existentially. Today these live as disconnected features: - -| Subsystem | Current State | Location | -|-----------|--------------|----------| -| **Genome** | Fully implemented: 23andMe upload, 117 curated SNP markers across 32 categories, ClinVar integration, epigenetic tracking | `server/services/genome.js`, `GenomeTab.jsx`, `data/digital-twin/genome.json` | -| **Chronotype** | Genetic data ready: 5 sleep/circadian markers (CLOCK rs1801260, DEC2 rs57875989, PER2 rs35333999, CRY1 rs2287161, MTNR1B rs10830963) + `daily_routines` enrichment category. Derivation service not yet built | `curatedGenomeMarkers.js` sleep category, `ENRICHMENT_CATEGORIES.daily_routines` | -| **Aesthetic Taste** | P2 complete: Taste questionnaire with 5 sections (movies, music, visual_art, architecture, food), conversational Q&A, AI summary generation. Enrichment categories also feed taste data from book/movie/music lists | `TasteTab.jsx`, `taste-questionnaire.js`, `data/digital-twin/taste-profile.json` | -| **Goal Tracking** | Partially exists: `COS-GOALS.md` for CoS missions, `TASKS.md` for user tasks, `EXISTENTIAL.md` soul doc | `data/COS-GOALS.md`, `data/TASKS.md`, `data/digital-twin/EXISTENTIAL.md` | - -These should be unified under a single **Identity** architecture so the twin can reason across all dimensions (e.g., "your CLOCK gene says evening chronotype β€” schedule deep work after 8pm" or "given your longevity markers and age, here's how to prioritize your 10-year goals"). - -### Data Model - -#### Entity: `identity.json` (new, top-level twin orchestration) - -```json -{ - "version": "1.0.0", - "createdAt": "2026-02-12T00:00:00.000Z", - "updatedAt": "2026-02-12T00:00:00.000Z", - "sections": { - "genome": { "status": "active", "dataFile": "genome.json", "markerCount": 117, "categoryCount": 32, "lastScanAt": "..." 
}, - "chronotype": { "status": "active", "dataFile": "chronotype.json", "derivedFrom": ["genome:sleep", "enrichment:daily_routines"] }, - "aesthetics": { "status": "active", "dataFile": "aesthetics.json", "derivedFrom": ["enrichment:aesthetics", "enrichment:favorite_books", "enrichment:favorite_movies", "enrichment:music_taste"] }, - "goals": { "status": "active", "dataFile": "goals.json" } - }, - "crossLinks": [] -} -``` - -#### Entity: Chronotype Profile (`chronotype.json`) - -Derived from genome sleep markers + daily_routines enrichment answers + user overrides. - -```json -{ - "chronotype": "evening", - "confidence": 0.75, - "sources": { - "genetic": { - "clockGene": { "rsid": "rs1801260", "genotype": "T/C", "signal": "mild_evening" }, - "dec2": { "rsid": "rs57875989", "genotype": "G/G", "signal": "standard_sleep_need" }, - "per2": { "rsid": "rs35333999", "genotype": "C/C", "signal": "standard_circadian" }, - "cry1": { "rsid": "rs2287161", "genotype": "C/C", "signal": "standard_period" }, - "mtnr1b": { "rsid": "rs10830963", "genotype": "T/T", "signal": "normal_melatonin_receptor" } - }, - "behavioral": { - "preferredWakeTime": "08:30", - "preferredSleepTime": "00:30", - "peakFocusWindow": "20:00-02:00", - "energyDipWindow": "14:00-16:00" - } - }, - "recommendations": { - "deepWork": "20:00-02:00", - "lightTasks": "09:00-12:00", - "exercise": "17:00-19:00", - "caffeineCutoff": "14:00" - }, - "updatedAt": "2026-02-12T00:00:00.000Z" -} -``` - -**Derivation logic**: Five genome sleep markers provide the genetic baseline: CLOCK (evening preference), DEC2 (sleep duration need), PER2 (circadian period), CRY1 (delayed sleep phase), MTNR1B (melatonin receptor / nighttime glucose). The `daily_routines` enrichment answers provide behavioral confirmation. When genetic and behavioral signals agree, confidence is high. When they disagree, surface the conflict for user review. Caffeine cutoff cross-references caffeine metabolism markers (CYP1A2 rs762551, ADA rs73598374). MTNR1B status also informs late-eating recommendations. - -#### Entity: Aesthetic Taste Profile (`aesthetics.json`) - -Consolidates scattered aesthetic data into a structured profile. - -```json -{ - "profile": { - "visualStyle": [], - "narrativePreferences": [], - "musicProfile": [], - "designPrinciples": [], - "antiPatterns": [] - }, - "sources": { - "enrichmentAnswers": { "aesthetics": "...", "questionsAnswered": 0 }, - "bookAnalysis": { "themes": [], "sourceDoc": "BOOKS.md" }, - "movieAnalysis": { "themes": [], "sourceDoc": "MOVIES.md" }, - "musicAnalysis": { "themes": [], "sourceDoc": "AUDIO.md" } - }, - "questionnaire": { - "completed": false, - "sections": [ - "visual_design", - "color_and_mood", - "architecture_and_space", - "fashion_and_texture", - "sound_and_music", - "narrative_and_story", - "anti_preferences" - ] - }, - "updatedAt": null -} -``` - -**Derivation logic**: Taste is partially observable from existing enrichment data (book/movie/music lists). The aesthetic questionnaire fills in the rest via prompted sections β€” each section shows image/description pairs and asks for preference rankings. LLM analysis of existing media lists extracts themes (e.g., "brutalist minimalism", "high-contrast neon", "atmospheric dread") to seed the profile. 
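To make the chronotype derivation above concrete, here is a minimal sketch of the weighted composite. The marker weights follow the ordering stated later in the Design Decisions (CRY1 > CLOCK > PER2 > MTNR1B > DEC2), but the numeric values, signal scores, and thresholds are illustrative assumptions; `behavioralScore` is assumed to be a [-1, 1] evening/morning score already derived from `daily_routines` answers.

```js
// Sketch of deriveChronotypeFromGenome() plus behavioral blending (values illustrative).
const MARKER_WEIGHTS = { cry1: 0.3, clockGene: 0.25, per2: 0.2, mtnr1b: 0.15, dec2: 0.1 };

// Map each marker's signal to an evening (+1) / neutral (0) / morning (-1) score.
const SIGNAL_SCORES = { mild_evening: 1, delayed_sleep_phase: 1, strong_morning: -1 };

function deriveChronotype(geneticMarkers, behavioralScore) {
  // Weighted genetic composite in roughly [-1, 1]
  let genetic = 0;
  for (const [marker, weight] of Object.entries(MARKER_WEIGHTS)) {
    genetic += weight * (SIGNAL_SCORES[geneticMarkers[marker]?.signal] ?? 0);
  }

  // Behavioral input gets equal weight to the genetic composite
  const composite = (genetic + behavioralScore) / 2;
  const chronotype =
    composite > 0.15 ? 'evening' : composite < -0.15 ? 'morning' : 'intermediate';

  // Agreement between sources raises confidence; disagreement is surfaced for user review
  const agree = Math.sign(genetic) === Math.sign(behavioralScore);
  return { chronotype, confidence: agree ? 0.75 : 0.5, conflict: !agree };
}
```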
- -#### Entity: Mortality-Aware Goals (`goals.json`) - -```json -{ - "birthDate": "1980-01-15", - "lifeExpectancyEstimate": { - "baseline": 78.5, - "adjusted": null, - "adjustmentFactors": { - "geneticLongevity": null, - "cardiovascularRisk": null, - "lifestyle": null - }, - "source": "SSA actuarial table + genome markers" - }, - "timeHorizons": { - "yearsRemaining": null, - "healthyYearsRemaining": null, - "percentLifeComplete": null - }, - "goals": [ - { - "id": "uuid", - "title": "...", - "description": "...", - "horizon": "5-year", - "category": "creative|family|health|financial|legacy|mastery", - "urgency": null, - "status": "active|completed|abandoned", - "milestones": [], - "createdAt": "...", - "updatedAt": "..." - } - ], - "updatedAt": null -} -``` - -**Derivation logic**: Birth date + actuarial baseline + genome longevity/cardiovascular markers produce an adjusted life expectancy. This creates urgency scoring: a "legacy" goal with a 20-year timeline hits differently at 30% life-complete vs 70%. Goals are categorized and scored by time-decay urgency. The system can suggest reprioritization when markers indicate risk factors (e.g., high cardiovascular genetic risk β†’ prioritize health goals). - -### Entity Relationships - -``` - β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” - β”‚ identity.json β”‚ - β”‚ (orchestrator) β”‚ - β””β”€β”€β”¬β”€β”€β”€β”¬β”€β”€β”€β”¬β”€β”€β”€β”¬β”€β”€β”˜ - β”‚ β”‚ β”‚ β”‚ - β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ β”‚ β”‚ └──────────┐ - β–Ό β–Ό β–Ό β–Ό - β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β” β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β” - β”‚ Genome β”‚ β”‚Chronotypeβ”‚ β”‚Aestheticsβ”‚ β”‚ Goals β”‚ - β”‚genome.jsonβ”‚ β”‚chrono.jsonβ”‚ β”‚aesth.jsonβ”‚ β”‚goals.jsonβ”‚ - β””β”€β”€β”€β”€β”¬β”€β”€β”€β”€β”˜ β””β”€β”€β”€β”€β”¬β”€β”€β”€β”€β”€β”˜ β””β”€β”€β”€β”€β”¬β”€β”€β”€β”€β”€β”˜ β””β”€β”€β”€β”€β”¬β”€β”€β”€β”€β”˜ - β”‚ β”‚ β”‚ β”‚ - β”‚ β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ β”‚ β”‚ - β”‚ β”‚ derives from β”‚ β”‚ - β”œβ”€β”€β”€β”€β”€ sleep markers β”‚ β”‚ - β”‚ β”‚ β”‚ β”‚ - β”‚ β”‚ caffeine cutoff ◄────── β”‚ - β”‚ β”‚ from caffeine markers β”‚ β”‚ - β”‚ β”‚ β”‚ β”‚ - β”‚ └──────────────────────── β”‚ - β”‚ β”‚ β”‚ - β”‚ longevity/cardio ────────────────────► β”‚ - β”‚ markers inform β”‚ urgency β”‚ - β”‚ life expectancy β”‚ scoring β”‚ - β”‚ β”‚ β”‚ - β”‚ β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ β”‚ - β”‚ β”‚ derives from β”‚ - β”‚ β”‚ enrichment: aesthetics, β”‚ - β”‚ β”‚ books, movies, music β”‚ - β”‚ β”‚ β”‚ - β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”΄β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ - All reference meta.json - (documents, enrichment, traits) -``` - -**Cross-cutting links** (stored in `identity.json.crossLinks`): -- `genome:sleep` β†’ `chronotype:genetic` (CLOCK/DEC2/PER2/CRY1/MTNR1B markers feed chronotype) -- `genome:caffeine` β†’ `chronotype:recommendations.caffeineCutoff` (CYP1A2/ADA markers set cutoff) -- `genome:sleep:mtnr1b` β†’ `chronotype:recommendations.lateEatingCutoff` (MTNR1B impairs nighttime glucose) -- `genome:longevity` + `genome:cardiovascular` β†’ `goals:lifeExpectancyEstimate` (risk-adjusted lifespan) -- `enrichment:daily_routines` β†’ `chronotype:behavioral` (self-reported schedule) -- `enrichment:aesthetics` + `enrichment:favorite_*` + `enrichment:music_taste` β†’ `aesthetics:profile` (taste extraction) -- `traits:valuesHierarchy` β†’ `goals:category` priority weighting (autonomy-valuing person 
weights mastery goals higher) - -### Identity Page Structure - -The existing Digital Twin page at `/digital-twin/:tab` gets a new **Identity** tab that serves as the unified view. Individual subsystem tabs (Genome, Enrich) remain for deep dives. - -#### Route: `/digital-twin/identity` - -``` -β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” -β”‚ Digital Twin β”‚ -β”‚ Overview | Documents | ... | Identity | Genome | ... β”‚ -β”œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€ -β”‚ β”‚ -β”‚ β”Œβ”€ Identity Dashboard ──────────────────────────────────┐ β”‚ -β”‚ β”‚ Completeness: β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–‘β–‘ 72% β”‚ β”‚ -β”‚ β”‚ 4 sections: Genome βœ“ Chronotype ◐ Taste β—‹ Goals β—‹β”‚ β”‚ -β”‚ β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ β”‚ -β”‚ β”‚ -β”‚ β”Œβ”€ Genome Summary Card ─────────────────────────────────┐ β”‚ -β”‚ β”‚ 117 markers scanned across 32 categories β”‚ β”‚ -β”‚ β”‚ Key findings: ~20 beneficial, ~40 concern, ~5 major β”‚ β”‚ -β”‚ β”‚ [View Full Genome β†’] β”‚ β”‚ -β”‚ β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ β”‚ -β”‚ β”‚ -β”‚ β”Œβ”€ Chronotype Card ─────────────────────────────────────┐ β”‚ -β”‚ β”‚ Type: Evening Owl (75% confidence from 5 markers) β”‚ β”‚ -β”‚ β”‚ Genetic: CLOCK T/C + CRY1 C/C + PER2 C/C + DEC2 G/Gβ”‚ β”‚ -β”‚ β”‚ Peak focus: 8pm-2am | Caffeine cutoff: 2pm β”‚ β”‚ -β”‚ β”‚ Late eating cutoff: 8pm (MTNR1B-informed) β”‚ β”‚ -β”‚ β”‚ [Configure Schedule β†’] β”‚ β”‚ -β”‚ β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ β”‚ -β”‚ β”‚ -β”‚ β”Œβ”€ Aesthetic Taste Card ────────────────────────────────┐ β”‚ -β”‚ β”‚ Taste Tab: 0/5 sections completed (P2 UI ready) β”‚ β”‚ -β”‚ β”‚ Detected themes from media: brutalist, atmospheric β”‚ β”‚ -β”‚ β”‚ [Continue Taste Questionnaire β†’] [Go to Taste Tab β†’] β”‚ β”‚ -β”‚ β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ β”‚ -β”‚ β”‚ -β”‚ β”Œβ”€ Life Goals Card ─────────────────────────────────────┐ β”‚ -β”‚ β”‚ Status: Not configured β”‚ β”‚ -β”‚ β”‚ Set birth date and goals to enable mortality-aware β”‚ β”‚ -β”‚ β”‚ priority scoring β”‚ β”‚ -β”‚ β”‚ [Set Up Goals β†’] β”‚ β”‚ -β”‚ β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ β”‚ -β”‚ β”‚ -β”‚ β”Œβ”€ Cross-Insights ──────────────────────────────────────┐ β”‚ -β”‚ β”‚ "Your CLOCK gene evening tendency + caffeine β”‚ β”‚ -β”‚ β”‚ sensitivity suggest cutting coffee by 2pm" β”‚ β”‚ -β”‚ β”‚ "Longevity marker FOXO3A T/T (concern) + IL-6 C/C β”‚ β”‚ -β”‚ β”‚ (inflammation concern) β€” prioritize health goals" β”‚ β”‚ -β”‚ 
β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ β”‚ -β”‚ β”‚ -β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ -``` - -#### Sub-routes for deep dives: -- `/digital-twin/identity` β€” Dashboard overview (above) -- `/digital-twin/identity/chronotype` β€” Full chronotype editor with schedule builder -- `/digital-twin/identity/taste` β€” Aesthetic questionnaire flow (section-by-section) -- `/digital-twin/identity/goals` β€” Goal CRUD with urgency visualization -- `/digital-twin/genome` β€” Existing genome tab (unchanged) - -### Implementation Phases - -#### P1: Identity Orchestrator & Chronotype (data layer) -- Create `data/digital-twin/identity.json` with section status tracking -- Create `server/services/identity.js` β€” orchestrator that reads from genome, enrichment, taste-profile, and new data files -- Create `data/digital-twin/chronotype.json` β€” derive from 5 genome sleep markers + daily_routines enrichment -- Add `GET /api/digital-twin/identity` route returning unified section status -- Add `GET/PUT /api/digital-twin/identity/chronotype` routes -- Derivation function: `deriveChronotypeFromGenome(genomeSummary)` extracts all 5 sleep markers (CLOCK, DEC2, PER2, CRY1, MTNR1B) β†’ composite chronotype signal with weighted confidence -- Cross-reference CYP1A2/ADA caffeine markers and MTNR1B melatonin receptor for caffeine cutoff and late-eating recommendations - -#### P2: Aesthetic Taste Questionnaire βœ… -- Created `data/digital-twin/taste-profile.json` for structured taste preference storage -- Created `server/services/taste-questionnaire.js` with 5 taste sections (movies, music, visual_art, architecture, food), each with core questions and branching follow-ups triggered by keyword detection -- Added 7 API routes under `/api/digital-twin/taste/*` (profile, sections, next question, answer, responses, summary, reset) -- Built `TasteTab.jsx` conversational Q&A UI with section grid, question flow, review mode, and AI-powered summary generation -- Responses persisted to taste-profile.json and appended to AESTHETICS.md for digital twin context -- Added Taste tab to Digital Twin page navigation - -#### P3: Mortality-Aware Goal Tracking -- Create `data/digital-twin/goals.json` -- Add `GET/POST/PUT/DELETE /api/digital-twin/identity/goals` routes -- Birth date input + SSA actuarial table lookup -- Genome-adjusted life expectancy: weight longevity markers (5 markers: FOXO3A, IGF1R, CETP, IPMK, TP53) and cardiovascular risk markers (5 markers: Factor V, 9p21, Lp(a), LPA aspirin, PCSK9) into adjustment factor -- Time-horizon calculation: years remaining, healthy years, percent complete -- Urgency scoring: `urgency = (goalHorizonYears - yearsRemaining) / goalHorizonYears` normalized -- Goal CRUD with category tagging and milestone tracking - -#### P4: Identity Tab UI -- Add `identity` tab to `TABS` constant in `constants.js` -- Create `IdentityTab.jsx` with dashboard layout (4 summary cards + cross-insights) -- Create `ChronotypeEditor.jsx` β€” schedule visualization and override controls -- Create `TasteQuestionnaire.jsx` β€” section-by-section prompted flow -- Create `GoalTracker.jsx` β€” goal list with urgency heatmap and timeline view -- Wire sub-routes for deep dives - -#### P2.5: Digital Twin Aesthetic Taste Prompting (brain idea 
608dc733) - -##### Problem - -P2's Taste questionnaire uses static questions and keyword-triggered follow-ups. The questions are good but generic β€” they don't reference anything the twin already knows about the user. Brain idea 608dc733 proposes using the digital twin's existing knowledge (books, music, movie lists, enrichment answers, personality traits) to generate personalized, conversational prompts that feel like talking to someone who already knows you rather than filling out a survey. - -##### What Data to Capture - -The aesthetic taste system captures preferences across **7 domains**, extending P2's 5 sections with 2 new ones (fashion/texture and digital/interface): - -| Domain | Data Captured | Sources That Seed It | -|--------|--------------|---------------------| -| **Movies & Film** | Visual style preferences, narrative structure, mood/atmosphere, genre affinities, anti-preferences, formative films | BOOKS.md (narrative taste), enrichment:favorite_movies, existing P2 responses | -| **Music & Sound** | Functional use (focus/energy/decompress), genre affinities, production preferences, anti-sounds, formative artists | AUDIO.md, enrichment:music_taste, existing P2 responses | -| **Visual Art & Design** | Minimalism vs maximalism spectrum, color palette preferences, design movements, typography, layout sensibility | CREATIVE.md, enrichment:aesthetics, existing P2 responses | -| **Architecture & Spaces** | Material preferences, light quality, scale/intimacy, indoor-outdoor relationship, sacred vs functional | enrichment:aesthetics, existing P2 responses | -| **Food & Culinary** | Flavor profiles, cuisine affinities, cooking philosophy, dining experience priorities, sensory texture preferences | enrichment:daily_routines (meal patterns), existing P2 responses | -| **Fashion & Texture** *(new)* | Material/fabric preferences, silhouette comfort, color wardrobe, formality spectrum, tactile sensitivity | genome:sensory markers (if available), enrichment:aesthetics | -| **Digital & Interface** *(new)* | Dark vs light mode, information density, animation tolerance, typography preferences, notification style, tool aesthetics | PREFERENCES.md, existing PortOS theme choices (port-bg, port-card etc.) | - -Each domain captures: -- **Positive affinities** β€” what they're drawn to and why -- **Anti-preferences** β€” what they actively avoid (often more revealing than likes) -- **Functional context** β€” how the preference serves them (focus, comfort, identity, social) -- **Formative influences** β€” early experiences that shaped the preference -- **Evolution** β€” how the preference has changed over time - -##### Conversational Prompting Flow - -The key design principle: **conversation, not survey**. The twin generates questions that reference things it already knows, creating a dialogue that feels like it's building on shared context. - -**Flow architecture:** - -``` -β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” -β”‚ 1. Context Aggregation β”‚ -β”‚ Read: BOOKS.md, AUDIO.md, CREATIVE.md, β”‚ -β”‚ PREFERENCES.md, enrichment answers, β”‚ -β”‚ existing taste-profile.json responses, β”‚ -β”‚ personality traits (Big Five Openness) β”‚ -β”œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€ -β”‚ 2. 
Static Core Question (from P2) β”‚ -β”‚ Serve the existing static question first β”‚ -β”‚ to establish baseline in that domain β”‚ -β”œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€ -β”‚ 3. Personalized Follow-Up Generation β”‚ -β”‚ LLM generates 1 contextual follow-up using β”‚ -β”‚ identity context + previous answer β”‚ -β”‚ e.g., "You listed Blade Runner β€” what about β”‚ -β”‚ its visual language specifically grabbed you?" β”‚ -β”œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€ -β”‚ 4. Depth Probing (optional, user-initiated) β”‚ -β”‚ "Want to go deeper?" button generates β”‚ -β”‚ another personalized question that connects β”‚ -β”‚ across domains (e.g., music taste ↔ visual) β”‚ -β”œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€ -β”‚ 5. Summary & Synthesis β”‚ -β”‚ After core + follow-ups complete, LLM β”‚ -β”‚ generates section summary + cross-domain β”‚ -β”‚ pattern detection β”‚ -β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ -``` - -**Prompt template for personalized question generation:** - -``` -You are a thoughtful interviewer building an aesthetic taste profile. -You already know the following about this person: - -## Identity Context -{identityContext β€” excerpts from BOOKS.md, AUDIO.md, enrichment answers, traits} - -## Previous Responses in This Section -{existingResponses β€” Q&A pairs from taste-profile.json for this section} - -## Section: {sectionLabel} - -Generate ONE follow-up question that: -1. References something specific from their identity context or previous answers -2. Probes WHY they prefer what they do, not just WHAT -3. Feels conversational β€” like a friend who knows them asking a natural question -4. Explores an angle their previous answers haven't covered yet -5. Is concise (1-2 sentences max) - -Do NOT: -- Ask generic questions that ignore the context -- Repeat topics already covered in previous responses -- Use survey language ("On a scale of 1-10...") -- Ask multiple questions at once -``` - -**Example personalized exchanges:** - -> **Static (P2):** "Name 3-5 films you consider near-perfect." -> **User:** "Blade Runner, Stalker, Lost in Translation, Drive, Arrival" -> -> **Personalized (P2.5):** "Your BOOKS.md lists several sci-fi titles with themes of isolation and altered perception. Four of your five film picks share that same atmosphere. Is solitude a feature of stories you're drawn to, or is it more about the specific visual treatment of lonely spaces?" - -> **Static (P2):** "What artists or albums have had a lasting impact?" -> **User:** "Radiohead, Boards of Canada, Massive Attack" -> -> **Personalized (P2.5):** "All three of those artists layer heavy texture over minimalist structures. Your CREATIVE.md mentions an appreciation for 'controlled complexity.' Does this principle β€” density within restraint β€” apply to how you think about visual design too?" - -##### Data Model β€” Where Taste Lives - -Taste data lives in **two files** with distinct roles: - -**1. 
Raw questionnaire responses: `data/digital-twin/taste-profile.json`** (existing, extended) - -```json -{ - "version": "2.0.0", - "createdAt": "...", - "updatedAt": "...", - "sections": { - "movies": { - "status": "completed", - "responses": [ - { - "questionId": "movies-core-1", - "answer": "Blade Runner, Stalker, Lost in Translation...", - "answeredAt": "...", - "source": "static" - }, - { - "questionId": "movies-p25-1", - "answer": "It's not solitude per se, it's the visual...", - "answeredAt": "...", - "source": "personalized", - "generatedQuestion": "Your BOOKS.md lists several sci-fi titles...", - "identityContextUsed": ["BOOKS.md:sci-fi-themes", "taste:movies-core-1"] - } - ], - "summary": "..." - }, - "fashion": { "status": "pending", "responses": [], "summary": null }, - "digital": { "status": "pending", "responses": [], "summary": null } - }, - "profileSummary": null, - "lastSessionAt": null -} -``` - -Changes from v1: -- `source` field distinguishes static vs personalized questions -- `generatedQuestion` stores the LLM-generated question text (since personalized questions aren't in the static definition) -- `identityContextUsed` tracks which identity sources informed the question (for provenance) -- Two new sections: `fashion`, `digital` -- Version bumped to 2.0.0 - -**2. Synthesized aesthetic profile: `data/digital-twin/aesthetics.json`** (planned in P1, populated by P2.5) - -```json -{ - "version": "1.0.0", - "updatedAt": "...", - "profile": { - "visualStyle": ["brutalist minimalism", "high-contrast neon", "controlled complexity"], - "narrativePreferences": ["isolation themes", "slow burn", "ambiguity over resolution"], - "musicProfile": ["textural electronica", "atmospheric layering", "functional listening"], - "spatialPreferences": ["raw materials", "dramatic light", "intimacy over grandeur"], - "culinaryIdentity": ["umami-driven", "improvisational cooking", "experience over formality"], - "fashionSensibility": ["monochrome", "natural fibers", "minimal branding"], - "digitalAesthetic": ["dark mode", "high information density", "subtle animation"], - "antiPatterns": ["visual clutter", "forced symmetry", "saccharine sentimentality"], - "corePrinciples": ["density within restraint", "function informing form", "earned complexity"] - }, - "sources": { - "tasteQuestionnaire": { - "sectionsCompleted": 7, - "totalResponses": 28, - "lastUpdated": "..." 
- }, - "enrichment": { - "aesthetics": { "questionsAnswered": 5 }, - "favoriteBooks": { "analyzed": true, "themes": ["existential sci-fi", "systems thinking"] }, - "favoriteMovies": { "analyzed": true, "themes": ["atmospheric isolation", "neon noir"] }, - "musicTaste": { "analyzed": true, "themes": ["textural electronica", "ambient"] } - }, - "documents": ["BOOKS.md", "AUDIO.md", "CREATIVE.md", "PREFERENCES.md"] - }, - "crossDomainPatterns": [ - "Preference for 'controlled complexity' appears across music (layered textures), visual art (minimalist structure with dense detail), architecture (raw materials with precise placement), and food (complex umami built from simple ingredients)", - "Anti-preference for overt sentimentality spans film (avoids melodrama), music (dislikes saccharine pop), and design (rejects decorative ornamentation)" - ], - "genomicCorrelations": { - "tasteReceptorGenes": "TAS2R38 status may correlate with bitter-food tolerance preferences", - "sensoryProcessing": "Olfactory receptor variants may explain heightened texture sensitivity" - } -} -``` - -This file is the **canonical aesthetic profile** referenced by the Identity orchestrator (`identity.json`). It is regenerated whenever taste-profile.json accumulates significant new responses. - -##### Implementation Steps - -1. **Add 2 new sections** to `TASTE_SECTIONS` in `taste-questionnaire.js`: `fashion` and `digital`, each with 3 core questions and keyword-triggered follow-ups -2. **Add `aggregateIdentityContext(sectionId)`** to `taste-questionnaire.js` β€” reads BOOKS.md, AUDIO.md, CREATIVE.md, PREFERENCES.md, enrichment answers, and existing taste responses to build a context string for the LLM -3. **Add `generatePersonalizedTasteQuestion(sectionId, existingResponses, identityContext)`** β€” calls the active AI provider with the prompt template above, returns a single personalized follow-up question -4. **Add `POST /api/digital-twin/taste/:section/personalized-question`** route that returns a generated question -5. **Extend `submitAnswer()`** to accept `source: 'personalized'` and store `generatedQuestion` + `identityContextUsed` metadata -6. **Add "Go deeper" button** to TasteTab.jsx after each static follow-up cycle completes β€” clicking it calls the personalized question endpoint -7. **Add `generateAestheticsProfile()`** to `taste-questionnaire.js` β€” synthesizes all taste-profile.json responses + enrichment data into `aesthetics.json` -8. **Bump taste-profile.json version** to 2.0.0, migrate existing responses to include `source: 'static'` -9. **Update TasteTab.jsx** to render personalized questions differently (subtle indicator showing the twin referenced specific context) - -##### Prerequisite Relaxation - -The original spec listed P1 (Identity orchestrator) as a hard prerequisite. This is relaxed: P2.5 can read identity documents directly from the filesystem (`BOOKS.md`, `AUDIO.md`, etc.) and enrichment data from `meta.json` without needing the orchestrator layer. The orchestrator becomes useful for caching and cross-section queries but is not strictly required for context aggregation. 
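To ground steps 2-4 of the implementation list above, here is a minimal sketch of the personalized-question generator. `generateWithActiveProvider` is a hypothetical stand-in for whatever text-generation call the active AI provider exposes; its name and signature are assumptions, as is the shape of the returned object.

```js
// Sketch only: builds the P2.5 prompt and asks the active provider for one follow-up question.
async function generatePersonalizedTasteQuestion(
  sectionId,
  existingResponses,
  identityContext,
  generateWithActiveProvider // assumed injected provider call: (prompt) => Promise<string>
) {
  const previous = existingResponses
    .map((r) => `Q: ${r.generatedQuestion ?? r.questionId}\nA: ${r.answer}`)
    .join('\n\n');

  const prompt = [
    'You are a thoughtful interviewer building an aesthetic taste profile.',
    'You already know the following about this person:',
    `## Identity Context\n${identityContext}`,
    `## Previous Responses in This Section\n${previous}`,
    `## Section: ${sectionId}`,
    'Generate ONE concise, conversational follow-up question that references',
    'something specific from the context and probes WHY they prefer it, not just WHAT.',
  ].join('\n\n');

  const question = (await generateWithActiveProvider(prompt)).trim();
  return { question, source: 'personalized' };
}
```

The `POST /api/digital-twin/taste/:section/personalized-question` route from step 4 would call this with the output of `aggregateIdentityContext(sectionId)` and the section's stored responses, then persist the answer with `source: 'personalized'` and the `generatedQuestion` text.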
- -#### P5: Cross-Insights Engine -- Add `generateCrossInsights(identity)` in identity service -- Cross-reference genome markers with chronotype, goals, and enrichment data -- Generate natural-language insight strings (e.g., caffeine + chronotype, longevity + goal urgency) -- Display on Identity dashboard and inject into CoS context when relevant -- Consider autonomous job: periodic identity insight refresh -- Example cross-insights from current marker data: - - CLOCK + CRY1 + PER2 β†’ composite chronotype confidence (3 markers agreeing = high confidence evening/morning) - - MTNR1B concern + evening chronotype β†’ "avoid eating after 8pm β€” your melatonin receptor variant impairs late glucose handling" - - CYP1A2 slow metabolizer + CLOCK evening β†’ "caffeine cutoff by noon, not 2pm" - - FOXO3A/CETP/IGF1R longevity markers + cardiovascular risk β†’ adjusted life expectancy for goal urgency - -### Identity Extension Roadmap - -This roadmap connects brain ideas and the Genome Section Integration project (0e6a0332) into a unified implementation sequence. - -#### Source Ideas -- **Brain idea 608dc733**: "Prompting Aesthetic Taste Docs via Digital Twin" β€” use the twin's existing knowledge to generate personalized aesthetic preference questions -- **Brain idea 284dd487**: "Genome Types & Chronotype Trait" β€” derive chronotype from 5 sleep/circadian markers + behavioral data -- **Project 0e6a0332**: "Genome Section Integration" β€” unify genome data with Identity page architecture - -#### Phase Dependency Graph - -``` -P1: Identity Orchestrator & Chronotype ──── (brain idea 284dd487) - β”‚ Creates identity.json, chronotype.json, - β”‚ identity service, derivation from 5 sleep markers - β”‚ - β”œβ”€β–Ί P2.5: Personalized Taste Prompting ─── (brain idea 608dc733) - β”‚ Uses identity context to generate smart taste questions - β”‚ Enhances existing TasteTab with twin-aware follow-ups - β”‚ - β”œβ”€β–Ί P3: Mortality-Aware Goal Tracking - β”‚ Birth date + genome longevity/cardio markers β†’ life expectancy - β”‚ Urgency scoring for prioritized goal management - β”‚ - └─► P4: Identity Tab UI - Dashboard with summary cards for all 4 sections - Sub-routes for chronotype, taste, goals deep dives - β”‚ - └─► P5: Cross-Insights Engine - Reads all sections, generates natural-language insights - Injects identity context into CoS agent briefings -``` - -#### Implementation Priority -1. **P1** β€” Foundation: nothing else works without the orchestrator -2. **P2.5** β€” Quick win: enhances existing Taste tab with minimal new infrastructure -3. **P3** β€” New feature: mortality-aware goals need genome data flowing through identity service -4. **P4** β€” UI: renders what P1-P3 produce -5. 
**P5** β€” Polish: cross-entity reasoning requires all sections populated - -### Data Flow +## Planned Feature Details -``` -User uploads 23andMe β†’ genome.json (117 markers, 32 categories) - ↓ -Identity service reads 5 sleep markers + 2 caffeine markers - ↓ -Derives chronotype.json (+ behavioral input from daily_routines enrichment) - ↓ -Twin reads identity context β†’ generates personalized taste questions (P2.5) - ↓ -User completes taste questionnaire β†’ taste-profile.json β†’ aesthetics.json - ↓ -LLM analyzes books/movies/music docs β†’ seeds aesthetic profile themes - ↓ -User sets birth date β†’ goals.json (life expectancy from actuarial + 10 genome markers) - ↓ -Cross-insights engine reads all 4 sections β†’ generates natural-language insights - ↓ -Identity tab renders unified dashboard with summary cards + insights - ↓ -CoS injects identity context into agent briefings when relevant -``` +### M7: App Templates -### Files to Create/Modify +Templates allow creating new apps from pre-configured project structures. -**New files:** -- `data/digital-twin/identity.json` β€” orchestrator metadata -- `data/digital-twin/chronotype.json` β€” derived chronotype profile -- `data/digital-twin/aesthetics.json` β€” taste profile -- `data/digital-twin/goals.json` β€” mortality-aware goals -- `server/services/identity.js` β€” identity orchestration service -- `server/routes/identity.js` β€” API routes -- `server/lib/identityValidation.js` β€” Zod schemas -- `client/src/components/digital-twin/tabs/IdentityTab.jsx` β€” dashboard -- `client/src/components/digital-twin/identity/ChronotypeEditor.jsx` -- `client/src/components/digital-twin/identity/TasteQuestionnaire.jsx` -- `client/src/components/digital-twin/identity/GoalTracker.jsx` -- `client/src/components/digital-twin/identity/CrossInsights.jsx` +**Built-in Template: PortOS Stack** +- Express.js API server +- React + Vite frontend +- Tailwind CSS styling +- PM2 ecosystem configuration +- GitHub Actions CI/CD workflows +- Auto-versioning system -**Modified files:** -- `client/src/components/digital-twin/constants.js` β€” add Identity tab -- `client/src/pages/DigitalTwin.jsx` β€” add Identity tab rendering -- `client/src/services/api.js` β€” add identity API methods -- `server/index.js` β€” mount identity routes -- `server/services/taste-questionnaire.js` β€” add `generatePersonalizedTasteQuestion()` using identity context (P2.5) -- `client/src/components/digital-twin/tabs/TasteTab.jsx` β€” wire personalized question generation (P2.5) +**Features** +1. Template Selection - Browse available templates with feature descriptions +2. App Creation - Scaffold new project with chosen name and target directory +3. Custom Templates - Register additional templates from local paths +4. Template Management - View, edit, delete custom templates -### Design Decisions +**Pages** +- `/templates` - Template browser and app creation +- `/templates/new` - Register custom template -1. **Separate data files per section** (not one giant file) β€” each section has independent update cadence and the genome file (82KB) is already large -2. **Derivation over duplication** β€” chronotype reads from genome.json at query time rather than copying marker data. Identity service is the join layer -3. **Progressive disclosure** β€” Identity tab shows summary cards; deep dives are sub-routes, not modals (per CLAUDE.md: all views must be deep-linkable) -4. **LLM-assisted but user-confirmed** β€” aesthetic themes extracted by LLM from media lists are suggestions, not gospel. 
+**API Endpoints**
+| Route | Description |
+|-------|-------------|
+| GET /api/templates | List all templates |
+| POST /api/templates | Add custom template |
+| POST /api/templates/create | Create app from template |
+| DELETE /api/templates/:id | Remove custom template |

 ---

@@ -972,138 +125,27 @@ CoS injects identity context into agent briefings when relevant
 - [Troubleshooting](./docs/TROUBLESHOOTING.md) - Common issues and solutions

 ### Feature Documentation
+- [Agent Skills](./docs/features/agent-skills.md) - Task-type-specific prompt templates and routing
 - [App Wizard](./docs/features/app-wizard.md) - Register apps and create from templates
 - [Autofixer](./docs/features/autofixer.md) - Autonomous crash detection and repair
 - [Brain System](./docs/features/brain-system.md) - Second-brain capture and classification
+- [Browser Management](./docs/features/browser.md) - CDP/Playwright browser management
 - [Chief of Staff](./docs/features/chief-of-staff.md) - Autonomous agent orchestration
 - [CoS Agent Runner](./docs/features/cos-agent-runner.md) - Isolated agent process management
 - [CoS Enhancement](./docs/features/cos-enhancement.md) - M35 hybrid memory, missions, thinking levels
 - [Digital Twin](./docs/features/digital-twin.md) - Quantitative personality modeling
 - [Error Handling](./docs/features/error-handling.md) - Graceful error handling with auto-fix
+- [Identity System](./docs/features/identity-system.md) - Unified identity architecture (M42 spec)
 - [Memory System](./docs/features/memory-system.md) - Semantic memory with LLM classification
 - [Prompt Manager](./docs/features/prompt-manager.md) - Customizable AI prompts
 - [Soul System](./docs/features/soul-system.md) - Digital twin identity scaffold
-- [Browser Management](./docs/features/browser.md) - CDP/Playwright browser management

 ---

 ## Next Actions

-Based on recent work and incomplete milestones:
-
-1. **Complete M7: App Templates** - Implement template management UI and app scaffolding from templates
-2. **Digital Twin P3: Behavioral Feedback Loop** - Add "sounds like me" response validation and adaptive weighting
-3. **Vision API Polish** - Continue refining LM Studio vision integration based on test results
-4. **Memory Consolidation** - Implement automatic memory consolidation for similar memories
-5. **M40: Agent Skill System** - See details below
-
----
-
-## M40: Agent Skill System
-
-Inspired by [OpenAI Skills & Shell Tips](https://developers.openai.com/blog/skills-shell-tips), this milestone improves CoS agent accuracy and reliability through better task routing, prompt specificity, and context management.
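P1 below routes each task to one of six skill templates by keyword, checked in specificity order. A minimal sketch of such a matcher; the template names and the security/mobile-before-generic ordering come from the section, while the keyword lists themselves are assumptions:

```js
// Checked in order, most specific first, so "fix the mobile nav bug" matches
// mobile-responsive rather than bug-fix.
const SKILL_KEYWORDS = [
  { skill: 'security-audit',    keywords: ['security', 'vulnerability', 'audit', 'owasp'] },
  { skill: 'mobile-responsive', keywords: ['mobile', 'responsive', 'viewport', 'breakpoint'] },
  { skill: 'documentation',     keywords: ['document', 'readme', 'docs'] },
  { skill: 'refactor',          keywords: ['refactor', 'cleanup', 'dry up'] },
  { skill: 'bug-fix',           keywords: ['bug', 'fix', 'crash', 'error'] },
  { skill: 'feature',           keywords: ['feature', 'add', 'implement', 'build'] }
];

// Returns a template name such as 'security-audit', or null so the caller can skip
// loading any skill section and avoid token inflation.
function detectSkillTemplate(taskDescription = '') {
  const text = taskDescription.toLowerCase();
  const match = SKILL_KEYWORDS.find(({ keywords }) => keywords.some((kw) => text.includes(kw)));
  return match ? match.skill : null;
}

// Usage: only read data/prompts/skills/<name>.md when a skill actually matched.
// const skill = detectSkillTemplate(task.description);
// const skillTemplate = skill ? await loadSkillTemplate(skill) : null;
```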
-
-### P1: Task-Type-Specific Agent Prompts (Skill Templates) ✅
-Created specialized prompt templates per task category with routing, examples, and guidelines:
-- **Routing descriptions**: "Use when..." / "Don't use when..." sections in each skill template
-- **Embedded examples**: Worked examples of successful completions for each task type
-- **Task-specific guidelines**: Security audit includes OWASP checklist; feature includes validation/convention requirements; refactor emphasizes behavior preservation
-
-**Implemented**:
-- Added `data/prompts/skills/` directory with 6 task-type templates: `bug-fix.md`, `feature.md`, `security-audit.md`, `refactor.md`, `documentation.md`, `mobile-responsive.md`
-- Added `detectSkillTemplate()` and `loadSkillTemplate()` in `subAgentSpawner.js` with keyword-based matching (ordered by specificity — security/mobile before generic bug-fix/feature)
-- Updated `buildAgentPrompt()` to inject matched skill template into both the Mustache template system and the fallback template
-- Updated `cos-agent-briefing.md` with `{{#skillSection}}` conditional block
-- Templates only loaded when matched to avoid token inflation
-
-### P2: Agent Context Compaction ✅
-Long-running agents can hit context limits causing failures. Add proactive context management:
-- Pass `--max-turns` or equivalent context budget hints when spawning agents
-- Track agent output length and detect when agents are approaching context limits
-- ✅ Add compaction metadata to agent error analysis so retries can include "compact context" instructions
-- ✅ Update the agent briefing to include explicit output format constraints for verbose task types
-
-### P3: Negative Example Coverage for Task Routing ✅
-Improve task-to-model routing accuracy by adding negative examples to the model selection logic:
-- ✅ Document which task types should NOT use light models (already partially done, but formalize it)
-- ✅ Add "anti-patterns" to task learning: when a task type fails with a specific model, record the negative signal via `routingAccuracy` cross-reference (taskType × modelTier)
-- ✅ Surface routing accuracy metrics in the Learning tab so the user can see misroutes
-- ✅ Enhanced `suggestModelTier()` to use negative signal data for smarter tier avoidance
-
-### P4: Deterministic Workflow Skills ✅
-For recurring autonomous jobs (daily briefing, git maintenance, security audit, app improvement), encode the full workflow as a deterministic skill:
-- ✅ Each skill defines exact steps, expected outputs, and success criteria in `data/prompts/skills/jobs/`
-- ✅ Prevents prompt drift across runs — jobs now load structured skill templates instead of inline prompt strings
-- ✅ Skills are versioned and editable via the Prompt Manager UI (Job Skills tab)
-- ✅ `generateTaskFromJob()` builds effective prompts from skill template sections (Steps, Expected Outputs, Success Criteria)
-- ✅ API routes added: GET/PUT `/api/prompts/skills/jobs/:name`, preview via GET `/api/prompts/skills/jobs/:name/preview`
-
----
-
-## Error Handling Summary
-
-The server implements comprehensive error handling:
-- **asyncHandler**: All routes wrapped with error handler that catches uncaught errors
-- **ServerError**: Custom error class with status, code, severity, and context
-- **Socket.IO Events**: Errors broadcast to UI via `error:occurred` event
-- **Process Handlers**: Unhandled rejections and uncaught exceptions emit socket events
-- **Logging**: Errors logged with emoji prefixes, no server crashes
- 
See [Error Handling](./docs/features/error-handling.md) for details - ---- - -## Security Audit (2026-01-08) - -Comprehensive security audit performed by CoS Self-Improvement agent. - -### Vulnerabilities Found and Fixed - -1. **Command Injection in Git Service** (CRITICAL - FIXED) - - File: `server/services/git.js` - - Fix: Replaced `exec()` with `spawn()` and `shell: false`, added path validation - -2. **Path Traversal in Screenshots Route** (HIGH - FIXED) - - File: `server/routes/screenshots.js` - - Fix: Added `sanitizeFilename()` and path validation - -### Secure Patterns (No Issues Found) -- Command execution uses allowlist -- PM2 operations use spawn with shell: false -- Input validation with Zod schemas -- No dangerouslySetInnerHTML in React -- API keys stored server-side only -- JSON content type required for mutations - ---- - -## Planned Feature Details - -### M7: App Templates - -Templates allow creating new apps from pre-configured project structures. - -**Built-in Template: PortOS Stack** -- Express.js API server -- React + Vite frontend -- Tailwind CSS styling -- PM2 ecosystem configuration -- GitHub Actions CI/CD workflows -- Auto-versioning system - -**Features** -1. Template Selection - Browse available templates with feature descriptions -2. App Creation - Scaffold new project with chosen name and target directory -3. Custom Templates - Register additional templates from local paths -4. Template Management - View, edit, delete custom templates - -**Pages** -- `/templates` - Template browser and app creation -- `/templates/new` - Register custom template - -**API Endpoints** -| Route | Description | -|-------|-------------| -| GET /api/templates | List all templates | -| POST /api/templates | Add custom template | -| POST /api/templates/create | Create app from template | -| DELETE /api/templates/:id | Remove custom template | +1. **M42 P1: Identity Orchestrator & Chronotype** - Create identity.json, chronotype.json, identity service, derive chronotype from 5 genome sleep markers. See [Identity System](./docs/features/identity-system.md) +2. **M42 P2.5: Personalized Taste Prompting** - Enhance TasteTab with twin-aware follow-up questions using identity context from existing documents +3. **M7: App Templates** - Implement template management UI and app scaffolding from templates +4. **M34 P3: Behavioral Feedback Loop** - Add "sounds like me" response validation and adaptive weighting +5. 
**M42 P3: Mortality-Aware Goal Tracking** - Birth date + genome longevity markers for urgency-scored goals diff --git a/client/package.json b/client/package.json index 6deb94ed..3f7e1bca 100644 --- a/client/package.json +++ b/client/package.json @@ -1,6 +1,6 @@ { "name": "portos-client", - "version": "0.14.21", + "version": "0.15.15", "private": true, "type": "module", "scripts": { diff --git a/client/src/components/CosDashboardWidget.jsx b/client/src/components/CosDashboardWidget.jsx index 7a0295d0..176d5092 100644 --- a/client/src/components/CosDashboardWidget.jsx +++ b/client/src/components/CosDashboardWidget.jsx @@ -1,4 +1,4 @@ -import { useState, useEffect, memo } from 'react'; +import { useState, memo } from 'react'; import { Link } from 'react-router-dom'; import { CheckCircle, @@ -15,40 +15,26 @@ import { Activity } from 'lucide-react'; import * as api from '../services/api'; +import { useAutoRefetch } from '../hooks/useAutoRefetch'; /** * CosDashboardWidget - Compact CoS status widget for the main Dashboard * Shows today's progress, streak status, learning health, CoS running state, and recent tasks */ const CosDashboardWidget = memo(function CosDashboardWidget() { - const [summary, setSummary] = useState(null); - const [learningSummary, setLearningSummary] = useState(null); - const [recentTasks, setRecentTasks] = useState(null); - const [activityCalendar, setActivityCalendar] = useState(null); - const [loading, setLoading] = useState(true); - const [tasksExpanded, setTasksExpanded] = useState(false); - - useEffect(() => { - const loadData = async () => { - const silent = { silent: true }; - const [quickData, learningData, tasksData, calendarData] = await Promise.all([ - api.getCosQuickSummary(silent).catch(() => null), - api.getCosLearningSummary(silent).catch(() => null), - api.getCosRecentTasks(5, silent).catch(() => null), - api.getCosActivityCalendar(8, silent).catch(() => null) - ]); - setSummary(quickData); - setLearningSummary(learningData); - setRecentTasks(tasksData); - setActivityCalendar(calendarData); - setLoading(false); - }; + const { data: dashData, loading } = useAutoRefetch(async () => { + const silent = { silent: true }; + const [summary, learningSummary, recentTasks, activityCalendar] = await Promise.all([ + api.getCosQuickSummary(silent).catch(() => null), + api.getCosLearningSummary(silent).catch(() => null), + api.getCosRecentTasks(5, silent).catch(() => null), + api.getCosActivityCalendar(8, silent).catch(() => null) + ]); + return { summary, learningSummary, recentTasks, activityCalendar }; + }, 30000); - loadData(); - // Refresh every 30 seconds - const interval = setInterval(loadData, 30000); - return () => clearInterval(interval); - }, []); + const { summary, learningSummary, recentTasks, activityCalendar } = dashData ?? 
{}; + const [tasksExpanded, setTasksExpanded] = useState(false); // Don't render while loading if (loading) { diff --git a/client/src/components/DecisionLogWidget.jsx b/client/src/components/DecisionLogWidget.jsx index 9c44fae7..9e9728c0 100644 --- a/client/src/components/DecisionLogWidget.jsx +++ b/client/src/components/DecisionLogWidget.jsx @@ -1,4 +1,4 @@ -import { useState, useEffect, memo } from 'react'; +import { useState, memo } from 'react'; import { Link } from 'react-router-dom'; import { Eye, @@ -12,29 +12,19 @@ import { Zap } from 'lucide-react'; import * as api from '../services/api'; +import { useAutoRefetch } from '../hooks/useAutoRefetch'; /** * DecisionLogWidget - Shows transparency into CoS decision-making * Displays why tasks were skipped, intervals adjusted, or alternatives chosen */ const DecisionLogWidget = memo(function DecisionLogWidget() { - const [summary, setSummary] = useState(null); - const [loading, setLoading] = useState(true); + const { data: summary, loading } = useAutoRefetch( + () => api.getCosDecisionSummary({ silent: true }).catch(() => null), + 60000 + ); const [expanded, setExpanded] = useState(false); - useEffect(() => { - const loadData = async () => { - const data = await api.getCosDecisionSummary({ silent: true }).catch(() => null); - setSummary(data); - setLoading(false); - }; - - loadData(); - // Refresh every 60 seconds - const interval = setInterval(loadData, 60000); - return () => clearInterval(interval); - }, []); - // Don't render while loading or if no data if (loading || !summary) { return null; diff --git a/client/src/components/GoalProgressWidget.jsx b/client/src/components/GoalProgressWidget.jsx index abfd93ed..3b3ef676 100644 --- a/client/src/components/GoalProgressWidget.jsx +++ b/client/src/components/GoalProgressWidget.jsx @@ -1,4 +1,4 @@ -import { useState, useEffect, memo } from 'react'; +import { memo } from 'react'; import { Link } from 'react-router-dom'; import { Target, @@ -7,27 +7,17 @@ import { AlertTriangle } from 'lucide-react'; import * as api from '../services/api'; +import { useAutoRefetch } from '../hooks/useAutoRefetch'; /** * GoalProgressWidget - Shows progress toward user goals on the dashboard * Maps completed CoS tasks to goal categories from COS-GOALS.md */ const GoalProgressWidget = memo(function GoalProgressWidget() { - const [progress, setProgress] = useState(null); - const [loading, setLoading] = useState(true); - - useEffect(() => { - const loadData = async () => { - const data = await api.getCosGoalProgressSummary({ silent: true }).catch(() => null); - setProgress(data); - setLoading(false); - }; - - loadData(); - // Refresh every 60 seconds - const interval = setInterval(loadData, 60000); - return () => clearInterval(interval); - }, []); + const { data: progress, loading } = useAutoRefetch( + () => api.getCosGoalProgressSummary({ silent: true }).catch(() => null), + 60000 + ); // Don't render while loading or if no goals if (loading || !progress?.goals?.length) { diff --git a/client/src/components/UpcomingTasksWidget.jsx b/client/src/components/UpcomingTasksWidget.jsx index a38065c0..984e4044 100644 --- a/client/src/components/UpcomingTasksWidget.jsx +++ b/client/src/components/UpcomingTasksWidget.jsx @@ -1,4 +1,4 @@ -import { useState, useEffect, memo } from 'react'; +import { memo } from 'react'; import { Link } from 'react-router-dom'; import { Clock, @@ -10,30 +10,20 @@ import { Sparkles } from 'lucide-react'; import * as api from '../services/api'; +import { useAutoRefetch } from 
'../hooks/useAutoRefetch'; /** * UpcomingTasksWidget - Shows a preview of upcoming scheduled tasks * Helps users understand what the CoS will work on next */ const UpcomingTasksWidget = memo(function UpcomingTasksWidget() { - const [upcoming, setUpcoming] = useState([]); - const [loading, setLoading] = useState(true); - - useEffect(() => { - const loadData = async () => { - const data = await api.getCosUpcomingTasks(6).catch(() => []); - setUpcoming(data); - setLoading(false); - }; - - loadData(); - // Refresh every 60 seconds - const interval = setInterval(loadData, 60000); - return () => clearInterval(interval); - }, []); + const { data: upcoming, loading } = useAutoRefetch( + () => api.getCosUpcomingTasks(6).catch(() => []), + 60000 + ); // Don't render while loading or if no upcoming tasks - if (loading || !upcoming.length) { + if (loading || !upcoming?.length) { return null; } @@ -103,9 +93,9 @@ const UpcomingTasksWidget = memo(function UpcomingTasksWidget() { {/* Task List */}
{/* Ready Tasks */} - {readyTasks.slice(0, 3).map((task) => ( + {readyTasks.slice(0, 3).map((task, index) => (
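All four widget diffs above import a `useAutoRefetch` hook whose implementation is outside this hunk. A minimal sketch consistent with how the call sites use it: it returns `{ data, loading }`, runs the fetcher once on mount and then on an interval, and leaves error handling to each caller's `.catch()`. The real hook under `client/src/hooks/useAutoRefetch` may differ:

```js
import { useState, useEffect } from 'react';

// Polls an async fetcher on mount and every `intervalMs` thereafter.
// `loading` is only true before the first result, matching the widgets' initial-render guards.
export function useAutoRefetch(fetcher, intervalMs) {
  const [data, setData] = useState(null);
  const [loading, setLoading] = useState(true);

  useEffect(() => {
    let cancelled = false;

    const load = async () => {
      const result = await fetcher();
      if (!cancelled) {
        setData(result);
        setLoading(false);
      }
    };

    load();
    const interval = setInterval(load, intervalMs);
    return () => {
      cancelled = true;
      clearInterval(interval);
    };
    // The fetcher is intentionally left out of the deps: call sites pass a fresh
    // inline closure every render, mirroring the old useEffect(..., []) pattern.
    // eslint-disable-next-line react-hooks/exhaustive-deps
  }, [intervalMs]);

  return { data, loading };
}
```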