From 78ae9ad0f639742e766ad3098d467578de603103 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Wed, 18 Feb 2026 20:59:01 +0000 Subject: [PATCH 01/31] docs: archive changelog for v0.14.21 [skip ci] --- .changelog/{v0.14.x.md => v0.14.21.md} | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) rename .changelog/{v0.14.x.md => v0.14.21.md} (99%) diff --git a/.changelog/v0.14.x.md b/.changelog/v0.14.21.md similarity index 99% rename from .changelog/v0.14.x.md rename to .changelog/v0.14.21.md index 684f5ce1..88d22981 100644 --- a/.changelog/v0.14.x.md +++ b/.changelog/v0.14.21.md @@ -1,4 +1,4 @@ -# Release v0.14.x - Changelog +# Release v0.14.21 - Changelog Released: YYYY-MM-DD From e7303d880885d63d8ff2813462f99a408c626275 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Wed, 18 Feb 2026 20:59:24 +0000 Subject: [PATCH 02/31] build: prep v0.15.0 for next release [skip ci] --- client/package.json | 2 +- package-lock.json | 8 ++++---- package.json | 2 +- server/package.json | 2 +- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/client/package.json b/client/package.json index 6deb94ed..a2df0b19 100644 --- a/client/package.json +++ b/client/package.json @@ -1,6 +1,6 @@ { "name": "portos-client", - "version": "0.14.21", + "version": "0.15.0", "private": true, "type": "module", "scripts": { diff --git a/package-lock.json b/package-lock.json index 2d682b2c..71c877b7 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "portos", - "version": "0.14.21", + "version": "0.15.0", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "portos", - "version": "0.14.21", + "version": "0.15.0", "license": "MIT", "workspaces": [ "packages/*", @@ -29,7 +29,7 @@ }, "client": { "name": "portos-client", - "version": "0.14.21", + "version": "0.15.0", "dependencies": { "@dnd-kit/core": "^6.3.1", "@dnd-kit/sortable": "^10.0.0", @@ -9368,7 +9368,7 @@ }, "server": { "name": "portos-server", - "version": "0.14.21", + "version": "0.15.0", "dependencies": { "axios": "^1.7.9", "cors": "^2.8.5", diff --git a/package.json b/package.json index 4dd8f900..c79026c8 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "portos", - "version": "0.14.21", + "version": "0.15.0", "private": true, "description": "Local dev machine App OS portal", "author": "Adam Eivy (@antic|@atomantic)", diff --git a/server/package.json b/server/package.json index 359755ef..ed1095a6 100644 --- a/server/package.json +++ b/server/package.json @@ -1,6 +1,6 @@ { "name": "portos-server", - "version": "0.14.21", + "version": "0.15.0", "private": true, "type": "module", "scripts": { From 3816280d2a37dfbd0f4c1961b06df98a125f57ed Mon Sep 17 00:00:00 2001 From: Adam Eivy Date: Wed, 18 Feb 2026 13:58:17 -0800 Subject: [PATCH 03/31] docs: reorganize PLAN.md, extract feature docs, add scope boundary - Extracted M42 Identity System spec (~600 lines) to docs/features/identity-system.md - Extracted M40 Agent Skills summary to docs/features/agent-skills.md - Moved M40 from Planned to Completed (all P1-P4 done) - Removed M44 pump.fun content (belongs in pump.funner repo) - Removed orphaned research docs (pumpfun, kalshibot) for other projects - Added Scope Boundary rule to CLAUDE.md preventing cross-repo doc pollution - Updated Next Actions to reflect current priorities - Created v0.15.x changelog --- .changelog/v0.15.x.md | 18 + CLAUDE.md | 4 + PLAN.md | 1032 +---------------- docs/features/agent-skills.md | 42 + docs/features/identity-system.md | 612 ++++++++++ 
.../kalshibot-health-check-2026-02-17.md | 175 --- docs/research/pumpfun-data-sources.md | 459 -------- 7 files changed, 713 insertions(+), 1629 deletions(-) create mode 100644 .changelog/v0.15.x.md create mode 100644 docs/features/agent-skills.md create mode 100644 docs/features/identity-system.md delete mode 100644 docs/research/kalshibot-health-check-2026-02-17.md delete mode 100644 docs/research/pumpfun-data-sources.md diff --git a/.changelog/v0.15.x.md b/.changelog/v0.15.x.md new file mode 100644 index 00000000..a5fc1270 --- /dev/null +++ b/.changelog/v0.15.x.md @@ -0,0 +1,18 @@ +# Release v0.15.x - Changelog + +Released: YYYY-MM-DD + +## Overview + +Plan cleanup and documentation reorganization. + +## Improvements + +### Plan & Documentation Cleanup +- **Extracted M42 spec** to `docs/features/identity-system.md` -- removed ~600 lines from PLAN.md +- **Extracted M40 summary** to `docs/features/agent-skills.md` -- completed milestone moved to feature docs +- **Moved M40 to Completed** -- all 4 phases (skill templates, context compaction, negative routing, deterministic workflows) were done +- **Removed pump.fun content** (M44) -- belongs in pump.funner repo, not PortOS +- **Removed orphaned research docs** -- `pumpfun-data-sources.md` and `kalshibot-health-check-2026-02-17.md` were for other projects +- **Added Scope Boundary rule** to CLAUDE.md -- CoS agents must write research/plans/docs for managed apps in those apps' repos, not PortOS +- **Updated Next Actions** -- focused on M42 Identity System, M7 App Templates, M34 behavioral feedback diff --git a/CLAUDE.md b/CLAUDE.md index 24871c96..4225d302 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -60,6 +60,10 @@ PortOS depends on `portos-ai-toolkit` as an npm module for AI provider managemen - The toolkit uses spread in `updateProvider()` so existing providers preserve custom fields, but `createProvider()` has an explicit field list - After updating the toolkit, run `npm update portos-ai-toolkit` in PortOS to pull changes +## Scope Boundary + +When CoS agents or AI tools work on managed apps outside PortOS, all research, plans, docs, and code for those apps must be written to the target app's own repository/directory -- never to this repo. PortOS stores only its own features, plans, and documentation. If an agent generates a PLAN.md, research doc, or feature spec for another app, it goes in that app's directory. + ## Code Conventions - **No try/catch** - errors bubble to centralized middleware diff --git a/PLAN.md b/PLAN.md index b4aae25b..d2689a9d 100644 --- a/PLAN.md +++ b/PLAN.md @@ -66,896 +66,49 @@ pm2 logs - [x] **M37**: Autonomous Jobs - Recurring scheduled jobs that the CoS executes proactively using digital twin identity - [x] **M38**: Agent Tools - AI content generation, feed browsing, and autonomous engagement for Moltbook agents - [x] **M39**: Agent-Centric Drill-Down - Redesigned Agents section with agent-first hierarchy, deep-linkable URLs, and scoped sub-tabs -- [x] **M41**: CyberCity Immersive Overhaul - Procedural synthwave audio, enhanced post-processing (chromatic aberration, film grain, color grading), reflective wet-street ground, settings system, and atmosphere enhancements -- [x] **M43**: Moltworld Platform Support - Second platform integration for AI agents in a shared voxel world with movement, building, thinking, messaging, and SIM token economy +- [x] **M40**: Agent Skill System - Task-type-specific prompts, context compaction, negative routing examples, deterministic workflow skills. 
See [Agent Skills](./docs/features/agent-skills.md) +- [x] **M41**: CyberCity Immersive Overhaul - Procedural synthwave audio, enhanced post-processing, reflective wet-street ground, settings system +- [x] **M43**: Moltworld Platform Support - Second platform integration for AI agents in a shared voxel world ### Planned - [ ] **M7**: App Templates - Template management and app scaffolding from templates - [ ] **M34 P3,P5-P7**: Digital Twin - Behavioral feedback loop, multi-modal capture, advanced testing, personas -- [ ] **M40**: Agent Skill System - Task-type-specific prompt templates with routing logic, negative examples, and embedded workflows for improved agent accuracy and reliability -- [ ] **M42**: Unified Digital Twin Identity System - Connect Genome (117 markers, 32 categories), Chronotype (5 sleep markers + behavioral), Aesthetic Taste (P2 complete, P2.5 adds twin-aware prompting), and Mortality-Aware Goals into a single coherent Identity architecture with cross-insights engine -- [ ] **M44**: Pump.fun Launch Tracking Engine - Real-time pump.fun token detection via Helius webhooks, token enrichment via Birdeye, sniper account tracking, and launch analytics dashboard +- [ ] **M42**: Unified Digital Twin Identity System - See [Identity System](./docs/features/identity-system.md) --- -## M44: Pump.fun Launch Tracking Engine - -### Motivation - -The Solana memecoin ecosystem on pump.fun generates thousands of token launches daily. A small percentage become high-performers (10x+ returns). Sniper accounts — wallets that consistently buy into winning tokens within seconds of launch — represent a detectable signal for launch quality. This engine detects new launches in real-time, tracks token performance, inventories sniper accounts, and builds a data foundation for predicting upcoming high-performing launches. - -**Brain Project**: 467fbe07 — research complete (see `docs/research/pumpfun-data-sources.md`) - -### Data Source Selection - -| Source | Role | Tier | Cost/mo | -|--------|------|------|---------| -| **Helius** | Primary: real-time token detection, transaction monitoring | Developer | $49 | -| **Birdeye** | Enrichment: market cap, volume, security scores, OHLCV | Starter | $99 | -| **pump.fun Direct** | Supplement: creator metadata (sparingly, no SLA) | Free | $0 | - -**Total**: $148/mo for Phase 1+2 - -### Data Model - -All data persists to `data/pumpfun/` following PortOS conventions (entity stores with `records` keyed by ID, JSONL for append-heavy logs, 2s cache TTL). 
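
A minimal sketch of the read pattern these conventions imply is shown below, assuming a simple read-through cache; the helper name and cache shape are illustrative rather than the actual PortOS data-layer code.

```js
// Illustrative read-through cache for an entity store (records keyed by ID, 2s TTL).
// readStore and the cache shape are assumptions, not actual PortOS helpers.
import { readFile } from 'fs/promises';
import path from 'path';

const DATA_DIR = path.join(process.cwd(), 'data', 'pumpfun');
const CACHE_TTL_MS = 2000; // the 2s cache TTL convention
const cache = new Map();   // filename -> { data, readAt }

async function readStore(filename) {
  const hit = cache.get(filename);
  if (hit && Date.now() - hit.readAt < CACHE_TTL_MS) return hit.data;
  const raw = await readFile(path.join(DATA_DIR, filename), 'utf8');
  const data = JSON.parse(raw);
  cache.set(filename, { data, readAt: Date.now() });
  return data;
}

// Entity stores are keyed by ID; for tokens the key is the on-chain mint address
const { records } = await readStore('tokens.json');
const token = records['TokenMintAddress44chars'];
```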
- -#### `data/pumpfun/meta.json` — Configuration - -```json -{ - "version": "1.0.0", - "helius": { - "apiKey": null, - "webhookId": null, - "webhookUrl": null, - "programId": "6EF8rrecthR5Dkzon8Nwu78hRvfCKubJ14M5uBEwF6P", - "tier": "developer", - "rpsLimit": 10 - }, - "birdeye": { - "apiKey": null, - "tier": "starter", - "rpsLimit": 15 - }, - "tracking": { - "enabled": false, - "enrichmentIntervalMs": 60000, - "snapshotIntervalMs": 300000, - "retentionDays": 90, - "autoEnrich": true - }, - "filters": { - "minHolders": 10, - "minVolume24h": 1000, - "minLiquiditySol": 5, - "excludeRugPull": true - }, - "alerts": { - "volumeSpikeThreshold": 5, - "holderSpikeThreshold": 3, - "sniperOverlapThreshold": 3 - } -} -``` - -#### `data/pumpfun/tokens.json` — Tracked Tokens (Entity Store) - -```json -{ - "records": { - "TokenMintAddress44chars": { - "mint": "TokenMintAddress44chars", - "symbol": "PUMP", - "name": "Pump Token", - "creator": "CreatorWalletAddress", - "launchSignature": "txSignature", - "launchSlot": 123456789, - "launchAt": "2026-02-17T12:00:00.000Z", - "bondingCurve": { - "address": "BondingCurveAccountAddress", - "graduated": false, - "graduatedAt": null - }, - "status": "active", - "performance": { - "athMultiple": null, - "athPrice": null, - "athAt": null, - "currentPrice": null, - "priceAtLaunch": null - }, - "metrics": { - "holders": 0, - "volume24h": 0, - "marketCap": 0, - "liquidity": 0, - "securityScore": null - }, - "snipers": [], - "tags": [], - "enrichedAt": null, - "createdAt": "2026-02-17T12:00:00.000Z", - "updatedAt": "2026-02-17T12:00:00.000Z" - } - } -} -``` - -Key: mint address (not UUID) since tokens are uniquely identified by their on-chain mint. - -#### `data/pumpfun/snipers.json` — Sniper Account Inventory (Entity Store) - -```json -{ - "records": { - "WalletAddress": { - "wallet": "WalletAddress", - "label": null, - "stats": { - "totalSnipes": 0, - "successRate": 0, - "avgEntryDelaySec": 0, - "avgReturnMultiple": 0, - "bestReturn": null, - "worstReturn": null, - "activeSince": null - }, - "recentTokens": [], - "reputation": "unknown", - "tags": [], - "createdAt": "2026-02-17T12:00:00.000Z", - "updatedAt": "2026-02-17T12:00:00.000Z" - } - } -} -``` - -Reputation levels: `unknown` → `newcomer` → `consistent` → `elite` (based on success rate + volume). - -#### `data/pumpfun/events.jsonl` — Trade & Price Events (Append Log) - -```jsonl -{"id":"evt-uuid","mint":"TokenMint","type":"launch","creator":"Wallet","signature":"txSig","slot":123456789,"timestamp":"2026-02-17T12:00:00.000Z"} -{"id":"evt-uuid","mint":"TokenMint","type":"trade","side":"buy","wallet":"Wallet","amountSol":1.5,"amountTokens":1000000,"signature":"txSig","slot":123456790,"timestamp":"2026-02-17T12:00:01.000Z"} -{"id":"evt-uuid","mint":"TokenMint","type":"enrichment","holders":250,"volume24h":50000,"marketCap":120000,"liquidity":5000,"securityScore":85,"source":"birdeye","timestamp":"2026-02-17T12:01:00.000Z"} -{"id":"evt-uuid","mint":"TokenMint","type":"graduation","bondingCurve":"Address","signature":"txSig","timestamp":"2026-02-17T14:00:00.000Z"} -``` - -Event types: `launch`, `trade`, `enrichment`, `graduation`, `sniper_detected`, `alert`. 
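
As a rough illustration of the append-only pattern, the hypothetical helper below writes one event object per line; the function name and field handling are assumptions, not the actual service code.

```js
// Hypothetical helper for the append-only events.jsonl log: one JSON object per line.
import { appendFile } from 'fs/promises';
import { randomUUID } from 'crypto';
import path from 'path';

const EVENTS_FILE = path.join(process.cwd(), 'data', 'pumpfun', 'events.jsonl');

// type is one of: launch, trade, enrichment, graduation, sniper_detected, alert
async function appendEvent(type, mint, fields = {}) {
  const event = {
    id: `evt-${randomUUID()}`,
    mint,
    type,
    ...fields,
    timestamp: new Date().toISOString(),
  };
  // JSONL append: no need to read or rewrite the existing file
  await appendFile(EVENTS_FILE, JSON.stringify(event) + '\n', 'utf8');
  return event;
}

// Example: record a buy parsed from a Helius webhook payload
await appendEvent('trade', 'TokenMintAddress44chars', {
  side: 'buy',
  wallet: 'BuyerWalletAddress',
  amountSol: 1.5,
  signature: 'txSig',
  slot: 123456790,
});
```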
- -### MVP Architecture - -``` - ┌──────────────────────────────────┐ - │ Helius Webhooks │ - │ (pump.fun program monitoring) │ - └──────────────┬───────────────────┘ - │ - POST /api/pumpfun/webhook - (new token + trade events) - │ - ▼ -┌──────────────────────────────────────────────────────────────┐ -│ PortOS Server │ -│ │ -│ ┌─────────────────┐ ┌──────────────────┐ │ -│ │ Webhook Route │──▶│ PumpFun Service │ │ -│ │ (validates + │ │ - detectToken() │ │ -│ │ parses events) │ │ - recordTrade() │ │ -│ └─────────────────┘ │ - detectSniper() │ │ -│ │ - enrichToken() │ │ -│ ┌─────────────────┐ │ - getStats() │ │ -│ │ REST Routes │──▶│ - alertCheck() │ │ -│ │ GET /tokens │ └────────┬─────────┘ │ -│ │ GET /snipers │ │ │ -│ │ GET /stats │ ▼ │ -│ └─────────────────┘ ┌──────────────────┐ │ -│ │ Data Layer │ │ -│ ┌─────────────────┐ │ tokens.json │ │ -│ │ Enrichment │ │ snipers.json │ │ -│ │ Scheduler │──▶│ events.jsonl │ │ -│ │ (polls Birdeye │ │ meta.json │ │ -│ │ for active │ └──────────────────┘ │ -│ │ tokens) │ │ -│ └─────────────────┘ │ -│ │ -│ ┌─────────────────┐ ┌──────────────────┐ │ -│ │ Socket.IO │──▶│ Client UI │ │ -│ │ pumpfun:token │ │ /pumpfun │ │ -│ │ pumpfun:trade │ │ /pumpfun/tokens │ │ -│ │ pumpfun:alert │ │ /pumpfun/snipers │ │ -│ └─────────────────┘ └──────────────────┘ │ -└──────────────────────────────────────────────────────────────┘ - │ - Token enrichment - (market data, security) - │ - ▼ - ┌──────────────────────────────────┐ - │ Birdeye REST API │ - │ token_overview, token_security, │ - │ OHLCV, price history │ - └──────────────────────────────────┘ -``` - -### Implementation Phases - -#### P1: Token Detection (Helius webhook receiver) -- Create `data/pumpfun/` directory with `meta.json`, `tokens.json`, `snipers.json`, `events.jsonl` -- Create `server/services/pumpfun.js` — core service with token detection, event logging, file I/O with caching -- Create `server/routes/pumpfun.js` — webhook endpoint (`POST /api/pumpfun/webhook`) + REST endpoints -- Add Zod schemas for webhook payload validation and API inputs in `server/lib/validation.js` -- Parse Helius enhanced transaction events: extract mint address, creator, initial supply from `TOKEN_MINT` type events where `source` is `PUMP_FUN` -- Persist detected tokens to `tokens.json`, log launch events to `events.jsonl` -- Mount routes in `server/index.js` -- Emit Socket.IO `pumpfun:token` events for real-time UI updates - -#### P2: Token Enrichment (Birdeye integration) -- Add enrichment scheduler to `pumpfun.js` — polls Birdeye `/defi/token_overview` and `/defi/token_security` for active tokens -- Update token records with market cap, volume, holder count, security score, liquidity -- Track ATH (all-time high) price and multiple for each token -- Log enrichment snapshots to `events.jsonl` for historical tracking -- Detect bonding curve graduation events -- Filter out rug-pulls using Birdeye security endpoint (mint authority, freeze authority checks) - -#### P3: Sniper Detection & Analytics -- Parse early buy transactions (within first 60s of launch) from Helius trade events -- Cross-reference buyer wallets across multiple launches to identify repeat snipers -- Build sniper reputation scores: success rate, avg entry delay, avg return multiple -- Track sniper overlap — when 3+ known snipers enter the same token, flag as high signal -- Create `/pumpfun/snipers` REST endpoint for sniper leaderboard data -- Emit `pumpfun:alert` Socket.IO events when sniper overlap threshold exceeded - -#### P4: Dashboard UI -- Create 
`client/src/pages/PumpFun.jsx` — main page with tab navigation -- `/pumpfun/tokens` — live token feed with status, metrics, performance columns, sortable -- `/pumpfun/snipers` — sniper leaderboard with wallet, stats, recent tokens -- `/pumpfun/stats` — aggregate dashboard: launches/day, avg performer, top tokens, sniper activity -- `/pumpfun/settings` — API key management, filter config, alert thresholds -- Real-time updates via Socket.IO subscription -- Deep-linkable routes per CLAUDE.md conventions - -### Files to Create - -**New files:** -- `data/pumpfun/meta.json` — configuration -- `data/pumpfun/tokens.json` — token entity store -- `data/pumpfun/snipers.json` — sniper entity store -- `data/pumpfun/events.jsonl` — event log -- `server/services/pumpfun.js` — core service -- `server/routes/pumpfun.js` — API routes -- `client/src/pages/PumpFun.jsx` — dashboard page - -**Modified files:** -- `server/lib/validation.js` — add pumpfun Zod schemas -- `server/index.js` — mount pumpfun routes -- `client/src/App.jsx` — add PumpFun route -- `client/src/components/Sidebar.jsx` — add PumpFun nav item - -### Design Decisions - -1. **Mint address as entity key** (not UUID) — tokens are uniquely identified by their on-chain mint address, avoiding a mapping layer -2. **Webhook-first** — push model from Helius eliminates polling overhead and gives ~1s detection latency -3. **Enrichment on schedule, not inline** — Birdeye calls happen on a timer (60s default) for active tokens rather than blocking the webhook handler -4. **JSONL for events** — trade/price events are high-volume, append-only; JSONL avoids rewriting large files -5. **Sniper detection is cross-launch** — individual trades are meaningless; the value is in correlating the same wallet appearing in multiple successful early entries -6. **No external DB** — consistent with PortOS's JSON file persistence pattern; suitable for the expected data volume (hundreds of tokens/day, not millions) -7. **Security filtering built-in** — Birdeye's token_security endpoint flags rug-pull indicators early, preventing noise in the tracking data - ---- - -## M42: Unified Digital Twin Identity System - -### Motivation - -Four separate workstreams converge on the same vision: a personal digital twin that knows *who you are* biologically, temporally, aesthetically, and existentially. Today these live as disconnected features: - -| Subsystem | Current State | Location | -|-----------|--------------|----------| -| **Genome** | Fully implemented: 23andMe upload, 117 curated SNP markers across 32 categories, ClinVar integration, epigenetic tracking | `server/services/genome.js`, `GenomeTab.jsx`, `data/digital-twin/genome.json` | -| **Chronotype** | Genetic data ready: 5 sleep/circadian markers (CLOCK rs1801260, DEC2 rs57875989, PER2 rs35333999, CRY1 rs2287161, MTNR1B rs10830963) + `daily_routines` enrichment category. Derivation service not yet built | `curatedGenomeMarkers.js` sleep category, `ENRICHMENT_CATEGORIES.daily_routines` | -| **Aesthetic Taste** | P2 complete: Taste questionnaire with 5 sections (movies, music, visual_art, architecture, food), conversational Q&A, AI summary generation. 
Enrichment categories also feed taste data from book/movie/music lists | `TasteTab.jsx`, `taste-questionnaire.js`, `data/digital-twin/taste-profile.json` | -| **Goal Tracking** | Partially exists: `COS-GOALS.md` for CoS missions, `TASKS.md` for user tasks, `EXISTENTIAL.md` soul doc | `data/COS-GOALS.md`, `data/TASKS.md`, `data/digital-twin/EXISTENTIAL.md` | - -These should be unified under a single **Identity** architecture so the twin can reason across all dimensions (e.g., "your CLOCK gene says evening chronotype — schedule deep work after 8pm" or "given your longevity markers and age, here's how to prioritize your 10-year goals"). - -### Data Model - -#### Entity: `identity.json` (new, top-level twin orchestration) - -```json -{ - "version": "1.0.0", - "createdAt": "2026-02-12T00:00:00.000Z", - "updatedAt": "2026-02-12T00:00:00.000Z", - "sections": { - "genome": { "status": "active", "dataFile": "genome.json", "markerCount": 117, "categoryCount": 32, "lastScanAt": "..." }, - "chronotype": { "status": "active", "dataFile": "chronotype.json", "derivedFrom": ["genome:sleep", "enrichment:daily_routines"] }, - "aesthetics": { "status": "active", "dataFile": "aesthetics.json", "derivedFrom": ["enrichment:aesthetics", "enrichment:favorite_books", "enrichment:favorite_movies", "enrichment:music_taste"] }, - "goals": { "status": "active", "dataFile": "goals.json" } - }, - "crossLinks": [] -} -``` - -#### Entity: Chronotype Profile (`chronotype.json`) - -Derived from genome sleep markers + daily_routines enrichment answers + user overrides. - -```json -{ - "chronotype": "evening", - "confidence": 0.75, - "sources": { - "genetic": { - "clockGene": { "rsid": "rs1801260", "genotype": "T/C", "signal": "mild_evening" }, - "dec2": { "rsid": "rs57875989", "genotype": "G/G", "signal": "standard_sleep_need" }, - "per2": { "rsid": "rs35333999", "genotype": "C/C", "signal": "standard_circadian" }, - "cry1": { "rsid": "rs2287161", "genotype": "C/C", "signal": "standard_period" }, - "mtnr1b": { "rsid": "rs10830963", "genotype": "T/T", "signal": "normal_melatonin_receptor" } - }, - "behavioral": { - "preferredWakeTime": "08:30", - "preferredSleepTime": "00:30", - "peakFocusWindow": "20:00-02:00", - "energyDipWindow": "14:00-16:00" - } - }, - "recommendations": { - "deepWork": "20:00-02:00", - "lightTasks": "09:00-12:00", - "exercise": "17:00-19:00", - "caffeineCutoff": "14:00" - }, - "updatedAt": "2026-02-12T00:00:00.000Z" -} -``` - -**Derivation logic**: Five genome sleep markers provide the genetic baseline: CLOCK (evening preference), DEC2 (sleep duration need), PER2 (circadian period), CRY1 (delayed sleep phase), MTNR1B (melatonin receptor / nighttime glucose). The `daily_routines` enrichment answers provide behavioral confirmation. When genetic and behavioral signals agree, confidence is high. When they disagree, surface the conflict for user review. Caffeine cutoff cross-references caffeine metabolism markers (CYP1A2 rs762551, ADA rs73598374). MTNR1B status also informs late-eating recommendations. - -#### Entity: Aesthetic Taste Profile (`aesthetics.json`) - -Consolidates scattered aesthetic data into a structured profile. 
- -```json -{ - "profile": { - "visualStyle": [], - "narrativePreferences": [], - "musicProfile": [], - "designPrinciples": [], - "antiPatterns": [] - }, - "sources": { - "enrichmentAnswers": { "aesthetics": "...", "questionsAnswered": 0 }, - "bookAnalysis": { "themes": [], "sourceDoc": "BOOKS.md" }, - "movieAnalysis": { "themes": [], "sourceDoc": "MOVIES.md" }, - "musicAnalysis": { "themes": [], "sourceDoc": "AUDIO.md" } - }, - "questionnaire": { - "completed": false, - "sections": [ - "visual_design", - "color_and_mood", - "architecture_and_space", - "fashion_and_texture", - "sound_and_music", - "narrative_and_story", - "anti_preferences" - ] - }, - "updatedAt": null -} -``` - -**Derivation logic**: Taste is partially observable from existing enrichment data (book/movie/music lists). The aesthetic questionnaire fills in the rest via prompted sections — each section shows image/description pairs and asks for preference rankings. LLM analysis of existing media lists extracts themes (e.g., "brutalist minimalism", "high-contrast neon", "atmospheric dread") to seed the profile. - -#### Entity: Mortality-Aware Goals (`goals.json`) - -```json -{ - "birthDate": "1980-01-15", - "lifeExpectancyEstimate": { - "baseline": 78.5, - "adjusted": null, - "adjustmentFactors": { - "geneticLongevity": null, - "cardiovascularRisk": null, - "lifestyle": null - }, - "source": "SSA actuarial table + genome markers" - }, - "timeHorizons": { - "yearsRemaining": null, - "healthyYearsRemaining": null, - "percentLifeComplete": null - }, - "goals": [ - { - "id": "uuid", - "title": "...", - "description": "...", - "horizon": "5-year", - "category": "creative|family|health|financial|legacy|mastery", - "urgency": null, - "status": "active|completed|abandoned", - "milestones": [], - "createdAt": "...", - "updatedAt": "..." - } - ], - "updatedAt": null -} -``` - -**Derivation logic**: Birth date + actuarial baseline + genome longevity/cardiovascular markers produce an adjusted life expectancy. This creates urgency scoring: a "legacy" goal with a 20-year timeline hits differently at 30% life-complete vs 70%. Goals are categorized and scored by time-decay urgency. The system can suggest reprioritization when markers indicate risk factors (e.g., high cardiovascular genetic risk → prioritize health goals). 
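
A minimal sketch of the time-horizon and urgency arithmetic follows, using the urgency formula given in P3 of the implementation phases below; the genetic adjustment value is a placeholder, since the actual weighting of longevity and cardiovascular markers is left to the identity service.

```js
// Illustrative time-horizon and urgency math; the genetic adjustment is a stand-in value.
const YEAR_MS = 365.25 * 24 * 60 * 60 * 1000;

function computeTimeHorizons(birthDate, baselineYears, geneticAdjustmentYears = 0) {
  const ageYears = (Date.now() - new Date(birthDate).getTime()) / YEAR_MS;
  const adjusted = baselineYears + geneticAdjustmentYears;
  const yearsRemaining = Math.max(adjusted - ageYears, 0);
  return {
    adjustedLifeExpectancy: adjusted,
    yearsRemaining,
    percentLifeComplete: Math.min(ageYears / adjusted, 1),
  };
}

// Urgency per the P3 formula: rises as a goal's horizon exceeds the years left
function goalUrgency(goalHorizonYears, yearsRemaining) {
  const raw = (goalHorizonYears - yearsRemaining) / goalHorizonYears;
  return Math.min(Math.max(raw, 0), 1); // normalized to 0..1
}

const horizons = computeTimeHorizons('1980-01-15', 78.5, -1.5); // -1.5 years: placeholder adjustment
const urgency = goalUrgency(40, horizons.yearsRemaining);       // a 40-year legacy goal
```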
- -### Entity Relationships - -``` - ┌──────────────────┐ - │ identity.json │ - │ (orchestrator) │ - └──┬───┬───┬───┬──┘ - │ │ │ │ - ┌──────────┘ │ │ └──────────┐ - ▼ ▼ ▼ ▼ - ┌─────────┐ ┌──────────┐ ┌──────────┐ ┌─────────┐ - │ Genome │ │Chronotype│ │Aesthetics│ │ Goals │ - │genome.json│ │chrono.json│ │aesth.json│ │goals.json│ - └────┬────┘ └────┬─────┘ └────┬─────┘ └────┬────┘ - │ │ │ │ - │ ┌─────────┘ │ │ - │ │ derives from │ │ - ├────┤ sleep markers │ │ - │ │ │ │ - │ │ caffeine cutoff ◄─────┤ │ - │ │ from caffeine markers │ │ - │ │ │ │ - │ └───────────────────────┤ │ - │ │ │ - │ longevity/cardio ────────────────────► │ - │ markers inform │ urgency │ - │ life expectancy │ scoring │ - │ │ │ - │ ┌─────────────┘ │ - │ │ derives from │ - │ │ enrichment: aesthetics, │ - │ │ books, movies, music │ - │ │ │ - └──────────────┴────────────────────────────┘ - All reference meta.json - (documents, enrichment, traits) -``` - -**Cross-cutting links** (stored in `identity.json.crossLinks`): -- `genome:sleep` → `chronotype:genetic` (CLOCK/DEC2/PER2/CRY1/MTNR1B markers feed chronotype) -- `genome:caffeine` → `chronotype:recommendations.caffeineCutoff` (CYP1A2/ADA markers set cutoff) -- `genome:sleep:mtnr1b` → `chronotype:recommendations.lateEatingCutoff` (MTNR1B impairs nighttime glucose) -- `genome:longevity` + `genome:cardiovascular` → `goals:lifeExpectancyEstimate` (risk-adjusted lifespan) -- `enrichment:daily_routines` → `chronotype:behavioral` (self-reported schedule) -- `enrichment:aesthetics` + `enrichment:favorite_*` + `enrichment:music_taste` → `aesthetics:profile` (taste extraction) -- `traits:valuesHierarchy` → `goals:category` priority weighting (autonomy-valuing person weights mastery goals higher) - -### Identity Page Structure - -The existing Digital Twin page at `/digital-twin/:tab` gets a new **Identity** tab that serves as the unified view. Individual subsystem tabs (Genome, Enrich) remain for deep dives. - -#### Route: `/digital-twin/identity` - -``` -┌─────────────────────────────────────────────────────────────┐ -│ Digital Twin │ -│ Overview | Documents | ... | Identity | Genome | ... 
│ -├─────────────────────────────────────────────────────────────┤ -│ │ -│ ┌─ Identity Dashboard ──────────────────────────────────┐ │ -│ │ Completeness: ████████░░ 72% │ │ -│ │ 4 sections: Genome ✓ Chronotype ◐ Taste ○ Goals ○│ │ -│ └───────────────────────────────────────────────────────┘ │ -│ │ -│ ┌─ Genome Summary Card ─────────────────────────────────┐ │ -│ │ 117 markers scanned across 32 categories │ │ -│ │ Key findings: ~20 beneficial, ~40 concern, ~5 major │ │ -│ │ [View Full Genome →] │ │ -│ └───────────────────────────────────────────────────────┘ │ -│ │ -│ ┌─ Chronotype Card ─────────────────────────────────────┐ │ -│ │ Type: Evening Owl (75% confidence from 5 markers) │ │ -│ │ Genetic: CLOCK T/C + CRY1 C/C + PER2 C/C + DEC2 G/G│ │ -│ │ Peak focus: 8pm-2am | Caffeine cutoff: 2pm │ │ -│ │ Late eating cutoff: 8pm (MTNR1B-informed) │ │ -│ │ [Configure Schedule →] │ │ -│ └───────────────────────────────────────────────────────┘ │ -│ │ -│ ┌─ Aesthetic Taste Card ────────────────────────────────┐ │ -│ │ Taste Tab: 0/5 sections completed (P2 UI ready) │ │ -│ │ Detected themes from media: brutalist, atmospheric │ │ -│ │ [Continue Taste Questionnaire →] [Go to Taste Tab →] │ │ -│ └───────────────────────────────────────────────────────┘ │ -│ │ -│ ┌─ Life Goals Card ─────────────────────────────────────┐ │ -│ │ Status: Not configured │ │ -│ │ Set birth date and goals to enable mortality-aware │ │ -│ │ priority scoring │ │ -│ │ [Set Up Goals →] │ │ -│ └───────────────────────────────────────────────────────┘ │ -│ │ -│ ┌─ Cross-Insights ──────────────────────────────────────┐ │ -│ │ "Your CLOCK gene evening tendency + caffeine │ │ -│ │ sensitivity suggest cutting coffee by 2pm" │ │ -│ │ "Longevity marker FOXO3A T/T (concern) + IL-6 C/C │ │ -│ │ (inflammation concern) — prioritize health goals" │ │ -│ └───────────────────────────────────────────────────────┘ │ -│ │ -└─────────────────────────────────────────────────────────────┘ -``` - -#### Sub-routes for deep dives: -- `/digital-twin/identity` — Dashboard overview (above) -- `/digital-twin/identity/chronotype` — Full chronotype editor with schedule builder -- `/digital-twin/identity/taste` — Aesthetic questionnaire flow (section-by-section) -- `/digital-twin/identity/goals` — Goal CRUD with urgency visualization -- `/digital-twin/genome` — Existing genome tab (unchanged) - -### Implementation Phases - -#### P1: Identity Orchestrator & Chronotype (data layer) -- Create `data/digital-twin/identity.json` with section status tracking -- Create `server/services/identity.js` — orchestrator that reads from genome, enrichment, taste-profile, and new data files -- Create `data/digital-twin/chronotype.json` — derive from 5 genome sleep markers + daily_routines enrichment -- Add `GET /api/digital-twin/identity` route returning unified section status -- Add `GET/PUT /api/digital-twin/identity/chronotype` routes -- Derivation function: `deriveChronotypeFromGenome(genomeSummary)` extracts all 5 sleep markers (CLOCK, DEC2, PER2, CRY1, MTNR1B) → composite chronotype signal with weighted confidence -- Cross-reference CYP1A2/ADA caffeine markers and MTNR1B melatonin receptor for caffeine cutoff and late-eating recommendations - -#### P2: Aesthetic Taste Questionnaire ✅ -- Created `data/digital-twin/taste-profile.json` for structured taste preference storage -- Created `server/services/taste-questionnaire.js` with 5 taste sections (movies, music, visual_art, architecture, food), each with core questions and branching follow-ups triggered by keyword detection 
-- Added 7 API routes under `/api/digital-twin/taste/*` (profile, sections, next question, answer, responses, summary, reset) -- Built `TasteTab.jsx` conversational Q&A UI with section grid, question flow, review mode, and AI-powered summary generation -- Responses persisted to taste-profile.json and appended to AESTHETICS.md for digital twin context -- Added Taste tab to Digital Twin page navigation - -#### P3: Mortality-Aware Goal Tracking -- Create `data/digital-twin/goals.json` -- Add `GET/POST/PUT/DELETE /api/digital-twin/identity/goals` routes -- Birth date input + SSA actuarial table lookup -- Genome-adjusted life expectancy: weight longevity markers (5 markers: FOXO3A, IGF1R, CETP, IPMK, TP53) and cardiovascular risk markers (5 markers: Factor V, 9p21, Lp(a), LPA aspirin, PCSK9) into adjustment factor -- Time-horizon calculation: years remaining, healthy years, percent complete -- Urgency scoring: `urgency = (goalHorizonYears - yearsRemaining) / goalHorizonYears` normalized -- Goal CRUD with category tagging and milestone tracking - -#### P4: Identity Tab UI -- Add `identity` tab to `TABS` constant in `constants.js` -- Create `IdentityTab.jsx` with dashboard layout (4 summary cards + cross-insights) -- Create `ChronotypeEditor.jsx` — schedule visualization and override controls -- Create `TasteQuestionnaire.jsx` — section-by-section prompted flow -- Create `GoalTracker.jsx` — goal list with urgency heatmap and timeline view -- Wire sub-routes for deep dives - -#### P2.5: Digital Twin Aesthetic Taste Prompting (brain idea 608dc733) - -##### Problem - -P2's Taste questionnaire uses static questions and keyword-triggered follow-ups. The questions are good but generic — they don't reference anything the twin already knows about the user. Brain idea 608dc733 proposes using the digital twin's existing knowledge (books, music, movie lists, enrichment answers, personality traits) to generate personalized, conversational prompts that feel like talking to someone who already knows you rather than filling out a survey. 
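
To make the flow concrete before the details that follow, here is a minimal sketch of context aggregation plus a single provider call. The helper names mirror the planned Implementation Steps later in this section, while `callActiveProvider`, the document paths, and the abbreviated prompt are assumptions standing in for the real AI-toolkit integration.

```js
// Sketch of the personalized follow-up flow; names mirror the planned helpers below,
// and callActiveProvider is a hypothetical stand-in for the real AI provider call.
import { readFile } from 'fs/promises';
import path from 'path';

const TWIN_DIR = path.join(process.cwd(), 'data', 'digital-twin'); // assumed location
const IDENTITY_DOCS = ['BOOKS.md', 'AUDIO.md', 'CREATIVE.md', 'PREFERENCES.md'];

async function aggregateIdentityContext(sectionId, previousResponses) {
  const docs = await Promise.all(
    IDENTITY_DOCS.map((name) =>
      readFile(path.join(TWIN_DIR, name), 'utf8')
        .then((text) => `## ${name}\n${text.slice(0, 2000)}`) // keep the context bounded
        .catch(() => '') // a missing document simply contributes nothing
    )
  );
  const answered = previousResponses
    .map((r) => `Q: ${r.question}\nA: ${r.answer}`)
    .join('\n\n');
  return `${docs.filter(Boolean).join('\n\n')}\n\n## Previous ${sectionId} responses\n${answered}`;
}

async function generatePersonalizedTasteQuestion(sectionId, previousResponses, callActiveProvider) {
  const identityContext = await aggregateIdentityContext(sectionId, previousResponses);
  // Abbreviated form of the full prompt template shown later in this section
  const prompt = [
    'You are a thoughtful interviewer building an aesthetic taste profile.',
    'You already know the following about this person:',
    identityContext,
    `Section: ${sectionId}`,
    'Generate ONE concise, conversational follow-up question that references this context.',
  ].join('\n\n');
  const question = await callActiveProvider(prompt);
  return { question: question.trim(), source: 'personalized' };
}
```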
- -##### What Data to Capture - -The aesthetic taste system captures preferences across **7 domains**, extending P2's 5 sections with 2 new ones (fashion/texture and digital/interface): - -| Domain | Data Captured | Sources That Seed It | -|--------|--------------|---------------------| -| **Movies & Film** | Visual style preferences, narrative structure, mood/atmosphere, genre affinities, anti-preferences, formative films | BOOKS.md (narrative taste), enrichment:favorite_movies, existing P2 responses | -| **Music & Sound** | Functional use (focus/energy/decompress), genre affinities, production preferences, anti-sounds, formative artists | AUDIO.md, enrichment:music_taste, existing P2 responses | -| **Visual Art & Design** | Minimalism vs maximalism spectrum, color palette preferences, design movements, typography, layout sensibility | CREATIVE.md, enrichment:aesthetics, existing P2 responses | -| **Architecture & Spaces** | Material preferences, light quality, scale/intimacy, indoor-outdoor relationship, sacred vs functional | enrichment:aesthetics, existing P2 responses | -| **Food & Culinary** | Flavor profiles, cuisine affinities, cooking philosophy, dining experience priorities, sensory texture preferences | enrichment:daily_routines (meal patterns), existing P2 responses | -| **Fashion & Texture** *(new)* | Material/fabric preferences, silhouette comfort, color wardrobe, formality spectrum, tactile sensitivity | genome:sensory markers (if available), enrichment:aesthetics | -| **Digital & Interface** *(new)* | Dark vs light mode, information density, animation tolerance, typography preferences, notification style, tool aesthetics | PREFERENCES.md, existing PortOS theme choices (port-bg, port-card etc.) | - -Each domain captures: -- **Positive affinities** — what they're drawn to and why -- **Anti-preferences** — what they actively avoid (often more revealing than likes) -- **Functional context** — how the preference serves them (focus, comfort, identity, social) -- **Formative influences** — early experiences that shaped the preference -- **Evolution** — how the preference has changed over time - -##### Conversational Prompting Flow - -The key design principle: **conversation, not survey**. The twin generates questions that reference things it already knows, creating a dialogue that feels like it's building on shared context. - -**Flow architecture:** - -``` -┌─────────────────────────────────────────────────┐ -│ 1. Context Aggregation │ -│ Read: BOOKS.md, AUDIO.md, CREATIVE.md, │ -│ PREFERENCES.md, enrichment answers, │ -│ existing taste-profile.json responses, │ -│ personality traits (Big Five Openness) │ -├─────────────────────────────────────────────────┤ -│ 2. Static Core Question (from P2) │ -│ Serve the existing static question first │ -│ to establish baseline in that domain │ -├─────────────────────────────────────────────────┤ -│ 3. Personalized Follow-Up Generation │ -│ LLM generates 1 contextual follow-up using │ -│ identity context + previous answer │ -│ e.g., "You listed Blade Runner — what about │ -│ its visual language specifically grabbed you?" │ -├─────────────────────────────────────────────────┤ -│ 4. Depth Probing (optional, user-initiated) │ -│ "Want to go deeper?" button generates │ -│ another personalized question that connects │ -│ across domains (e.g., music taste ↔ visual) │ -├─────────────────────────────────────────────────┤ -│ 5. 
Summary & Synthesis │ -│ After core + follow-ups complete, LLM │ -│ generates section summary + cross-domain │ -│ pattern detection │ -└─────────────────────────────────────────────────┘ -``` - -**Prompt template for personalized question generation:** - -``` -You are a thoughtful interviewer building an aesthetic taste profile. -You already know the following about this person: - -## Identity Context -{identityContext — excerpts from BOOKS.md, AUDIO.md, enrichment answers, traits} - -## Previous Responses in This Section -{existingResponses — Q&A pairs from taste-profile.json for this section} - -## Section: {sectionLabel} - -Generate ONE follow-up question that: -1. References something specific from their identity context or previous answers -2. Probes WHY they prefer what they do, not just WHAT -3. Feels conversational — like a friend who knows them asking a natural question -4. Explores an angle their previous answers haven't covered yet -5. Is concise (1-2 sentences max) - -Do NOT: -- Ask generic questions that ignore the context -- Repeat topics already covered in previous responses -- Use survey language ("On a scale of 1-10...") -- Ask multiple questions at once -``` - -**Example personalized exchanges:** - -> **Static (P2):** "Name 3-5 films you consider near-perfect." -> **User:** "Blade Runner, Stalker, Lost in Translation, Drive, Arrival" -> -> **Personalized (P2.5):** "Your BOOKS.md lists several sci-fi titles with themes of isolation and altered perception. Four of your five film picks share that same atmosphere. Is solitude a feature of stories you're drawn to, or is it more about the specific visual treatment of lonely spaces?" - -> **Static (P2):** "What artists or albums have had a lasting impact?" -> **User:** "Radiohead, Boards of Canada, Massive Attack" -> -> **Personalized (P2.5):** "All three of those artists layer heavy texture over minimalist structures. Your CREATIVE.md mentions an appreciation for 'controlled complexity.' Does this principle — density within restraint — apply to how you think about visual design too?" - -##### Data Model — Where Taste Lives - -Taste data lives in **two files** with distinct roles: - -**1. Raw questionnaire responses: `data/digital-twin/taste-profile.json`** (existing, extended) - -```json -{ - "version": "2.0.0", - "createdAt": "...", - "updatedAt": "...", - "sections": { - "movies": { - "status": "completed", - "responses": [ - { - "questionId": "movies-core-1", - "answer": "Blade Runner, Stalker, Lost in Translation...", - "answeredAt": "...", - "source": "static" - }, - { - "questionId": "movies-p25-1", - "answer": "It's not solitude per se, it's the visual...", - "answeredAt": "...", - "source": "personalized", - "generatedQuestion": "Your BOOKS.md lists several sci-fi titles...", - "identityContextUsed": ["BOOKS.md:sci-fi-themes", "taste:movies-core-1"] - } - ], - "summary": "..." - }, - "fashion": { "status": "pending", "responses": [], "summary": null }, - "digital": { "status": "pending", "responses": [], "summary": null } - }, - "profileSummary": null, - "lastSessionAt": null -} -``` - -Changes from v1: -- `source` field distinguishes static vs personalized questions -- `generatedQuestion` stores the LLM-generated question text (since personalized questions aren't in the static definition) -- `identityContextUsed` tracks which identity sources informed the question (for provenance) -- Two new sections: `fashion`, `digital` -- Version bumped to 2.0.0 - -**2. 
Synthesized aesthetic profile: `data/digital-twin/aesthetics.json`** (planned in P1, populated by P2.5) - -```json -{ - "version": "1.0.0", - "updatedAt": "...", - "profile": { - "visualStyle": ["brutalist minimalism", "high-contrast neon", "controlled complexity"], - "narrativePreferences": ["isolation themes", "slow burn", "ambiguity over resolution"], - "musicProfile": ["textural electronica", "atmospheric layering", "functional listening"], - "spatialPreferences": ["raw materials", "dramatic light", "intimacy over grandeur"], - "culinaryIdentity": ["umami-driven", "improvisational cooking", "experience over formality"], - "fashionSensibility": ["monochrome", "natural fibers", "minimal branding"], - "digitalAesthetic": ["dark mode", "high information density", "subtle animation"], - "antiPatterns": ["visual clutter", "forced symmetry", "saccharine sentimentality"], - "corePrinciples": ["density within restraint", "function informing form", "earned complexity"] - }, - "sources": { - "tasteQuestionnaire": { - "sectionsCompleted": 7, - "totalResponses": 28, - "lastUpdated": "..." - }, - "enrichment": { - "aesthetics": { "questionsAnswered": 5 }, - "favoriteBooks": { "analyzed": true, "themes": ["existential sci-fi", "systems thinking"] }, - "favoriteMovies": { "analyzed": true, "themes": ["atmospheric isolation", "neon noir"] }, - "musicTaste": { "analyzed": true, "themes": ["textural electronica", "ambient"] } - }, - "documents": ["BOOKS.md", "AUDIO.md", "CREATIVE.md", "PREFERENCES.md"] - }, - "crossDomainPatterns": [ - "Preference for 'controlled complexity' appears across music (layered textures), visual art (minimalist structure with dense detail), architecture (raw materials with precise placement), and food (complex umami built from simple ingredients)", - "Anti-preference for overt sentimentality spans film (avoids melodrama), music (dislikes saccharine pop), and design (rejects decorative ornamentation)" - ], - "genomicCorrelations": { - "tasteReceptorGenes": "TAS2R38 status may correlate with bitter-food tolerance preferences", - "sensoryProcessing": "Olfactory receptor variants may explain heightened texture sensitivity" - } -} -``` - -This file is the **canonical aesthetic profile** referenced by the Identity orchestrator (`identity.json`). It is regenerated whenever taste-profile.json accumulates significant new responses. - -##### Implementation Steps - -1. **Add 2 new sections** to `TASTE_SECTIONS` in `taste-questionnaire.js`: `fashion` and `digital`, each with 3 core questions and keyword-triggered follow-ups -2. **Add `aggregateIdentityContext(sectionId)`** to `taste-questionnaire.js` — reads BOOKS.md, AUDIO.md, CREATIVE.md, PREFERENCES.md, enrichment answers, and existing taste responses to build a context string for the LLM -3. **Add `generatePersonalizedTasteQuestion(sectionId, existingResponses, identityContext)`** — calls the active AI provider with the prompt template above, returns a single personalized follow-up question -4. **Add `POST /api/digital-twin/taste/:section/personalized-question`** route that returns a generated question -5. **Extend `submitAnswer()`** to accept `source: 'personalized'` and store `generatedQuestion` + `identityContextUsed` metadata -6. **Add "Go deeper" button** to TasteTab.jsx after each static follow-up cycle completes — clicking it calls the personalized question endpoint -7. **Add `generateAestheticsProfile()`** to `taste-questionnaire.js` — synthesizes all taste-profile.json responses + enrichment data into `aesthetics.json` -8. 
**Bump taste-profile.json version** to 2.0.0, migrate existing responses to include `source: 'static'` -9. **Update TasteTab.jsx** to render personalized questions differently (subtle indicator showing the twin referenced specific context) - -##### Prerequisite Relaxation - -The original spec listed P1 (Identity orchestrator) as a hard prerequisite. This is relaxed: P2.5 can read identity documents directly from the filesystem (`BOOKS.md`, `AUDIO.md`, etc.) and enrichment data from `meta.json` without needing the orchestrator layer. The orchestrator becomes useful for caching and cross-section queries but is not strictly required for context aggregation. - -#### P5: Cross-Insights Engine -- Add `generateCrossInsights(identity)` in identity service -- Cross-reference genome markers with chronotype, goals, and enrichment data -- Generate natural-language insight strings (e.g., caffeine + chronotype, longevity + goal urgency) -- Display on Identity dashboard and inject into CoS context when relevant -- Consider autonomous job: periodic identity insight refresh -- Example cross-insights from current marker data: - - CLOCK + CRY1 + PER2 → composite chronotype confidence (3 markers agreeing = high confidence evening/morning) - - MTNR1B concern + evening chronotype → "avoid eating after 8pm — your melatonin receptor variant impairs late glucose handling" - - CYP1A2 slow metabolizer + CLOCK evening → "caffeine cutoff by noon, not 2pm" - - FOXO3A/CETP/IGF1R longevity markers + cardiovascular risk → adjusted life expectancy for goal urgency - -### Identity Extension Roadmap - -This roadmap connects brain ideas and the Genome Section Integration project (0e6a0332) into a unified implementation sequence. - -#### Source Ideas -- **Brain idea 608dc733**: "Prompting Aesthetic Taste Docs via Digital Twin" — use the twin's existing knowledge to generate personalized aesthetic preference questions -- **Brain idea 284dd487**: "Genome Types & Chronotype Trait" — derive chronotype from 5 sleep/circadian markers + behavioral data -- **Project 0e6a0332**: "Genome Section Integration" — unify genome data with Identity page architecture - -#### Phase Dependency Graph - -``` -P1: Identity Orchestrator & Chronotype ──── (brain idea 284dd487) - │ Creates identity.json, chronotype.json, - │ identity service, derivation from 5 sleep markers - │ - ├─► P2.5: Personalized Taste Prompting ─── (brain idea 608dc733) - │ Uses identity context to generate smart taste questions - │ Enhances existing TasteTab with twin-aware follow-ups - │ - ├─► P3: Mortality-Aware Goal Tracking - │ Birth date + genome longevity/cardio markers → life expectancy - │ Urgency scoring for prioritized goal management - │ - └─► P4: Identity Tab UI - Dashboard with summary cards for all 4 sections - Sub-routes for chronotype, taste, goals deep dives - │ - └─► P5: Cross-Insights Engine - Reads all sections, generates natural-language insights - Injects identity context into CoS agent briefings -``` - -#### Implementation Priority -1. **P1** — Foundation: nothing else works without the orchestrator -2. **P2.5** — Quick win: enhances existing Taste tab with minimal new infrastructure -3. **P3** — New feature: mortality-aware goals need genome data flowing through identity service -4. **P4** — UI: renders what P1-P3 produce -5. 
**P5** — Polish: cross-entity reasoning requires all sections populated - -### Data Flow +## Planned Feature Details -``` -User uploads 23andMe → genome.json (117 markers, 32 categories) - ↓ -Identity service reads 5 sleep markers + 2 caffeine markers - ↓ -Derives chronotype.json (+ behavioral input from daily_routines enrichment) - ↓ -Twin reads identity context → generates personalized taste questions (P2.5) - ↓ -User completes taste questionnaire → taste-profile.json → aesthetics.json - ↓ -LLM analyzes books/movies/music docs → seeds aesthetic profile themes - ↓ -User sets birth date → goals.json (life expectancy from actuarial + 10 genome markers) - ↓ -Cross-insights engine reads all 4 sections → generates natural-language insights - ↓ -Identity tab renders unified dashboard with summary cards + insights - ↓ -CoS injects identity context into agent briefings when relevant -``` +### M7: App Templates -### Files to Create/Modify +Templates allow creating new apps from pre-configured project structures. -**New files:** -- `data/digital-twin/identity.json` — orchestrator metadata -- `data/digital-twin/chronotype.json` — derived chronotype profile -- `data/digital-twin/aesthetics.json` — taste profile -- `data/digital-twin/goals.json` — mortality-aware goals -- `server/services/identity.js` — identity orchestration service -- `server/routes/identity.js` — API routes -- `server/lib/identityValidation.js` — Zod schemas -- `client/src/components/digital-twin/tabs/IdentityTab.jsx` — dashboard -- `client/src/components/digital-twin/identity/ChronotypeEditor.jsx` -- `client/src/components/digital-twin/identity/TasteQuestionnaire.jsx` -- `client/src/components/digital-twin/identity/GoalTracker.jsx` -- `client/src/components/digital-twin/identity/CrossInsights.jsx` +**Built-in Template: PortOS Stack** +- Express.js API server +- React + Vite frontend +- Tailwind CSS styling +- PM2 ecosystem configuration +- GitHub Actions CI/CD workflows +- Auto-versioning system -**Modified files:** -- `client/src/components/digital-twin/constants.js` — add Identity tab -- `client/src/pages/DigitalTwin.jsx` — add Identity tab rendering -- `client/src/services/api.js` — add identity API methods -- `server/index.js` — mount identity routes -- `server/services/taste-questionnaire.js` — add `generatePersonalizedTasteQuestion()` using identity context (P2.5) -- `client/src/components/digital-twin/tabs/TasteTab.jsx` — wire personalized question generation (P2.5) +**Features** +1. Template Selection - Browse available templates with feature descriptions +2. App Creation - Scaffold new project with chosen name and target directory +3. Custom Templates - Register additional templates from local paths +4. Template Management - View, edit, delete custom templates -### Design Decisions +**Pages** +- `/templates` - Template browser and app creation +- `/templates/new` - Register custom template -1. **Separate data files per section** (not one giant file) — each section has independent update cadence and the genome file (82KB) is already large -2. **Derivation over duplication** — chronotype reads from genome.json at query time rather than copying marker data. Identity service is the join layer -3. **Progressive disclosure** — Identity tab shows summary cards; deep dives are sub-routes, not modals (per CLAUDE.md: all views must be deep-linkable) -4. **LLM-assisted but user-confirmed** — aesthetic themes extracted by LLM from media lists are suggestions, not gospel. User confirms/edits -5. 
**No new dependencies** — uses existing Zod, Express, React, Lucide stack -6. **Genome data stays read-only** — identity service reads genome markers but never writes to genome.json -7. **Taste data consolidation** — P2 created `taste-profile.json` (5 sections). P2.5 adds twin-aware personalized questions. Long-term, taste data migrates into `aesthetics.json` as the canonical aesthetic profile, with taste-profile.json as the raw questionnaire responses -8. **Weighted chronotype confidence** — 5 sleep markers weighted by specificity: CRY1 (strongest DSPD signal) > CLOCK (evening tendency) > PER2 (circadian period) > MTNR1B (melatonin coupling) > DEC2 (duration, not phase). Behavioral data from daily_routines enrichment gets equal weight to genetic composite +**API Endpoints** +| Route | Description | +|-------|-------------| +| GET /api/templates | List all templates | +| POST /api/templates | Add custom template | +| POST /api/templates/create | Create app from template | +| DELETE /api/templates/:id | Remove custom template | --- @@ -972,138 +125,27 @@ CoS injects identity context into agent briefings when relevant - [Troubleshooting](./docs/TROUBLESHOOTING.md) - Common issues and solutions ### Feature Documentation +- [Agent Skills](./docs/features/agent-skills.md) - Task-type-specific prompt templates and routing - [App Wizard](./docs/features/app-wizard.md) - Register apps and create from templates - [Autofixer](./docs/features/autofixer.md) - Autonomous crash detection and repair - [Brain System](./docs/features/brain-system.md) - Second-brain capture and classification +- [Browser Management](./docs/features/browser.md) - CDP/Playwright browser management - [Chief of Staff](./docs/features/chief-of-staff.md) - Autonomous agent orchestration - [CoS Agent Runner](./docs/features/cos-agent-runner.md) - Isolated agent process management - [CoS Enhancement](./docs/features/cos-enhancement.md) - M35 hybrid memory, missions, thinking levels - [Digital Twin](./docs/features/digital-twin.md) - Quantitative personality modeling - [Error Handling](./docs/features/error-handling.md) - Graceful error handling with auto-fix +- [Identity System](./docs/features/identity-system.md) - Unified identity architecture (M42 spec) - [Memory System](./docs/features/memory-system.md) - Semantic memory with LLM classification - [Prompt Manager](./docs/features/prompt-manager.md) - Customizable AI prompts - [Soul System](./docs/features/soul-system.md) - Digital twin identity scaffold -- [Browser Management](./docs/features/browser.md) - CDP/Playwright browser management --- ## Next Actions -Based on recent work and incomplete milestones: - -1. **Complete M7: App Templates** - Implement template management UI and app scaffolding from templates -2. **Digital Twin P3: Behavioral Feedback Loop** - Add "sounds like me" response validation and adaptive weighting -3. **Vision API Polish** - Continue refining LM Studio vision integration based on test results -4. **Memory Consolidation** - Implement automatic memory consolidation for similar memories -5. **M40: Agent Skill System** - See details below - ---- - -## M40: Agent Skill System - -Inspired by [OpenAI Skills & Shell Tips](https://developers.openai.com/blog/skills-shell-tips), this milestone improves CoS agent accuracy and reliability through better task routing, prompt specificity, and context management. 
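
A sketch of the keyword-based matching that P1 below describes is shown here; the keyword lists are illustrative, and the real `detectSkillTemplate()` in `subAgentSpawner.js` may differ in both keywords and ordering.

```js
// Illustrative keyword-based skill matching, ordered by specificity as described in P1.
// The keyword lists are examples only; the real implementation may differ.
import { readFile } from 'fs/promises';
import path from 'path';

const SKILLS_DIR = path.join(process.cwd(), 'data', 'prompts', 'skills');

// Specific templates (security, mobile) are checked before generic ones (bug-fix, feature)
const SKILL_KEYWORDS = [
  ['security-audit', ['security', 'vulnerability', 'owasp', 'audit']],
  ['mobile-responsive', ['mobile', 'responsive', 'viewport', 'breakpoint']],
  ['documentation', ['document', 'readme', 'docs']],
  ['refactor', ['refactor', 'cleanup', 'restructure']],
  ['bug-fix', ['bug', 'fix', 'crash', 'error']],
  ['feature', ['feature', 'implement', 'add support']],
];

function detectSkillTemplate(taskDescription) {
  const text = taskDescription.toLowerCase();
  const match = SKILL_KEYWORDS.find(([, keywords]) => keywords.some((k) => text.includes(k)));
  return match ? match[0] : null; // null: no skill section injected, avoiding token inflation
}

async function loadSkillTemplate(name) {
  return readFile(path.join(SKILLS_DIR, `${name}.md`), 'utf8');
}

const skill = detectSkillTemplate('Fix mobile layout overflow on the Agents page');
const skillSection = skill ? await loadSkillTemplate(skill) : ''; // injected into the agent briefing
```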
- -### P1: Task-Type-Specific Agent Prompts (Skill Templates) ✅ -Created specialized prompt templates per task category with routing, examples, and guidelines: -- **Routing descriptions**: "Use when..." / "Don't use when..." sections in each skill template -- **Embedded examples**: Worked examples of successful completions for each task type -- **Task-specific guidelines**: Security audit includes OWASP checklist; feature includes validation/convention requirements; refactor emphasizes behavior preservation - -**Implemented**: -- Added `data/prompts/skills/` directory with 6 task-type templates: `bug-fix.md`, `feature.md`, `security-audit.md`, `refactor.md`, `documentation.md`, `mobile-responsive.md` -- Added `detectSkillTemplate()` and `loadSkillTemplate()` in `subAgentSpawner.js` with keyword-based matching (ordered by specificity — security/mobile before generic bug-fix/feature) -- Updated `buildAgentPrompt()` to inject matched skill template into both the Mustache template system and the fallback template -- Updated `cos-agent-briefing.md` with `{{#skillSection}}` conditional block -- Templates only loaded when matched to avoid token inflation - -### P2: Agent Context Compaction ✅ -Long-running agents can hit context limits causing failures. Add proactive context management: -- Pass `--max-turns` or equivalent context budget hints when spawning agents -- Track agent output length and detect when agents are approaching context limits -- ✅ Add compaction metadata to agent error analysis so retries can include "compact context" instructions -- ✅ Update the agent briefing to include explicit output format constraints for verbose task types - -### P3: Negative Example Coverage for Task Routing ✅ -Improve task-to-model routing accuracy by adding negative examples to the model selection logic: -- ✅ Document which task types should NOT use light models (already partially done, but formalize it) -- ✅ Add "anti-patterns" to task learning: when a task type fails with a specific model, record the negative signal via `routingAccuracy` cross-reference (taskType × modelTier) -- ✅ Surface routing accuracy metrics in the Learning tab so the user can see misroutes -- ✅ Enhanced `suggestModelTier()` to use negative signal data for smarter tier avoidance - -### P4: Deterministic Workflow Skills ✅ -For recurring autonomous jobs (daily briefing, git maintenance, security audit, app improvement), encode the full workflow as a deterministic skill: -- ✅ Each skill defines exact steps, expected outputs, and success criteria in `data/prompts/skills/jobs/` -- ✅ Prevents prompt drift across runs — jobs now load structured skill templates instead of inline prompt strings -- ✅ Skills are versioned and editable via the Prompt Manager UI (Job Skills tab) -- ✅ `generateTaskFromJob()` builds effective prompts from skill template sections (Steps, Expected Outputs, Success Criteria) -- ✅ API routes added: GET/PUT `/api/prompts/skills/jobs/:name`, preview via GET `/api/prompts/skills/jobs/:name/preview` - ---- - -## Error Handling Summary - -The server implements comprehensive error handling: -- **asyncHandler**: All routes wrapped with error handler that catches uncaught errors -- **ServerError**: Custom error class with status, code, severity, and context -- **Socket.IO Events**: Errors broadcast to UI via `error:occurred` event -- **Process Handlers**: Unhandled rejections and uncaught exceptions emit socket events -- **Logging**: Errors logged with emoji prefixes, no server crashes -- See [Error 
Handling](./docs/features/error-handling.md) for details - ---- - -## Security Audit (2026-01-08) - -Comprehensive security audit performed by CoS Self-Improvement agent. - -### Vulnerabilities Found and Fixed - -1. **Command Injection in Git Service** (CRITICAL - FIXED) - - File: `server/services/git.js` - - Fix: Replaced `exec()` with `spawn()` and `shell: false`, added path validation - -2. **Path Traversal in Screenshots Route** (HIGH - FIXED) - - File: `server/routes/screenshots.js` - - Fix: Added `sanitizeFilename()` and path validation - -### Secure Patterns (No Issues Found) -- Command execution uses allowlist -- PM2 operations use spawn with shell: false -- Input validation with Zod schemas -- No dangerouslySetInnerHTML in React -- API keys stored server-side only -- JSON content type required for mutations - ---- - -## Planned Feature Details - -### M7: App Templates - -Templates allow creating new apps from pre-configured project structures. - -**Built-in Template: PortOS Stack** -- Express.js API server -- React + Vite frontend -- Tailwind CSS styling -- PM2 ecosystem configuration -- GitHub Actions CI/CD workflows -- Auto-versioning system - -**Features** -1. Template Selection - Browse available templates with feature descriptions -2. App Creation - Scaffold new project with chosen name and target directory -3. Custom Templates - Register additional templates from local paths -4. Template Management - View, edit, delete custom templates - -**Pages** -- `/templates` - Template browser and app creation -- `/templates/new` - Register custom template - -**API Endpoints** -| Route | Description | -|-------|-------------| -| GET /api/templates | List all templates | -| POST /api/templates | Add custom template | -| POST /api/templates/create | Create app from template | -| DELETE /api/templates/:id | Remove custom template | +1. **M42 P1: Identity Orchestrator & Chronotype** - Create identity.json, chronotype.json, identity service, derive chronotype from 5 genome sleep markers. See [Identity System](./docs/features/identity-system.md) +2. **M42 P2.5: Personalized Taste Prompting** - Enhance TasteTab with twin-aware follow-up questions using identity context from existing documents +3. **M7: App Templates** - Implement template management UI and app scaffolding from templates +4. **M34 P3: Behavioral Feedback Loop** - Add "sounds like me" response validation and adaptive weighting +5. **M42 P3: Mortality-Aware Goal Tracking** - Birth date + genome longevity markers for urgency-scored goals diff --git a/docs/features/agent-skills.md b/docs/features/agent-skills.md new file mode 100644 index 00000000..62770f8a --- /dev/null +++ b/docs/features/agent-skills.md @@ -0,0 +1,42 @@ +# Agent Skill System (M40) + +Improves CoS agent accuracy and reliability through task-type-specific prompt templates, context compaction, negative example routing, and deterministic workflow skills. Inspired by [OpenAI Skills & Shell Tips](https://developers.openai.com/blog/skills-shell-tips). + +## P1: Task-Type-Specific Agent Prompts (Skill Templates) + +Created specialized prompt templates per task category with routing, examples, and guidelines: +- **Routing descriptions**: "Use when..." / "Don't use when..." 
sections in each skill template +- **Embedded examples**: Worked examples of successful completions for each task type +- **Task-specific guidelines**: Security audit includes OWASP checklist; feature includes validation/convention requirements; refactor emphasizes behavior preservation + +**Implementation:** +- Added `data/prompts/skills/` directory with 6 task-type templates: `bug-fix.md`, `feature.md`, `security-audit.md`, `refactor.md`, `documentation.md`, `mobile-responsive.md` +- Added `detectSkillTemplate()` and `loadSkillTemplate()` in `subAgentSpawner.js` with keyword-based matching (ordered by specificity -- security/mobile before generic bug-fix/feature) +- Updated `buildAgentPrompt()` to inject matched skill template into both the Mustache template system and the fallback template +- Updated `cos-agent-briefing.md` with `{{#skillSection}}` conditional block +- Templates only loaded when matched to avoid token inflation + +## P2: Agent Context Compaction + +Long-running agents can hit context limits causing failures. Added proactive context management: +- Pass `--max-turns` or equivalent context budget hints when spawning agents +- Track agent output length and detect when agents are approaching context limits +- Added compaction metadata to agent error analysis so retries can include "compact context" instructions +- Updated the agent briefing to include explicit output format constraints for verbose task types + +## P3: Negative Example Coverage for Task Routing + +Improved task-to-model routing accuracy by adding negative examples to the model selection logic: +- Documented which task types should NOT use light models +- Added "anti-patterns" to task learning: when a task type fails with a specific model, record the negative signal via `routingAccuracy` cross-reference (taskType x modelTier) +- Surfaced routing accuracy metrics in the Learning tab so the user can see misroutes +- Enhanced `suggestModelTier()` to use negative signal data for smarter tier avoidance + +## P4: Deterministic Workflow Skills + +For recurring autonomous jobs (daily briefing, git maintenance, security audit, app improvement), encoded the full workflow as a deterministic skill: +- Each skill defines exact steps, expected outputs, and success criteria in `data/prompts/skills/jobs/` +- Prevents prompt drift across runs -- jobs now load structured skill templates instead of inline prompt strings +- Skills are versioned and editable via the Prompt Manager UI (Job Skills tab) +- `generateTaskFromJob()` builds effective prompts from skill template sections (Steps, Expected Outputs, Success Criteria) +- API routes added: GET/PUT `/api/prompts/skills/jobs/:name`, preview via GET `/api/prompts/skills/jobs/:name/preview` diff --git a/docs/features/identity-system.md b/docs/features/identity-system.md new file mode 100644 index 00000000..d4d61529 --- /dev/null +++ b/docs/features/identity-system.md @@ -0,0 +1,612 @@ +# Unified Digital Twin Identity System (M42) + +Connects Genome (117 markers, 32 categories), Chronotype (5 sleep markers + behavioral), Aesthetic Taste (P2 complete, P2.5 adds twin-aware prompting), and Mortality-Aware Goals into a single coherent Identity architecture with cross-insights engine. + +## Motivation + +Four separate workstreams converge on the same vision: a personal digital twin that knows *who you are* biologically, temporally, aesthetically, and existentially. 
Today these live as disconnected features: + +| Subsystem | Current State | Location | +|-----------|--------------|----------| +| **Genome** | Fully implemented: 23andMe upload, 117 curated SNP markers across 32 categories, ClinVar integration, epigenetic tracking | `server/services/genome.js`, `GenomeTab.jsx`, `data/digital-twin/genome.json` | +| **Chronotype** | Genetic data ready: 5 sleep/circadian markers (CLOCK rs1801260, DEC2 rs57875989, PER2 rs35333999, CRY1 rs2287161, MTNR1B rs10830963) + `daily_routines` enrichment category. Derivation service not yet built | `curatedGenomeMarkers.js` sleep category, `ENRICHMENT_CATEGORIES.daily_routines` | +| **Aesthetic Taste** | P2 complete: Taste questionnaire with 5 sections (movies, music, visual_art, architecture, food), conversational Q&A, AI summary generation. Enrichment categories also feed taste data from book/movie/music lists | `TasteTab.jsx`, `taste-questionnaire.js`, `data/digital-twin/taste-profile.json` | +| **Goal Tracking** | Partially exists: `COS-GOALS.md` for CoS missions, `TASKS.md` for user tasks, `EXISTENTIAL.md` soul doc | `data/COS-GOALS.md`, `data/TASKS.md`, `data/digital-twin/EXISTENTIAL.md` | + +These should be unified under a single **Identity** architecture so the twin can reason across all dimensions (e.g., "your CLOCK gene says evening chronotype — schedule deep work after 8pm" or "given your longevity markers and age, here's how to prioritize your 10-year goals"). + +## Data Model + +### Entity: `identity.json` (top-level twin orchestration) + +```json +{ + "version": "1.0.0", + "createdAt": "2026-02-12T00:00:00.000Z", + "updatedAt": "2026-02-12T00:00:00.000Z", + "sections": { + "genome": { "status": "active", "dataFile": "genome.json", "markerCount": 117, "categoryCount": 32, "lastScanAt": "..." }, + "chronotype": { "status": "active", "dataFile": "chronotype.json", "derivedFrom": ["genome:sleep", "enrichment:daily_routines"] }, + "aesthetics": { "status": "active", "dataFile": "aesthetics.json", "derivedFrom": ["enrichment:aesthetics", "enrichment:favorite_books", "enrichment:favorite_movies", "enrichment:music_taste"] }, + "goals": { "status": "active", "dataFile": "goals.json" } + }, + "crossLinks": [] +} +``` + +### Entity: Chronotype Profile (`chronotype.json`) + +Derived from genome sleep markers + daily_routines enrichment answers + user overrides. + +```json +{ + "chronotype": "evening", + "confidence": 0.75, + "sources": { + "genetic": { + "clockGene": { "rsid": "rs1801260", "genotype": "T/C", "signal": "mild_evening" }, + "dec2": { "rsid": "rs57875989", "genotype": "G/G", "signal": "standard_sleep_need" }, + "per2": { "rsid": "rs35333999", "genotype": "C/C", "signal": "standard_circadian" }, + "cry1": { "rsid": "rs2287161", "genotype": "C/C", "signal": "standard_period" }, + "mtnr1b": { "rsid": "rs10830963", "genotype": "T/T", "signal": "normal_melatonin_receptor" } + }, + "behavioral": { + "preferredWakeTime": "08:30", + "preferredSleepTime": "00:30", + "peakFocusWindow": "20:00-02:00", + "energyDipWindow": "14:00-16:00" + } + }, + "recommendations": { + "deepWork": "20:00-02:00", + "lightTasks": "09:00-12:00", + "exercise": "17:00-19:00", + "caffeineCutoff": "14:00" + }, + "updatedAt": "2026-02-12T00:00:00.000Z" +} +``` + +**Derivation logic**: Five genome sleep markers provide the genetic baseline: CLOCK (evening preference), DEC2 (sleep duration need), PER2 (circadian period), CRY1 (delayed sleep phase), MTNR1B (melatonin receptor / nighttime glucose). 
The `daily_routines` enrichment answers provide behavioral confirmation. When genetic and behavioral signals agree, confidence is high. When they disagree, surface the conflict for user review. Caffeine cutoff cross-references caffeine metabolism markers (CYP1A2 rs762551, ADA rs73598374). MTNR1B status also informs late-eating recommendations. + +### Entity: Aesthetic Taste Profile (`aesthetics.json`) + +Consolidates scattered aesthetic data into a structured profile. + +```json +{ + "profile": { + "visualStyle": [], + "narrativePreferences": [], + "musicProfile": [], + "designPrinciples": [], + "antiPatterns": [] + }, + "sources": { + "enrichmentAnswers": { "aesthetics": "...", "questionsAnswered": 0 }, + "bookAnalysis": { "themes": [], "sourceDoc": "BOOKS.md" }, + "movieAnalysis": { "themes": [], "sourceDoc": "MOVIES.md" }, + "musicAnalysis": { "themes": [], "sourceDoc": "AUDIO.md" } + }, + "questionnaire": { + "completed": false, + "sections": [ + "visual_design", + "color_and_mood", + "architecture_and_space", + "fashion_and_texture", + "sound_and_music", + "narrative_and_story", + "anti_preferences" + ] + }, + "updatedAt": null +} +``` + +**Derivation logic**: Taste is partially observable from existing enrichment data (book/movie/music lists). The aesthetic questionnaire fills in the rest via prompted sections — each section shows image/description pairs and asks for preference rankings. LLM analysis of existing media lists extracts themes (e.g., "brutalist minimalism", "high-contrast neon", "atmospheric dread") to seed the profile. + +### Entity: Mortality-Aware Goals (`goals.json`) + +```json +{ + "birthDate": "1980-01-15", + "lifeExpectancyEstimate": { + "baseline": 78.5, + "adjusted": null, + "adjustmentFactors": { + "geneticLongevity": null, + "cardiovascularRisk": null, + "lifestyle": null + }, + "source": "SSA actuarial table + genome markers" + }, + "timeHorizons": { + "yearsRemaining": null, + "healthyYearsRemaining": null, + "percentLifeComplete": null + }, + "goals": [ + { + "id": "uuid", + "title": "...", + "description": "...", + "horizon": "5-year", + "category": "creative|family|health|financial|legacy|mastery", + "urgency": null, + "status": "active|completed|abandoned", + "milestones": [], + "createdAt": "...", + "updatedAt": "..." + } + ], + "updatedAt": null +} +``` + +**Derivation logic**: Birth date + actuarial baseline + genome longevity/cardiovascular markers produce an adjusted life expectancy. This creates urgency scoring: a "legacy" goal with a 20-year timeline hits differently at 30% life-complete vs 70%. Goals are categorized and scored by time-decay urgency. The system can suggest reprioritization when markers indicate risk factors (e.g., high cardiovascular genetic risk -> prioritize health goals). 
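+
+A minimal sketch of the scoring just described, assuming the `goals.json` shape above. The helper names, the marker-signal shape, and the +/- 0.5-year weights are illustrative assumptions, not the implemented identity service; "normalized" is read here as clamping to [0, 1]:
+
+```js
+// Hypothetical helpers for the derivation above -- a sketch, not the shipped service.
+const YEAR_MS = 365.25 * 24 * 60 * 60 * 1000;
+
+// Each beneficial longevity/cardiovascular marker nudges the actuarial baseline up,
+// each concern nudges it down; the 0.5-year step is an assumed placeholder weight.
+function adjustLifeExpectancy(baselineYears, markerSignals = []) {
+  return markerSignals.reduce((years, m) => {
+    if (m.signal === 'beneficial') return years + 0.5;
+    if (m.signal === 'concern') return years - 0.5;
+    return years;
+  }, baselineYears);
+}
+
+function computeTimeHorizons(birthDate, adjustedExpectancy, now = new Date()) {
+  const ageYears = (now - new Date(birthDate)) / YEAR_MS;
+  return {
+    yearsRemaining: Math.max(adjustedExpectancy - ageYears, 0),
+    percentLifeComplete: Math.min(100, (ageYears / adjustedExpectancy) * 100)
+  };
+}
+
+// urgency = (goalHorizonYears - yearsRemaining) / goalHorizonYears, clamped to [0, 1]
+function scoreUrgency(goalHorizonYears, yearsRemaining) {
+  const raw = (goalHorizonYears - yearsRemaining) / goalHorizonYears;
+  return Math.min(1, Math.max(0, raw));
+}
+```
+
+With the example birth date above and an adjusted expectancy near the 78.5-year baseline, a 20-year goal scores zero urgency until `yearsRemaining` drops below 20, then climbs toward 1 as the horizon outruns the remaining years.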
+ +## Entity Relationships + +``` + +------------------+ + | identity.json | + | (orchestrator) | + +--+---+---+---+--+ + | | | | + +----------+ | | +----------+ + v v v v + +---------+ +----------+ +----------+ +---------+ + | Genome | |Chronotype| |Aesthetics| | Goals | + |genome.json| |chrono.json| |aesth.json| |goals.json| + +----+----+ +----+-----+ +----+-----+ +----+----+ + | | | | + | +---------+ | | + | | derives from | | + +----+ sleep markers | | + | | | | + | | caffeine cutoff <-----+ | + | | from caffeine markers | | + | | | | + | +-----------------------+ | + | | | + | longevity/cardio ---------------------->| + | markers inform | urgency | + | life expectancy | scoring | + | | | + | +-------------+ | + | | derives from | + | | enrichment: aesthetics, | + | | books, movies, music | + | | | + +--------------+----------------------------+ + All reference meta.json + (documents, enrichment, traits) +``` + +**Cross-cutting links** (stored in `identity.json.crossLinks`): +- `genome:sleep` -> `chronotype:genetic` (CLOCK/DEC2/PER2/CRY1/MTNR1B markers feed chronotype) +- `genome:caffeine` -> `chronotype:recommendations.caffeineCutoff` (CYP1A2/ADA markers set cutoff) +- `genome:sleep:mtnr1b` -> `chronotype:recommendations.lateEatingCutoff` (MTNR1B impairs nighttime glucose) +- `genome:longevity` + `genome:cardiovascular` -> `goals:lifeExpectancyEstimate` (risk-adjusted lifespan) +- `enrichment:daily_routines` -> `chronotype:behavioral` (self-reported schedule) +- `enrichment:aesthetics` + `enrichment:favorite_*` + `enrichment:music_taste` -> `aesthetics:profile` (taste extraction) +- `traits:valuesHierarchy` -> `goals:category` priority weighting (autonomy-valuing person weights mastery goals higher) + +## Identity Page Structure + +The existing Digital Twin page at `/digital-twin/:tab` gets a new **Identity** tab that serves as the unified view. Individual subsystem tabs (Genome, Enrich) remain for deep dives. + +### Route: `/digital-twin/identity` + +``` ++-------------------------------------------------------------+ +| Digital Twin | +| Overview | Documents | ... | Identity | Genome | ... | ++-------------------------------------------------------------+ +| | +| +- Identity Dashboard --------------------------------+ | +| | Completeness: xxxxxxxx.. 72% | | +| | 4 sections: Genome Y Chronotype ~ Taste . 
Goals .| | +| +-----------------------------------------------------+ | +| | +| +- Genome Summary Card -------------------------------+ | +| | 117 markers scanned across 32 categories | | +| | Key findings: ~20 beneficial, ~40 concern, ~5 major| | +| | [View Full Genome ->] | | +| +-----------------------------------------------------+ | +| | +| +- Chronotype Card -----------------------------------+ | +| | Type: Evening Owl (75% confidence from 5 markers) | | +| | Genetic: CLOCK T/C + CRY1 C/C + PER2 C/C + DEC2 G| | +| | Peak focus: 8pm-2am | Caffeine cutoff: 2pm | | +| | Late eating cutoff: 8pm (MTNR1B-informed) | | +| | [Configure Schedule ->] | | +| +-----------------------------------------------------+ | +| | +| +- Aesthetic Taste Card -------------------------------+ | +| | Taste Tab: 0/5 sections completed (P2 UI ready) | | +| | Detected themes from media: brutalist, atmospheric | | +| | [Continue Taste Questionnaire ->] [Go to Taste ->] | | +| +-----------------------------------------------------+ | +| | +| +- Life Goals Card -----------------------------------+ | +| | Status: Not configured | | +| | Set birth date and goals to enable mortality-aware | | +| | priority scoring | | +| | [Set Up Goals ->] | | +| +-----------------------------------------------------+ | +| | +| +- Cross-Insights ------------------------------------+ | +| | "Your CLOCK gene evening tendency + caffeine | | +| | sensitivity suggest cutting coffee by 2pm" | | +| | "Longevity marker FOXO3A T/T (concern) + IL-6 C/C | | +| | (inflammation concern) -- prioritize health goals" | | +| +-----------------------------------------------------+ | +| | ++-------------------------------------------------------------+ +``` + +### Sub-routes for deep dives: +- `/digital-twin/identity` -- Dashboard overview (above) +- `/digital-twin/identity/chronotype` -- Full chronotype editor with schedule builder +- `/digital-twin/identity/taste` -- Aesthetic questionnaire flow (section-by-section) +- `/digital-twin/identity/goals` -- Goal CRUD with urgency visualization +- `/digital-twin/genome` -- Existing genome tab (unchanged) + +## Implementation Phases + +### P1: Identity Orchestrator & Chronotype (data layer) +- Create `data/digital-twin/identity.json` with section status tracking +- Create `server/services/identity.js` -- orchestrator that reads from genome, enrichment, taste-profile, and new data files +- Create `data/digital-twin/chronotype.json` -- derive from 5 genome sleep markers + daily_routines enrichment +- Add `GET /api/digital-twin/identity` route returning unified section status +- Add `GET/PUT /api/digital-twin/identity/chronotype` routes +- Derivation function: `deriveChronotypeFromGenome(genomeSummary)` extracts all 5 sleep markers (CLOCK, DEC2, PER2, CRY1, MTNR1B) -> composite chronotype signal with weighted confidence +- Cross-reference CYP1A2/ADA caffeine markers and MTNR1B melatonin receptor for caffeine cutoff and late-eating recommendations + +### P2: Aesthetic Taste Questionnaire (complete) +- Created `data/digital-twin/taste-profile.json` for structured taste preference storage +- Created `server/services/taste-questionnaire.js` with 5 taste sections (movies, music, visual_art, architecture, food), each with core questions and branching follow-ups triggered by keyword detection +- Added 7 API routes under `/api/digital-twin/taste/*` (profile, sections, next question, answer, responses, summary, reset) +- Built `TasteTab.jsx` conversational Q&A UI with section grid, question flow, review mode, and 
AI-powered summary generation +- Responses persisted to taste-profile.json and appended to AESTHETICS.md for digital twin context +- Added Taste tab to Digital Twin page navigation + +### P2.5: Digital Twin Aesthetic Taste Prompting (brain idea 608dc733) + +#### Problem + +P2's Taste questionnaire uses static questions and keyword-triggered follow-ups. The questions are good but generic -- they don't reference anything the twin already knows about the user. Brain idea 608dc733 proposes using the digital twin's existing knowledge (books, music, movie lists, enrichment answers, personality traits) to generate personalized, conversational prompts that feel like talking to someone who already knows you rather than filling out a survey. + +#### What Data to Capture + +The aesthetic taste system captures preferences across **7 domains**, extending P2's 5 sections with 2 new ones (fashion/texture and digital/interface): + +| Domain | Data Captured | Sources That Seed It | +|--------|--------------|---------------------| +| **Movies & Film** | Visual style preferences, narrative structure, mood/atmosphere, genre affinities, anti-preferences, formative films | BOOKS.md (narrative taste), enrichment:favorite_movies, existing P2 responses | +| **Music & Sound** | Functional use (focus/energy/decompress), genre affinities, production preferences, anti-sounds, formative artists | AUDIO.md, enrichment:music_taste, existing P2 responses | +| **Visual Art & Design** | Minimalism vs maximalism spectrum, color palette preferences, design movements, typography, layout sensibility | CREATIVE.md, enrichment:aesthetics, existing P2 responses | +| **Architecture & Spaces** | Material preferences, light quality, scale/intimacy, indoor-outdoor relationship, sacred vs functional | enrichment:aesthetics, existing P2 responses | +| **Food & Culinary** | Flavor profiles, cuisine affinities, cooking philosophy, dining experience priorities, sensory texture preferences | enrichment:daily_routines (meal patterns), existing P2 responses | +| **Fashion & Texture** *(new)* | Material/fabric preferences, silhouette comfort, color wardrobe, formality spectrum, tactile sensitivity | genome:sensory markers (if available), enrichment:aesthetics | +| **Digital & Interface** *(new)* | Dark vs light mode, information density, animation tolerance, typography preferences, notification style, tool aesthetics | PREFERENCES.md, existing PortOS theme choices (port-bg, port-card etc.) | + +Each domain captures: +- **Positive affinities** -- what they're drawn to and why +- **Anti-preferences** -- what they actively avoid (often more revealing than likes) +- **Functional context** -- how the preference serves them (focus, comfort, identity, social) +- **Formative influences** -- early experiences that shaped the preference +- **Evolution** -- how the preference has changed over time + +#### Conversational Prompting Flow + +The key design principle: **conversation, not survey**. The twin generates questions that reference things it already knows, creating a dialogue that feels like it's building on shared context. + +**Flow architecture:** + +``` ++---------------------------------------------------+ +| 1. Context Aggregation | +| Read: BOOKS.md, AUDIO.md, CREATIVE.md, | +| PREFERENCES.md, enrichment answers, | +| existing taste-profile.json responses, | +| personality traits (Big Five Openness) | ++---------------------------------------------------+ +| 2. 
Static Core Question (from P2) | +| Serve the existing static question first | +| to establish baseline in that domain | ++---------------------------------------------------+ +| 3. Personalized Follow-Up Generation | +| LLM generates 1 contextual follow-up using | +| identity context + previous answer | +| e.g., "You listed Blade Runner -- what about | +| its visual language specifically grabbed you?" | ++---------------------------------------------------+ +| 4. Depth Probing (optional, user-initiated) | +| "Want to go deeper?" button generates | +| another personalized question that connects | +| across domains (e.g., music taste <-> visual) | ++---------------------------------------------------+ +| 5. Summary & Synthesis | +| After core + follow-ups complete, LLM | +| generates section summary + cross-domain | +| pattern detection | ++---------------------------------------------------+ +``` + +**Prompt template for personalized question generation:** + +``` +You are a thoughtful interviewer building an aesthetic taste profile. +You already know the following about this person: + +## Identity Context +{identityContext -- excerpts from BOOKS.md, AUDIO.md, enrichment answers, traits} + +## Previous Responses in This Section +{existingResponses -- Q&A pairs from taste-profile.json for this section} + +## Section: {sectionLabel} + +Generate ONE follow-up question that: +1. References something specific from their identity context or previous answers +2. Probes WHY they prefer what they do, not just WHAT +3. Feels conversational -- like a friend who knows them asking a natural question +4. Explores an angle their previous answers haven't covered yet +5. Is concise (1-2 sentences max) + +Do NOT: +- Ask generic questions that ignore the context +- Repeat topics already covered in previous responses +- Use survey language ("On a scale of 1-10...") +- Ask multiple questions at once +``` + +**Example personalized exchanges:** + +> **Static (P2):** "Name 3-5 films you consider near-perfect." +> **User:** "Blade Runner, Stalker, Lost in Translation, Drive, Arrival" +> +> **Personalized (P2.5):** "Your BOOKS.md lists several sci-fi titles with themes of isolation and altered perception. Four of your five film picks share that same atmosphere. Is solitude a feature of stories you're drawn to, or is it more about the specific visual treatment of lonely spaces?" + +> **Static (P2):** "What artists or albums have had a lasting impact?" +> **User:** "Radiohead, Boards of Canada, Massive Attack" +> +> **Personalized (P2.5):** "All three of those artists layer heavy texture over minimalist structures. Your CREATIVE.md mentions an appreciation for 'controlled complexity.' Does this principle -- density within restraint -- apply to how you think about visual design too?" + +#### Data Model -- Where Taste Lives + +Taste data lives in **two files** with distinct roles: + +**1. 
Raw questionnaire responses: `data/digital-twin/taste-profile.json`** (existing, extended) + +```json +{ + "version": "2.0.0", + "createdAt": "...", + "updatedAt": "...", + "sections": { + "movies": { + "status": "completed", + "responses": [ + { + "questionId": "movies-core-1", + "answer": "Blade Runner, Stalker, Lost in Translation...", + "answeredAt": "...", + "source": "static" + }, + { + "questionId": "movies-p25-1", + "answer": "It's not solitude per se, it's the visual...", + "answeredAt": "...", + "source": "personalized", + "generatedQuestion": "Your BOOKS.md lists several sci-fi titles...", + "identityContextUsed": ["BOOKS.md:sci-fi-themes", "taste:movies-core-1"] + } + ], + "summary": "..." + }, + "fashion": { "status": "pending", "responses": [], "summary": null }, + "digital": { "status": "pending", "responses": [], "summary": null } + }, + "profileSummary": null, + "lastSessionAt": null +} +``` + +Changes from v1: +- `source` field distinguishes static vs personalized questions +- `generatedQuestion` stores the LLM-generated question text (since personalized questions aren't in the static definition) +- `identityContextUsed` tracks which identity sources informed the question (for provenance) +- Two new sections: `fashion`, `digital` +- Version bumped to 2.0.0 + +**2. Synthesized aesthetic profile: `data/digital-twin/aesthetics.json`** (planned in P1, populated by P2.5) + +```json +{ + "version": "1.0.0", + "updatedAt": "...", + "profile": { + "visualStyle": ["brutalist minimalism", "high-contrast neon", "controlled complexity"], + "narrativePreferences": ["isolation themes", "slow burn", "ambiguity over resolution"], + "musicProfile": ["textural electronica", "atmospheric layering", "functional listening"], + "spatialPreferences": ["raw materials", "dramatic light", "intimacy over grandeur"], + "culinaryIdentity": ["umami-driven", "improvisational cooking", "experience over formality"], + "fashionSensibility": ["monochrome", "natural fibers", "minimal branding"], + "digitalAesthetic": ["dark mode", "high information density", "subtle animation"], + "antiPatterns": ["visual clutter", "forced symmetry", "saccharine sentimentality"], + "corePrinciples": ["density within restraint", "function informing form", "earned complexity"] + }, + "sources": { + "tasteQuestionnaire": { + "sectionsCompleted": 7, + "totalResponses": 28, + "lastUpdated": "..." 
+ }, + "enrichment": { + "aesthetics": { "questionsAnswered": 5 }, + "favoriteBooks": { "analyzed": true, "themes": ["existential sci-fi", "systems thinking"] }, + "favoriteMovies": { "analyzed": true, "themes": ["atmospheric isolation", "neon noir"] }, + "musicTaste": { "analyzed": true, "themes": ["textural electronica", "ambient"] } + }, + "documents": ["BOOKS.md", "AUDIO.md", "CREATIVE.md", "PREFERENCES.md"] + }, + "crossDomainPatterns": [ + "Preference for 'controlled complexity' appears across music (layered textures), visual art (minimalist structure with dense detail), architecture (raw materials with precise placement), and food (complex umami built from simple ingredients)", + "Anti-preference for overt sentimentality spans film (avoids melodrama), music (dislikes saccharine pop), and design (rejects decorative ornamentation)" + ], + "genomicCorrelations": { + "tasteReceptorGenes": "TAS2R38 status may correlate with bitter-food tolerance preferences", + "sensoryProcessing": "Olfactory receptor variants may explain heightened texture sensitivity" + } +} +``` + +This file is the **canonical aesthetic profile** referenced by the Identity orchestrator (`identity.json`). It is regenerated whenever taste-profile.json accumulates significant new responses. + +#### Implementation Steps + +1. **Add 2 new sections** to `TASTE_SECTIONS` in `taste-questionnaire.js`: `fashion` and `digital`, each with 3 core questions and keyword-triggered follow-ups +2. **Add `aggregateIdentityContext(sectionId)`** to `taste-questionnaire.js` -- reads BOOKS.md, AUDIO.md, CREATIVE.md, PREFERENCES.md, enrichment answers, and existing taste responses to build a context string for the LLM +3. **Add `generatePersonalizedTasteQuestion(sectionId, existingResponses, identityContext)`** -- calls the active AI provider with the prompt template above, returns a single personalized follow-up question +4. **Add `POST /api/digital-twin/taste/:section/personalized-question`** route that returns a generated question +5. **Extend `submitAnswer()`** to accept `source: 'personalized'` and store `generatedQuestion` + `identityContextUsed` metadata +6. **Add "Go deeper" button** to TasteTab.jsx after each static follow-up cycle completes -- clicking it calls the personalized question endpoint +7. **Add `generateAestheticsProfile()`** to `taste-questionnaire.js` -- synthesizes all taste-profile.json responses + enrichment data into `aesthetics.json` +8. **Bump taste-profile.json version** to 2.0.0, migrate existing responses to include `source: 'static'` +9. **Update TasteTab.jsx** to render personalized questions differently (subtle indicator showing the twin referenced specific context) + +#### Prerequisite Relaxation + +The original spec listed P1 (Identity orchestrator) as a hard prerequisite. This is relaxed: P2.5 can read identity documents directly from the filesystem (`BOOKS.md`, `AUDIO.md`, etc.) and enrichment data from `meta.json` without needing the orchestrator layer. The orchestrator becomes useful for caching and cross-section queries but is not strictly required for context aggregation. 
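+
+To make steps 2 and 3 concrete, here is a minimal sketch of `aggregateIdentityContext()` under the relaxed prerequisite -- reading twin documents and prior taste responses straight from disk. The document paths, the per-document truncation cap, and the `readIfPresent` helper are assumptions for illustration, not the implemented `taste-questionnaire.js` code:
+
+```js
+// Illustrative sketch only -- paths, caps, and helper names are assumptions.
+import { readFile } from 'node:fs/promises';
+import path from 'node:path';
+
+const TWIN_DIR = 'data/digital-twin';
+const IDENTITY_DOCS = ['BOOKS.md', 'AUDIO.md', 'CREATIVE.md', 'PREFERENCES.md'];
+
+// Resolve to null instead of throwing so missing documents are simply skipped.
+const readIfPresent = (filePath) => readFile(filePath, 'utf8').catch(() => null);
+
+export async function aggregateIdentityContext(sectionId) {
+  const parts = [];
+  for (const doc of IDENTITY_DOCS) {
+    const text = await readIfPresent(path.join(TWIN_DIR, doc));
+    if (text) parts.push(`## ${doc}\n${text.slice(0, 2000)}`); // cap each doc to bound prompt size
+  }
+  // Prior answers in this section feed the "Previous Responses" block of the prompt template.
+  const raw = await readIfPresent(path.join(TWIN_DIR, 'taste-profile.json'));
+  const responses = raw ? (JSON.parse(raw).sections?.[sectionId]?.responses ?? []) : [];
+  if (responses.length) {
+    parts.push(`## Previous ${sectionId} responses\n` + responses
+      .map((r) => `Q: ${r.generatedQuestion || r.questionId}\nA: ${r.answer}`)
+      .join('\n\n'));
+  }
+  // Enrichment answers from meta.json would be appended the same way.
+  return parts.join('\n\n');
+}
+```
+
+`generatePersonalizedTasteQuestion()` (step 3) would then interpolate this context string and the latest answer into the prompt template above before calling the active AI provider.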
+ +### P3: Mortality-Aware Goal Tracking +- Create `data/digital-twin/goals.json` +- Add `GET/POST/PUT/DELETE /api/digital-twin/identity/goals` routes +- Birth date input + SSA actuarial table lookup +- Genome-adjusted life expectancy: weight longevity markers (5 markers: FOXO3A, IGF1R, CETP, IPMK, TP53) and cardiovascular risk markers (5 markers: Factor V, 9p21, Lp(a), LPA aspirin, PCSK9) into adjustment factor +- Time-horizon calculation: years remaining, healthy years, percent complete +- Urgency scoring: `urgency = (goalHorizonYears - yearsRemaining) / goalHorizonYears` normalized +- Goal CRUD with category tagging and milestone tracking + +### P4: Identity Tab UI +- Add `identity` tab to `TABS` constant in `constants.js` +- Create `IdentityTab.jsx` with dashboard layout (4 summary cards + cross-insights) +- Create `ChronotypeEditor.jsx` -- schedule visualization and override controls +- Create `TasteQuestionnaire.jsx` -- section-by-section prompted flow +- Create `GoalTracker.jsx` -- goal list with urgency heatmap and timeline view +- Wire sub-routes for deep dives + +### P5: Cross-Insights Engine +- Add `generateCrossInsights(identity)` in identity service +- Cross-reference genome markers with chronotype, goals, and enrichment data +- Generate natural-language insight strings (e.g., caffeine + chronotype, longevity + goal urgency) +- Display on Identity dashboard and inject into CoS context when relevant +- Consider autonomous job: periodic identity insight refresh +- Example cross-insights from current marker data: + - CLOCK + CRY1 + PER2 -> composite chronotype confidence (3 markers agreeing = high confidence evening/morning) + - MTNR1B concern + evening chronotype -> "avoid eating after 8pm -- your melatonin receptor variant impairs late glucose handling" + - CYP1A2 slow metabolizer + CLOCK evening -> "caffeine cutoff by noon, not 2pm" + - FOXO3A/CETP/IGF1R longevity markers + cardiovascular risk -> adjusted life expectancy for goal urgency + +## Extension Roadmap + +This roadmap connects brain ideas and the Genome Section Integration project (0e6a0332) into a unified implementation sequence. + +### Source Ideas +- **Brain idea 608dc733**: "Prompting Aesthetic Taste Docs via Digital Twin" -- use the twin's existing knowledge to generate personalized aesthetic preference questions +- **Brain idea 284dd487**: "Genome Types & Chronotype Trait" -- derive chronotype from 5 sleep/circadian markers + behavioral data +- **Project 0e6a0332**: "Genome Section Integration" -- unify genome data with Identity page architecture + +### Phase Dependency Graph + +``` +P1: Identity Orchestrator & Chronotype ---- (brain idea 284dd487) + | Creates identity.json, chronotype.json, + | identity service, derivation from 5 sleep markers + | + +-> P2.5: Personalized Taste Prompting --- (brain idea 608dc733) + | Uses identity context to generate smart taste questions + | Enhances existing TasteTab with twin-aware follow-ups + | + +-> P3: Mortality-Aware Goal Tracking + | Birth date + genome longevity/cardio markers -> life expectancy + | Urgency scoring for prioritized goal management + | + +-> P4: Identity Tab UI + Dashboard with summary cards for all 4 sections + Sub-routes for chronotype, taste, goals deep dives + | + +-> P5: Cross-Insights Engine + Reads all sections, generates natural-language insights + Injects identity context into CoS agent briefings +``` + +### Implementation Priority +1. **P1** -- Foundation: nothing else works without the orchestrator +2. 
**P2.5** -- Quick win: enhances existing Taste tab with minimal new infrastructure +3. **P3** -- New feature: mortality-aware goals need genome data flowing through identity service +4. **P4** -- UI: renders what P1-P3 produce +5. **P5** -- Polish: cross-entity reasoning requires all sections populated + +## Data Flow + +``` +User uploads 23andMe -> genome.json (117 markers, 32 categories) + | +Identity service reads 5 sleep markers + 2 caffeine markers + | +Derives chronotype.json (+ behavioral input from daily_routines enrichment) + | +Twin reads identity context -> generates personalized taste questions (P2.5) + | +User completes taste questionnaire -> taste-profile.json -> aesthetics.json + | +LLM analyzes books/movies/music docs -> seeds aesthetic profile themes + | +User sets birth date -> goals.json (life expectancy from actuarial + 10 genome markers) + | +Cross-insights engine reads all 4 sections -> generates natural-language insights + | +Identity tab renders unified dashboard with summary cards + insights + | +CoS injects identity context into agent briefings when relevant +``` + +## Files to Create/Modify + +**New files:** +- `data/digital-twin/identity.json` -- orchestrator metadata +- `data/digital-twin/chronotype.json` -- derived chronotype profile +- `data/digital-twin/aesthetics.json` -- taste profile +- `data/digital-twin/goals.json` -- mortality-aware goals +- `server/services/identity.js` -- identity orchestration service +- `server/routes/identity.js` -- API routes +- `server/lib/identityValidation.js` -- Zod schemas +- `client/src/components/digital-twin/tabs/IdentityTab.jsx` -- dashboard +- `client/src/components/digital-twin/identity/ChronotypeEditor.jsx` +- `client/src/components/digital-twin/identity/TasteQuestionnaire.jsx` +- `client/src/components/digital-twin/identity/GoalTracker.jsx` +- `client/src/components/digital-twin/identity/CrossInsights.jsx` + +**Modified files:** +- `client/src/components/digital-twin/constants.js` -- add Identity tab +- `client/src/pages/DigitalTwin.jsx` -- add Identity tab rendering +- `client/src/services/api.js` -- add identity API methods +- `server/index.js` -- mount identity routes +- `server/services/taste-questionnaire.js` -- add `generatePersonalizedTasteQuestion()` using identity context (P2.5) +- `client/src/components/digital-twin/tabs/TasteTab.jsx` -- wire personalized question generation (P2.5) + +## Design Decisions + +1. **Separate data files per section** (not one giant file) -- each section has independent update cadence and the genome file (82KB) is already large +2. **Derivation over duplication** -- chronotype reads from genome.json at query time rather than copying marker data. Identity service is the join layer +3. **Progressive disclosure** -- Identity tab shows summary cards; deep dives are sub-routes, not modals (per CLAUDE.md: all views must be deep-linkable) +4. **LLM-assisted but user-confirmed** -- aesthetic themes extracted by LLM from media lists are suggestions, not gospel. User confirms/edits +5. **No new dependencies** -- uses existing Zod, Express, React, Lucide stack +6. **Genome data stays read-only** -- identity service reads genome markers but never writes to genome.json +7. **Taste data consolidation** -- P2 created `taste-profile.json` (5 sections). P2.5 adds twin-aware personalized questions. Long-term, taste data migrates into `aesthetics.json` as the canonical aesthetic profile, with taste-profile.json as the raw questionnaire responses +8. 
**Weighted chronotype confidence** -- 5 sleep markers weighted by specificity: CRY1 (strongest DSPD signal) > CLOCK (evening tendency) > PER2 (circadian period) > MTNR1B (melatonin coupling) > DEC2 (duration, not phase). Behavioral data from daily_routines enrichment gets equal weight to genetic composite diff --git a/docs/research/kalshibot-health-check-2026-02-17.md b/docs/research/kalshibot-health-check-2026-02-17.md deleted file mode 100644 index 46d0f86c..00000000 --- a/docs/research/kalshibot-health-check-2026-02-17.md +++ /dev/null @@ -1,175 +0,0 @@ -# Kalshibot Health Check Analysis — 2026-02-17 - -## Summary - -**Status: DEGRADED** — 0% win rate across 3 trades, -$148.14 total loss on 2026-02-16. All 3 live trades settled at $0 (complete loss of cost basis). Shadow gamma-scalper posted +$46 on 1 trade (100% win rate). Current balance: $1,024.51. - ---- - -## Trade-by-Trade Analysis - -### Trade 1: Settlement Sniper — KXBTC-26FEB1611-B67375 (-$42.77) - -- **Ticker**: B67375 bracket ($67,250-$67,500) -- **Side**: YES (betting BTC settles in this bracket) -- **Entry**: 200 contracts @ 21c ($42.77 + $1.69 fee) at 15:54 UTC -- **Settlement**: YES = $0 at 16:00 UTC (BTC was NOT in this bracket) -- **Loss**: -$42.77 (100% of cost basis, 4.2% of balance) - -**Root cause**: The model estimated >33% fair probability for this bracket (21c + 12% edge). With 200 contracts (the configured max), Kelly sizing put $42 at risk on a single binary outcome. BTC settled outside this range, zeroing the position. - -**Key issue**: `settlementRideThreshold: 0.40` may have prevented the 60s exit window from triggering. If the model still showed 40%+ edge at t-60s, the position rode to $0 instead of exiting with a partial loss. - -### Trade 2: Coinbase Fair Value — KXBTC-26FEB1611-B67625 (-$52.79) - -- **Ticker**: B67625 bracket ($67,500-$67,750) -- **Side**: NO (betting BTC does NOT settle in this bracket) -- **Entry**: 186 contracts @ 28c ($52.79 + $2.57 fee) at 15:56 UTC -- **Settlement**: NO = $0 at 16:00 UTC (BTC WAS in this bracket — NO bet lost) -- **Loss**: -$52.79 (100% of cost basis, 5.2% of balance) - -**Root cause**: The strategy used a lowered `edgeThreshold: 0.20` (default is 0.25) and wider `maxSecondsToSettlement: 300` (default is 180). Entry at 3m35s before settlement with a 20% edge threshold allowed a signal that wouldn't have passed at the default 25% threshold. The NO side was wrong — BTC landed in this bracket. - -**Position sizing note**: 186 contracts @ 28c = $52, exceeding the 3% `maxBetPct` of ~$30. The `calculatePositionSize` method may not be correctly capping by `maxBetPct`. - -### Trade 3: Coinbase Fair Value — KXBTC-26FEB1612-B67625 (-$52.58) - -- **Ticker**: B67625 bracket ($67,500-$67,750), next hour -- **Side**: YES (betting BTC settles in this bracket) -- **Entry**: 141 contracts across 3 fills @ 37-38c ($52.58 + $2.32 fee) at 16:56 UTC -- **Settlement**: YES = $0 at 17:00 UTC (BTC was NOT in this bracket) -- **Loss**: -$52.58 (100% of cost basis, 5.1% of balance) - -**Root cause**: Same bracket, opposite side, next hour. BTC moved away from $67,500-$67,750 between 16:00 and 17:00. Higher entry price (37-38c) indicates greater model confidence, but the thesis was still wrong. 
- -### Shadow Trade: Gamma Scalper — KXBTC-26FEB1612-B67875 (+$46.00) - -- **Ticker**: B67875 bracket ($67,750-$68,000) -- **Side**: NO (betting BTC does NOT settle in this bracket) -- **Entry**: 50 contracts @ 8c ($4.00 + $0.26 fee) at 16:57 UTC -- **Settlement**: NO = $1.00 at 17:00 UTC ($50 proceeds, +$46 profit) -- **Edge reported**: 77.1% - -**Why it outperformed**: -1. Tiny risk: $4 total cost vs $42-52 for live strategies -2. Asymmetric payoff: 8c entry for $1 payout = 12.5x return -3. Strong signal: 77.1% edge vs 12-20% threshold for live strategies -4. Correct thesis: BTC was not in the $67,750-$68,000 range - ---- - -## Systemic Issues Identified - -### 1. Position Sizing Too Aggressive for Binary Bracket Outcomes - -All 3 live trades risked $42-52 each (4-5% of balance). Bracket markets settle at $0 or $1 — there's no partial recovery. Current `maxBetPct` settings (5% sniper, 3% fair-value) allow catastrophic per-trade losses. - -### 2. Coinbase Fair Value Config Deviates from Safer Defaults - -| Parameter | Current | Default | Risk Impact | -|-----------|---------|---------|-------------| -| `edgeThreshold` | 0.20 | 0.25 | Allows noisier signals | -| `maxSecondsToSettlement` | 300 | 180 | Enters too early, less certain | -| `exitEdgeThreshold` | 0.08 | 0.10 | Holds losing positions longer | -| `maxPositions` | 3 | 2 | More concurrent risk | - -### 3. No Per-Window Exposure Cap - -Trades 1 and 2 both targeted the 16:00 UTC settlement window. Combined exposure: $95 (9.3% of balance) on a single 15-minute interval. No mechanism caps aggregate risk per settlement window. - -### 4. Settlement Ride Exception May Amplify Losses - -The sniper's `settlementRideThreshold: 0.40` can override the forced exit at t-60s. In bracket markets where the probability model can be persistently wrong (model shows 40% edge but the bracket misses), this turns a possible small-loss exit into a guaranteed 100% loss. - -### 5. Gamma-Scalper Live Execution Gap (Root Cause Confirmed) - -Gamma-scalper is `enabled: true` in config but was blocked from live execution by the **one-position-per-settlement-window** rule in `simulation-engine.js` (lines 773-798). The engine evaluates strategies in config order: settlement-sniper → coinbase-fair-value → momentum-rider → gamma-scalper. By the time gamma-scalper generated its B67875 signal at 16:57 UTC, the coinbase-fair-value strategy had already placed a position (B67625 YES) in the 17:00 UTC settlement window, triggering the cross-position conflict check. - -**Code path**: `simulation-engine.js:773-798` — when a buy signal arrives, the engine checks if any existing position or pending reservation shares the same `close_time`. If so, the signal is rejected with `"settlement window conflict"`. Since gamma-scalper evaluates last in the strategy loop (`simulation-engine.js:680`), it always loses to earlier strategies. - -**Why the shadow trade succeeded**: Shadow evaluation (`simulation-engine.js:863-925`) runs against `shadowState.positions`, which is separate from live positions. The shadow state had no positions in the 17:00 window, so the gamma-scalper signal passed. - -**Fix required**: Strategy evaluation order should prioritize lower-risk strategies (gamma-scalper at $4/trade) over higher-risk ones ($50/trade), or the engine should collect all signals first and rank them before executing. 
- ---- - -## Recommended Parameter Changes - -### Immediate (config.json changes only) - -```json -{ - "strategies": { - "settlement-sniper": { - "params": { - "maxBetPct": 0.03, - "maxContracts": 100, - "settlementRideThreshold": 1.0 - } - }, - "coinbase-fair-value": { - "params": { - "edgeThreshold": 0.25, - "exitEdgeThreshold": 0.10, - "maxSecondsToSettlement": 180, - "maxPositions": 2 - } - }, - "gamma-scalper": { - "params": { - "maxPositions": 3 - } - } - } -} -``` - -**Rationale per change**: - -1. **sniper `maxBetPct` 0.05 -> 0.03**: Cap single-trade risk at 3%. Trade 1 would have risked ~$21 instead of $42. -2. **sniper `maxContracts` 200 -> 100**: Hard cap on position size. Combined with lower `maxBetPct`, prevents outsized bracket bets. -3. **sniper `settlementRideThreshold` 0.40 -> 1.0**: Effectively disables settlement riding. Forces positions to exit at t-60s instead of riding to $0. Can be re-enabled after more shadow testing validates the feature. -4. **fair-value `edgeThreshold` 0.20 -> 0.25**: Restore default. Requires 25% divergence before entry, filtering out Trade 2's 20% signal. -5. **fair-value `exitEdgeThreshold` 0.08 -> 0.10**: Exit sooner when thesis weakens. -6. **fair-value `maxSecondsToSettlement` 300 -> 180**: Restore default. Prevents entries at 3m+ before settlement where vol estimates are noisier. -7. **fair-value `maxPositions` 3 -> 2**: Reduce concurrent risk exposure. -8. **gamma-scalper `maxPositions` 2 -> 3**: Give the proven low-risk strategy more room to deploy. - -### Post-Analysis Config Audit (2026-02-17) - -After the initial health check, some parameters were applied to `config.json` but several were applied incorrectly or missed: - -| Parameter | Health Check Target | Current Config | Status | -|-----------|-------------------|----------------|--------| -| sniper `maxBetPct` | 0.02 | 0.02 | Applied (2026-02-18) | -| sniper `maxContracts` | 100 | 100 | Applied | -| sniper `settlementRideThreshold` | 1.0 | 1.0 | Applied | -| fair-value `edgeThreshold` | 0.25 | 0.25 | Applied (2026-02-18) | -| fair-value `exitEdgeThreshold` | 0.10 | 0.10 | Applied | -| fair-value `maxSecondsToSettlement` | 180 | 180 | Applied (2026-02-18) | -| fair-value `maxBetPct` | 0.02 | 0.02 | Applied (2026-02-18) | -| fair-value `maxPositions` | 2 | 2 | Applied | -| gamma-scalper `maxPositions` | 3 | 3 | Applied (2026-02-18) | -| gamma-scalper `maxEdgeSanity` | 0.95 | 0.95 | Added (2026-02-18) — per-strategy override | - -### Code Changes Applied (2026-02-18) - -1. **Strategy evaluation order by risk**: In `simulation-engine.js:634`, enabled strategies are now sorted by `maxBetPct` ascending (cheapest first). Gamma-scalper ($4/trade) gets window priority over fair-value ($50/trade). This would have allowed the +$46 gamma-scalper trade to execute live on 2026-02-16. -2. **Per-strategy edge sanity override**: In `simulation-engine.js:782`, the edge sanity cap now checks `strategy.params.maxEdgeSanity` before falling back to the global `risk.maxEdgeSanity`. Gamma-scalper's OTM bracket strategy inherently produces high-edge signals (77% in the shadow trade) that were blocked by the global 0.85 cap. Its per-strategy cap is now 0.95. - -### Code Changes Previously Needed (Kalshibot repo) - -1. ~~**Strategy evaluation order by risk** (CRITICAL): In `simulation-engine.js:680`, the strategy loop evaluates in config order. Since only one position per settlement window is allowed (line 773-798), the first strategy to claim a window wins. 
Change the loop to sort enabled strategies by `maxBetPct` ascending (cheapest first), so gamma-scalper ($4/trade) gets priority over fair-value ($50/trade). This single change would have allowed the +$46 gamma-scalper trade to execute live.~~ DONE -2. **Per-window exposure cap**: Already implemented at `simulation-engine.js:800-815` with `maxExposurePerWindow: 75`. This was added after the initial analysis — verify it's working correctly. -3. **Position size audit**: Verify `calculatePositionSize` in `base-strategy.js` correctly enforces `maxBetPct` — Trade 2's $52 cost exceeded the 3% cap of ~$30. - ---- - -## Impact Estimate - -If these parameter changes had been active on 2026-02-16: -- **Trade 1**: ~$21 loss instead of $42 (maxContracts: 100, maxBetPct: 0.03) — already applied -- **Trade 2**: Filtered out entirely (edgeThreshold 0.25 would reject the 20% signal) -- **Trade 3**: Likely filtered or reduced (tighter maxSecondsToSettlement: 180 blocks 4m-early entries) -- **Gamma-scalper**: With strategy-order-by-risk, would have claimed the 17:00 window first → +$46 live -- **Estimated day**: -$21 + $46 = **+$25 net** instead of -$148 — a $173 improvement diff --git a/docs/research/pumpfun-data-sources.md b/docs/research/pumpfun-data-sources.md deleted file mode 100644 index 3c9fe87a..00000000 --- a/docs/research/pumpfun-data-sources.md +++ /dev/null @@ -1,459 +0,0 @@ -# Pump.fun Launch Tracking Engine: Data Source Evaluation - -**Brain Project**: 467fbe07 — Pump.fun Launch Tracking Engine -**Date**: 2026-02-16 -**Deadline**: 2026-02-20 -**Objective**: Evaluate Helius, Birdeye, and pump.fun APIs for rate limits, auth, and schemas - ---- - -## Executive Summary - -Three primary data source categories were evaluated for the Pump.fun Launch Tracking Engine: - -1. **Helius** — Solana-native RPC/infrastructure with Enhanced APIs, webhooks, and gRPC streaming -2. **Birdeye** — DeFi analytics platform with rich token/price/trade REST APIs -3. **pump.fun Direct + Third-Party Indexers** — pump.fun's own frontend APIs plus Bitquery/bloXroute GraphQL indexers - -**Recommendation**: Use Helius (Developer tier, $49/mo) as the primary real-time data source for new token detection and transaction monitoring. Supplement with Birdeye (Starter tier, $99/mo) for enriched token analytics, price history, and holder data. Use pump.fun direct APIs sparingly for metadata not available elsewhere. - ---- - -## 1. Helius - -### Overview -Solana-native RPC and API platform. Best-in-class for raw blockchain data, real-time streaming, and transaction parsing on Solana. 
- -### Authentication -- API key appended as query parameter: `?api-key=YOUR_KEY` -- Keys managed via [Helius Dashboard](https://dashboard.helius.dev) -- SDK available: `@helius-labs/helius-sdk` (npm) - -### Pricing & Rate Limits - -| Plan | Cost/mo | Credits/mo | RPC RPS | DAS API RPS | Enhanced API RPS | WebSockets | -|------|---------|-----------|---------|-------------|-----------------|------------| -| Free | $0 | 1M | 10 | 2 | 2 | Standard only | -| Developer | $49 | 10M | 50 | 10 | 10 | Standard only | -| Business | $499 | 100M | 200 | 50 | 50 | Enhanced | -| Professional | $999 | 200M | 500 | 100 | 100 | Enhanced + LaserStream | - -### Key APIs for Pump.fun Tracking - -**Enhanced Transactions API** -- `POST https://api-mainnet.helius-rpc.com/v0/transactions?api-key=KEY` — parse up to 100 signatures per request -- `GET https://api-mainnet.helius-rpc.com/v0/addresses/{address}/transactions?api-key=KEY` — address history with pagination -- Parses raw Solana transactions into human-readable format -- Decodes instruction data, token transfers, balance changes -- Response includes: `description`, `type`, `source`, `fee`, `feePayer`, `signature`, `slot`, `timestamp`, `nativeTransfers`, `tokenTransfers`, `accountData`, `events` -- Filter by pump.fun program ID: `6EF8rrecthR5Dkzon8Nwu78hRvfCKubJ14M5uBEwF6P` -- Commitment levels: `finalized` (default) or `confirmed` -- Error responses: 400 (bad request), 401 (auth), 429 (rate limit), 500/503/504 (server) - -**Webhooks** -- Push-based event delivery for on-chain events -- Configurable filters: `TOKEN_MINT`, account-specific, program-specific -- Can monitor pump.fun program for new token creates and trades -- Eliminates polling — server receives events as they happen - -**gRPC Streaming (LaserStream)** -- Real-time account and transaction streams -- Filter by program owner for pump.fun bonding curve accounts -- Commitment level: `CONFIRMED` -- Tracks: operation type, user/fee payer, signatures, timestamps, balance changes -- **Note**: Professional plan only for mainnet gRPC - -**DAS (Digital Asset Standard) API** -- Token metadata, ownership, and collection queries -- Useful for enriching token data post-detection - -### Response Schema (Enhanced Transaction) -```json -{ - "description": "string", - "type": "SWAP|TOKEN_MINT|TRANSFER|...", - "source": "PUMP_FUN|RAYDIUM|...", - "fee": 5000, - "feePayer": "pubkey", - "signature": "txid", - "timestamp": 1700000000, - "nativeTransfers": [{ "fromUserAccount": "...", "toUserAccount": "...", "amount": 1000000 }], - "tokenTransfers": [{ "fromTokenAccount": "...", "toTokenAccount": "...", "tokenAmount": 1000, "mint": "..." }], - "accountData": [{ "account": "...", "nativeBalanceChange": -5000, "tokenBalanceChanges": [...] }] -} -``` - -### Strengths -- Lowest latency for new token detection (webhooks + gRPC) -- Native pump.fun program filtering -- Enhanced transaction parsing reduces client-side logic -- Staked connections on all paid plans for high tx success -- Well-documented SDK - -### Limitations -- gRPC/LaserStream requires Professional ($999/mo) for mainnet -- Enhanced WebSocket metering (3 credits/0.1MB) for new users -- Raw blockchain data — no pre-built analytics (no OHLCV, no market cap aggregation) - ---- - -## 2. Birdeye - -### Overview -DeFi analytics platform with comprehensive REST APIs for token data, pricing, trades, OHLCV, and wallet analytics. Covers Solana and 30+ other chains. 
- -### Authentication -- API key via header: `X-API-KEY: YOUR_KEY` -- Keys managed via [Birdeye Dashboard](https://bds.birdeye.so) -- Optional `chain` parameter defaults to Solana - -### Pricing & Rate Limits - -| Plan | Cost/mo | Compute Units | Global RPS | WebSockets | -|------|---------|--------------|-----------|------------| -| Standard (Free) | $0 | 30K | 1 | No | -| Lite | $39 | 1.5M | 15 | No | -| Starter | $99 | 5M | 15 | No | -| Premium | $199 | 15M | 50 | No | -| Premium Plus | $250 | 20M | 50 | 500 conns | -| Business (B-05) | $499 | 50M | 100 | 2000 conns | -| Business | $699 | 100M | 100 | Yes | - -**Per-Endpoint Rate Limits** (within global account limit): - -| Endpoint | Path | Max RPS | -|----------|------|---------| -| Price (single) | `/defi/price` | 300 | -| Price (multi) | `/defi/multi_price` | 300 | -| Price (historical) | `/defi/history_price` | 100 | -| Token Overview | `/defi/token_overview` | 300 | -| Token Security | `/defi/token_security` | 150 | -| Token List v3 | `/defi/v3/token/list` | 100 | -| Trades (token) | `/defi/txs/token` | 100 | -| Trades (pair) | `/defi/txs/pair` | 100 | -| OHLCV | `/defi/ohlcv` | 100 | -| Wallet Portfolio | varies | 30 rpm | - -### Key APIs for Pump.fun Tracking - -**Token Overview** (`/defi/token_overview`) -- Market cap, liquidity, volume, price change, holder count -- Single call returns comprehensive token analytics - -**Token Security** (`/defi/token_security`) -- Rug-pull risk indicators, mint authority, freeze authority -- Critical for filtering high-risk launches - -**Price APIs** (`/defi/price`, `/defi/history_price`) -- Real-time and historical pricing in SOL/USD -- Multi-token batch pricing supported - -**Trade APIs** (`/defi/txs/token`) -- Recent trades with buy/sell side, amounts, timestamps -- Pair-level trade history - -**OHLCV** (`/defi/ohlcv`) -- Candlestick data at configurable intervals -- Useful for charting and trend detection - -**Token List** (`/defi/v3/token/list`) -- Sortable by volume, market cap, price change -- Filter by timeframe for trending tokens - -### Response Schema (Token Overview) -```json -{ - "address": "mint_address", - "name": "Token Name", - "symbol": "TKN", - "decimals": 9, - "price": 0.00123, - "priceChange24hPercent": 150.5, - "volume24h": 500000, - "marketCap": 1200000, - "liquidity": 50000, - "holder": 2500, - "supply": 1000000000, - "logoURI": "https://...", - "extensions": { "website": "...", "twitter": "..." } -} -``` - -### Response Schema (Price API — `GET /defi/price?address=MINT`) -```json -{ - "success": true, - "data": { - "value": 0.38622, - "updateUnixTime": 1745058945, - "updateHumanTime": "2025-04-19T10:35:45", - "priceChange24h": 1.93, - "priceInNative": 0.00277, - "liquidity": 10854103.37 - } -} -``` - -### Strengths -- Richest analytics out of the box (market cap, liquidity, security scores) -- Pre-computed OHLCV eliminates aggregation logic -- Token security endpoint critical for filtering scams -- Batch pricing for monitoring multiple tokens -- Clean REST API, easy to integrate - -### WebSocket: New Token Listing Stream - -Available on Premium Plus ($250/mo) and above. Directly relevant for pump.fun launch detection. 
- -- **URL**: `wss://public-api.birdeye.so/socket/solana?x-api-key=YOUR_KEY` -- **Headers**: `Origin: ws://public-api.birdeye.so`, `Sec-WebSocket-Protocol: echo-protocol` -- **Subscribe**: `{ "type": "SUBSCRIBE_TOKEN_NEW_LISTING", "meme_platform_enabled": true, "sources": ["pump_dot_fun"] }` -- **CU cost**: 0.08 CU per byte - -Response schema: -```json -{ - "type": "TOKEN_NEW_LISTING_DATA", - "data": { - "address": "BkQfwVktcbWmxePJN5weHWJZgReWbiz8gzTdFa2w7Uds", - "decimals": 6, - "name": "Worker Cat", - "symbol": "$MCDCAT", - "liquidity": "12120.155172280874", - "liquidityAddedAt": 1720155863 - } -} -``` - -Supports `min_liquidity`/`max_liquidity` filters and 100+ DEX source filters including Raydium, Orca, Meteora, and pump.fun. - -### Compute Unit Costs (Key Endpoints) - -| Endpoint | CU Cost | Notes | -|----------|---------|-------| -| Token Price | 10 | Cheapest price check | -| Token Metadata | 5 | Very low cost | -| Token List v3 | 100 | Higher cost for list queries | -| Trades (token) | 10 | Affordable for trade monitoring | -| OHLCV | 40 | Moderate | -| Token New Listing (REST) | 80 | One-shot listing check | -| WS: New Listing | 0.08/byte | Streaming cost scales with data | -| WS: Price | 0.003/byte | Very affordable streaming | -| WS: Transactions | 0.0004/byte | Cheapest stream | - -### Limitations -- No push-based event delivery on tiers below Premium Plus (polling only) -- WebSocket access requires Premium Plus ($250/mo) minimum -- New token detection via REST has inherent latency — tokens must be indexed first -- Wallet endpoints severely rate-limited (30 rpm) -- Compute unit costs can escalate with heavy usage -- CU costs subject to change without notice - ---- - -## 3. pump.fun Direct APIs + Third-Party Indexers - -### 3a. pump.fun Frontend API (Direct) - -### Overview -pump.fun exposes several undocumented/semi-official API services. These are reverse-engineered from the frontend and may change without notice. 
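Before cataloguing pump.fun's own services, a minimal sketch of the Birdeye new-listing subscription described just above. The URL, headers, and subscribe payload are taken from that section; the `ws` npm package and the callback wiring are assumptions for illustration, not an official Birdeye client.

```js
// Minimal sketch of the SUBSCRIBE_TOKEN_NEW_LISTING stream, using the `ws` package
// (an assumption; any WebSocket client with custom headers and subprotocols works).
import WebSocket from 'ws';

export function watchPumpFunListings(apiKey, onListing) {
  const socket = new WebSocket(
    `wss://public-api.birdeye.so/socket/solana?x-api-key=${apiKey}`,
    'echo-protocol', // Sec-WebSocket-Protocol from the header list above
    { headers: { Origin: 'ws://public-api.birdeye.so' } }
  );

  socket.on('open', () => {
    // Subscribe payload copied from the section above; filters to pump.fun launches
    socket.send(JSON.stringify({
      type: 'SUBSCRIBE_TOKEN_NEW_LISTING',
      meme_platform_enabled: true,
      sources: ['pump_dot_fun']
    }));
  });

  socket.on('message', (raw) => {
    const msg = JSON.parse(raw.toString());
    if (msg.type === 'TOKEN_NEW_LISTING_DATA') onListing(msg.data);
  });

  return socket;
}
```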
- -### Base URLs - -| Service | URL | Purpose | -|---------|-----|---------| -| Frontend API v3 | `https://frontend-api-v3.pump.fun` | Token data, listings | -| Advanced Analytics v2 | `https://advanced-api-v2.pump.fun` | Analytics, rankings | -| Market API | `https://market-api.pump.fun` | Market data | -| Profile API | `https://profile-api.pump.fun` | User profiles | -| Swap API | `https://swap-api.pump.fun` | Token swaps | -| Volatility API v2 | `https://volatility-api-v2.pump.fun` | Volatility metrics | - -### Authentication -- JWT Bearer token: `Authorization: Bearer ` -- Required headers: `Origin: https://pump.fun`, `Accept: application/json` -- Rate limit headers in responses: `x-ratelimit-limit`, `x-ratelimit-remaining`, `x-ratelimit-reset` - -### Key Capabilities -- **483 documented endpoints** across all API versions -- Token creation details, bonding curve status, graduation tracking -- Direct access to pump.fun-specific metadata not available elsewhere -- Creator profiles and reputation data - -### Key V3 Endpoints -- `GET /coins/latest` — latest token launches -- `GET /coins/{mint}` — token details by mint address -- `GET /trades/latest` — latest trades across all tokens -- `GET /trades/token/{mint}` — trades for specific token - -### Observed Rate Limits -- ~20 requests per minute (RPM) across all endpoints -- Rate limit headers in responses: `x-ratelimit-limit`, `x-ratelimit-remaining`, `x-ratelimit-reset` -- HTTP 429 on exceeded limits -- Recommended: exponential backoff with max 3 retries - -### Limitations -- **Undocumented/unofficial** — endpoints can break without warning -- JWT auth requires mimicking browser authentication flow -- Rate limits are restrictive (~20 RPM) and undocumented officially -- No SLA or support -- Legal gray area for automated access -- WebSocket support listed as "coming soon" — not yet available - ---- - -### 3b. Bitquery (GraphQL Indexer) - -### Overview -Third-party GraphQL indexer with dedicated pump.fun query support. Real-time subscriptions for new tokens, trades, and bonding curve events. - -### Authentication -- API key via header or query parameter -- Free tier available via [Bitquery IDE](https://ide.bitquery.io) - -### Pricing - -| Plan | Cost | Points | RPS | Streams | -|------|------|--------|-----|---------| -| Developer (Free) | $0 | 1,000 | 10 req/min | 2 | -| Commercial | Custom | Custom | Custom | Custom | - -### Key APIs for Pump.fun Tracking -- **Token creation subscriptions** — real-time stream of new pump.fun launches -- **Trade subscriptions** — buy/sell with amounts and prices -- **Bonding curve status** — track graduation progress -- **ATH market cap** — all-time high calculations -- **Top traders/holders** — wallet analytics -- **Creator reputation** — all tokens by a creator address - -### GraphQL Query Example (New Token Subscription) -```graphql -subscription { - Solana { - Instructions( - where: { - Instruction: { - Program: { - Address: { is: "6EF8rrecthR5Dkzon8Nwu78hRvfCKubJ14M5uBEwF6P" } - } - } - Transaction: { Result: { Success: true } } - } - ) { - Transaction { Signature } - Instruction { - Accounts { Address Token { Mint Owner } } - Program { Method } - } - Block { Time } - } - } -} -``` - -### Limitations -- Free tier extremely limited (1,000 points, 10 req/min, 2 streams) -- Commercial pricing requires sales contact — no self-serve -- Points-based billing is opaque — hard to predict costs -- GraphQL complexity can lead to unexpected point consumption - ---- - -### 3c. 
bloXroute (Streaming) - -### Overview -Specializes in low-latency Solana data streaming with dedicated pump.fun channels. - -### Key Endpoints -- `GetPumpFunNewTokensStream` — real-time new token events -- `GetPumpFunSwapsStream` — real-time swap monitoring -- `GetPumpFunAMMSwapsStream` — AMM swap events post-graduation - -### Limitations -- Pricing not publicly documented -- Primarily targets high-frequency trading use cases -- Overkill for analytics/tracking use case - ---- - -## Comparison Matrix - -| Criteria | Helius | Birdeye | pump.fun Direct | Bitquery | -|----------|--------|---------|-----------------|----------| -| **New token detection latency** | ~1s (webhook/gRPC) | 5-30s (REST) / ~2s (WS) | Unknown | ~2-5s (subscription) | -| **Real-time streaming** | gRPC + WebSocket | WS w/ pump.fun filter ($250+) | No | GraphQL subscriptions | -| **Token analytics** | Raw tx data only | Rich (mcap, vol, security) | Basic metadata | Rich (GraphQL) | -| **OHLCV / Charts** | No | Yes | No | Yes | -| **Security scoring** | No | Yes | No | Partial | -| **Holder data** | Via DAS API | Via token overview | No | Yes (top 10) | -| **Auth complexity** | API key (simple) | API key (simple) | JWT (complex) | API key (simple) | -| **Stability / SLA** | Production-grade | Production-grade | No SLA, may break | Production-grade | -| **Min. useful tier** | Developer ($49) | Starter ($99) | Free (risky) | Commercial ($$?) | -| **Pump.fun specific** | Program filter | General DeFi | Native | Dedicated queries | -| **SDK / DX** | Excellent (npm SDK) | REST (straightforward) | None | GraphQL IDE | - ---- - -## Recommended Architecture - -``` - +------------------+ - | Helius ($49) | - | Webhooks/WS | - +--------+---------+ - | - New token events - Raw transactions - | - v - +------------------+ - | Tracking Engine | - | (PortOS app) | - +--------+---------+ - | - Token enrichment - Analytics queries - | - v - +------------------+ - | Birdeye ($99) | - | REST API | - +------------------+ - - Market cap, volume - - Security scores - - OHLCV data - - Holder counts -``` - -### Phase 1 (MVP): Helius Developer ($49/mo) -- Webhook listening for pump.fun program transactions -- Detect new token creates via `TOKEN_MINT` events -- Parse creator address, token mint, initial supply -- Store in PortOS data layer - -### Phase 2 (Enrichment): Add Birdeye Starter ($99/mo) -- Enrich detected tokens with market data -- Token security scoring for scam filtering -- OHLCV data for trend detection -- Track high-performing tokens over time - -### Phase 3 (Analytics): Evaluate Bitquery or pump.fun direct -- Creator reputation analysis -- Sniper account inventory -- Launch prediction model inputs - -**Total estimated cost**: $148/mo for Phase 1+2 - ---- - -## Next Steps - -1. **Create Helius account** and generate API key -2. **Set up webhook** for pump.fun program (`6EF8rrecthR5Dkzon8Nwu78hRvfCKubJ14M5uBEwF6P`) -3. **Create Birdeye account** and generate API key -4. **Build proof-of-concept** endpoint in PortOS that: - - Receives Helius webhook events - - Extracts new token mint + creator - - Enriches via Birdeye token overview - - Persists to `data/pumpfun/tokens.json` -5. 
**Validate latency** — measure time from on-chain creation to detection From 45ca7b21c0f1747ce21284547bff95ef20241ddc Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Wed, 18 Feb 2026 22:00:29 +0000 Subject: [PATCH 04/31] build: bump version to 0.15.1 [skip ci] --- client/package.json | 2 +- package-lock.json | 8 ++++---- package.json | 2 +- server/package.json | 2 +- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/client/package.json b/client/package.json index a2df0b19..ab4d5fde 100644 --- a/client/package.json +++ b/client/package.json @@ -1,6 +1,6 @@ { "name": "portos-client", - "version": "0.15.0", + "version": "0.15.1", "private": true, "type": "module", "scripts": { diff --git a/package-lock.json b/package-lock.json index 71c877b7..bbad3597 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "portos", - "version": "0.15.0", + "version": "0.15.1", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "portos", - "version": "0.15.0", + "version": "0.15.1", "license": "MIT", "workspaces": [ "packages/*", @@ -29,7 +29,7 @@ }, "client": { "name": "portos-client", - "version": "0.15.0", + "version": "0.15.1", "dependencies": { "@dnd-kit/core": "^6.3.1", "@dnd-kit/sortable": "^10.0.0", @@ -9368,7 +9368,7 @@ }, "server": { "name": "portos-server", - "version": "0.15.0", + "version": "0.15.1", "dependencies": { "axios": "^1.7.9", "cors": "^2.8.5", diff --git a/package.json b/package.json index c79026c8..9e241994 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "portos", - "version": "0.15.0", + "version": "0.15.1", "private": true, "description": "Local dev machine App OS portal", "author": "Adam Eivy (@antic|@atomantic)", diff --git a/server/package.json b/server/package.json index ed1095a6..9849aa1f 100644 --- a/server/package.json +++ b/server/package.json @@ -1,6 +1,6 @@ { "name": "portos-server", - "version": "0.15.0", + "version": "0.15.1", "private": true, "type": "module", "scripts": { From 7eac8b87aaed0f01c3d027b62a0b3097aec41b05 Mon Sep 17 00:00:00 2001 From: Adam Eivy Date: Wed, 18 Feb 2026 14:05:43 -0800 Subject: [PATCH 05/31] fix: safe JSON parsing, null guards, and secret redaction in script output Replace raw JSON.parse with readJSONFile in apps/socialAccounts/scriptRunner to prevent server crashes on corrupted data files. Add optional chaining on app lookups and .catch on fire-and-forget clone promise. Redact env var secrets (keys, tokens, passwords) from scriptRunner lastOutput before persisting to disk. --- .changelog/v0.15.x.md | 8 ++++++++ server/routes/brain.js | 4 +++- server/services/apps.js | 16 ++++------------ server/services/scriptRunner.js | 24 ++++++++++++++++-------- server/services/socialAccounts.js | 14 +++----------- 5 files changed, 34 insertions(+), 32 deletions(-) diff --git a/.changelog/v0.15.x.md b/.changelog/v0.15.x.md index a5fc1270..927bd39a 100644 --- a/.changelog/v0.15.x.md +++ b/.changelog/v0.15.x.md @@ -6,6 +6,14 @@ Released: YYYY-MM-DD Plan cleanup and documentation reorganization. 
+## Fixes + +### Hardening: Safe JSON parsing & output redaction +- **Replaced raw `JSON.parse` with `readJSONFile`** in `apps.js`, `socialAccounts.js`, `scriptRunner.js` -- corrupted data files no longer crash the server +- **Added null guard** on `data?.apps?.[id]` in `apps.js` to prevent undefined access +- **Added `.catch()` handler** on fire-and-forget `cloneRepoInBackground()` in `brain.js` to prevent unhandled promise rejections +- **Added sensitive output redaction** in `scriptRunner.js` -- env vars containing keys, tokens, passwords, and secrets are now `[REDACTED]` before persisting to `scripts-state.json` + ## Improvements ### Plan & Documentation Cleanup diff --git a/server/routes/brain.js b/server/routes/brain.js index 3150b8ef..aefd4211 100644 --- a/server/routes/brain.js +++ b/server/routes/brain.js @@ -642,7 +642,9 @@ router.post('/links', asyncHandler(async (req, res) => { // If GitHub repo and auto-clone enabled, start clone in background if (isGitHubRepo && autoClone !== false) { - cloneRepoInBackground(link.id, url); + cloneRepoInBackground(link.id, url).catch(err => { + console.error(`❌ Background clone setup failed for ${link.id}: ${err.message}`); + }); } res.status(201).json(link); diff --git a/server/services/apps.js b/server/services/apps.js index 999ac9e6..588145cd 100644 --- a/server/services/apps.js +++ b/server/services/apps.js @@ -1,9 +1,8 @@ -import { readFile, writeFile } from 'fs/promises'; -import { existsSync } from 'fs'; +import { writeFile } from 'fs/promises'; import { join } from 'path'; import { v4 as uuidv4 } from 'uuid'; import EventEmitter from 'events'; -import { ensureDir, PATHS } from '../lib/fileUtils.js'; +import { ensureDir, readJSONFile, PATHS } from '../lib/fileUtils.js'; const DATA_DIR = PATHS.data; const APPS_FILE = join(DATA_DIR, 'apps.json'); @@ -36,14 +35,7 @@ async function loadApps() { await ensureDataDir(); - if (!existsSync(APPS_FILE)) { - appsCache = { apps: {} }; - cacheTimestamp = now; - return appsCache; - } - - const content = await readFile(APPS_FILE, 'utf-8'); - appsCache = JSON.parse(content); + appsCache = await readJSONFile(APPS_FILE, { apps: {} }); cacheTimestamp = now; return appsCache; } @@ -103,7 +95,7 @@ export async function getActiveApps() { */ export async function getAppById(id) { const data = await loadApps(); - const app = data.apps[id]; + const app = data?.apps?.[id]; return app ? { id, ...app } : null; } diff --git a/server/services/scriptRunner.js b/server/services/scriptRunner.js index 233b31c7..ec28caea 100644 --- a/server/services/scriptRunner.js +++ b/server/services/scriptRunner.js @@ -8,11 +8,11 @@ import { spawn } from 'child_process'; import { join, dirname } from 'path'; import { fileURLToPath } from 'url'; -import { writeFile, readFile, mkdir, readdir, rm } from 'fs/promises'; -import { existsSync } from 'fs'; +import { writeFile, mkdir, readdir, rm } from 'fs/promises'; import { v4 as uuidv4 } from 'uuid'; import Cron from 'croner'; import { cosEvents } from './cosEvents.js'; +import { readJSONFile } from '../lib/fileUtils.js'; const __filename = fileURLToPath(import.meta.url); const __dirname = dirname(__filename); @@ -43,6 +43,18 @@ const ALLOWED_SCRIPT_COMMANDS = new Set([ // Security: Reject any command containing these to prevent injection via pipes, chaining, etc. 
const DANGEROUS_SHELL_CHARS = /[;|&`$(){}[\]<>\\!#*?~]/; +// Patterns matching sensitive environment variable values in command output (e.g., pm2 jlist) +const SENSITIVE_ENV_PATTERN = /("[^"]*(?:KEY|SECRET|TOKEN|PASS|PASSWORD|APIKEY|API_KEY|APISEC|API_SEC|MACAROON|CERT|CREDENTIAL|AUTH)[^"]*":\s*)"[^"]+"/gi; + +/** + * Redact sensitive env vars from command output before persisting. + * Matches JSON key-value pairs where the key contains secret-like words. + */ +function redactSensitiveOutput(output) { + if (!output) return output; + return output.replace(SENSITIVE_ENV_PATTERN, '$1"[REDACTED]"'); +} + /** * Validate a script command against the allowlist * Returns { valid: boolean, error?: string, baseCommand?: string, args?: string[] } @@ -126,11 +138,7 @@ async function ensureScriptsDir() { * Load scripts state */ async function loadScriptsState() { - if (!existsSync(SCRIPTS_STATE_FILE)) { - return { scripts: {} }; - } - const content = await readFile(SCRIPTS_STATE_FILE, 'utf-8'); - return JSON.parse(content); + return readJSONFile(SCRIPTS_STATE_FILE, { scripts: {} }); } /** @@ -366,7 +374,7 @@ export async function executeScript(scriptId) { // Update script state script.lastRun = new Date().toISOString(); - script.lastOutput = fullOutput.substring(0, 10000); // Limit stored output + script.lastOutput = redactSensitiveOutput(fullOutput.substring(0, 10000)); script.lastExitCode = code; script.runCount = (script.runCount || 0) + 1; diff --git a/server/services/socialAccounts.js b/server/services/socialAccounts.js index 94c6edb9..5b61d4ad 100644 --- a/server/services/socialAccounts.js +++ b/server/services/socialAccounts.js @@ -9,12 +9,11 @@ * - Future account management automation */ -import { readFile, writeFile } from 'fs/promises'; -import { existsSync } from 'fs'; +import { writeFile } from 'fs/promises'; import { join } from 'path'; import { v4 as uuidv4 } from 'uuid'; import EventEmitter from 'events'; -import { ensureDir, PATHS } from '../lib/fileUtils.js'; +import { ensureDir, readJSONFile, PATHS } from '../lib/fileUtils.js'; const DATA_FILE = join(PATHS.digitalTwin, 'social-accounts.json'); @@ -135,14 +134,7 @@ async function loadAccounts() { await ensureDir(PATHS.digitalTwin); - if (!existsSync(DATA_FILE)) { - cache = { accounts: {} }; - cacheTimestamp = now; - return cache; - } - - const content = await readFile(DATA_FILE, 'utf-8'); - cache = JSON.parse(content); + cache = await readJSONFile(DATA_FILE, { accounts: {} }); cacheTimestamp = now; return cache; } From ebce4a6888fd9f12f2e2ed5a101bb18bc0b2df1b Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Wed, 18 Feb 2026 22:07:19 +0000 Subject: [PATCH 06/31] build: bump version to 0.15.2 [skip ci] --- client/package.json | 2 +- package-lock.json | 8 ++++---- package.json | 2 +- server/package.json | 2 +- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/client/package.json b/client/package.json index ab4d5fde..7e8b19f8 100644 --- a/client/package.json +++ b/client/package.json @@ -1,6 +1,6 @@ { "name": "portos-client", - "version": "0.15.1", + "version": "0.15.2", "private": true, "type": "module", "scripts": { diff --git a/package-lock.json b/package-lock.json index bbad3597..311c637e 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "portos", - "version": "0.15.1", + "version": "0.15.2", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "portos", - "version": "0.15.1", + "version": "0.15.2", "license": "MIT", "workspaces": [ "packages/*", @@ -29,7 +29,7 @@ }, 
"client": { "name": "portos-client", - "version": "0.15.1", + "version": "0.15.2", "dependencies": { "@dnd-kit/core": "^6.3.1", "@dnd-kit/sortable": "^10.0.0", @@ -9368,7 +9368,7 @@ }, "server": { "name": "portos-server", - "version": "0.15.1", + "version": "0.15.2", "dependencies": { "axios": "^1.7.9", "cors": "^2.8.5", diff --git a/package.json b/package.json index 9e241994..6c0e685b 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "portos", - "version": "0.15.1", + "version": "0.15.2", "private": true, "description": "Local dev machine App OS portal", "author": "Adam Eivy (@antic|@atomantic)", diff --git a/server/package.json b/server/package.json index 9849aa1f..c7358ed9 100644 --- a/server/package.json +++ b/server/package.json @@ -1,6 +1,6 @@ { "name": "portos-server", - "version": "0.15.1", + "version": "0.15.2", "private": true, "type": "module", "scripts": { From 6c7dbd47eb5dc6b0e337ef16c2597dcc38fcda51 Mon Sep 17 00:00:00 2001 From: Adam Eivy Date: Wed, 18 Feb 2026 14:15:07 -0800 Subject: [PATCH 07/31] fix: replace YYYY-MM-DD date placeholder in release workflow The release.yml sed commands substituted version placeholders but never replaced the YYYY-MM-DD date placeholder, causing all releases since v0.11.17 to show "Released: YYYY-MM-DD". Fixed both the release body generation and the changelog archival step. Backfilled dates in archived changelog files. --- .changelog/v0.11.17.md | 2 +- .changelog/v0.12.48.md | 2 +- .changelog/v0.13.20.md | 2 +- .changelog/v0.14.21.md | 2 +- .changelog/v0.15.x.md | 5 +++++ .changelog/v0.9.19.md | 2 +- .github/workflows/release.yml | 4 ++-- 7 files changed, 12 insertions(+), 7 deletions(-) diff --git a/.changelog/v0.11.17.md b/.changelog/v0.11.17.md index 2b830a64..f84516f1 100644 --- a/.changelog/v0.11.17.md +++ b/.changelog/v0.11.17.md @@ -1,6 +1,6 @@ # Release v0.11.17 - Changelog -Released: YYYY-MM-DD +Released: 2026-02-07 ## Overview diff --git a/.changelog/v0.12.48.md b/.changelog/v0.12.48.md index 0a2e9bff..fcdcc748 100644 --- a/.changelog/v0.12.48.md +++ b/.changelog/v0.12.48.md @@ -1,6 +1,6 @@ # Release v0.12.48 - Changelog -Released: YYYY-MM-DD +Released: 2026-02-14 ## Overview diff --git a/.changelog/v0.13.20.md b/.changelog/v0.13.20.md index 6cb16815..50d36f83 100644 --- a/.changelog/v0.13.20.md +++ b/.changelog/v0.13.20.md @@ -1,6 +1,6 @@ # Release v0.13.20 - Changelog -Released: YYYY-MM-DD +Released: 2026-02-17 ## Overview diff --git a/.changelog/v0.14.21.md b/.changelog/v0.14.21.md index 88d22981..b4f64068 100644 --- a/.changelog/v0.14.21.md +++ b/.changelog/v0.14.21.md @@ -1,6 +1,6 @@ # Release v0.14.21 - Changelog -Released: YYYY-MM-DD +Released: 2026-02-18 ## Overview diff --git a/.changelog/v0.15.x.md b/.changelog/v0.15.x.md index 927bd39a..936e852f 100644 --- a/.changelog/v0.15.x.md +++ b/.changelog/v0.15.x.md @@ -8,6 +8,11 @@ Plan cleanup and documentation reorganization. 
## Fixes +### CI: Release date placeholder not substituted +- **Fixed `YYYY-MM-DD` never replaced** in release notes -- the `release.yml` workflow substituted version placeholders but not the date placeholder +- **Added date substitution** to both the release body generation and the archived changelog file +- **Backfilled 4 releases** (v0.11.17, v0.12.48, v0.13.20, v0.14.21) with correct dates + ### Hardening: Safe JSON parsing & output redaction - **Replaced raw `JSON.parse` with `readJSONFile`** in `apps.js`, `socialAccounts.js`, `scriptRunner.js` -- corrupted data files no longer crash the server - **Added null guard** on `data?.apps?.[id]` in `apps.js` to prevent undefined access diff --git a/.changelog/v0.9.19.md b/.changelog/v0.9.19.md index ecd5b3bf..fc5928a4 100644 --- a/.changelog/v0.9.19.md +++ b/.changelog/v0.9.19.md @@ -1,6 +1,6 @@ # Release Notes - v0.9.19 -**Release Date:** YYYY-MM-DD +**Release Date:** 2026-02-01 ## 🎉 Features diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 0c8b36ad..19101ebf 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -60,7 +60,7 @@ jobs: # Use minor version pattern changelog (e.g., v0.10.x.md) CHANGELOG=$(cat "$CHANGELOG_FILE_PATTERN") # Replace version placeholder with actual version - CHANGELOG=$(echo "$CHANGELOG" | sed "s/v${MAJOR_MINOR}.x/v${VERSION}/g" | sed "s/${MAJOR_MINOR}.x/${VERSION}/g") + CHANGELOG=$(echo "$CHANGELOG" | sed "s/v${MAJOR_MINOR}.x/v${VERSION}/g" | sed "s/${MAJOR_MINOR}.x/${VERSION}/g" | sed "s/YYYY-MM-DD/$(date +%Y-%m-%d)/g") else # Fallback: Generate changelog from commits (exclude [skip ci] commits) PREV_TAG=$(git describe --tags --abbrev=0 2>/dev/null || git rev-list --max-parents=0 HEAD) @@ -126,7 +126,7 @@ jobs: git mv "$PATTERN_FILE" "$VERSIONED_FILE" # Replace version placeholders in the renamed file - sed -i.bak "s/v${MAJOR_MINOR}\.x/v${CURRENT_VERSION}/g; s/${MAJOR_MINOR}\.x/${CURRENT_VERSION}/g" "$VERSIONED_FILE" + sed -i.bak "s/v${MAJOR_MINOR}\.x/v${CURRENT_VERSION}/g; s/${MAJOR_MINOR}\.x/${CURRENT_VERSION}/g; s/YYYY-MM-DD/$(date +%Y-%m-%d)/g" "$VERSIONED_FILE" rm "${VERSIONED_FILE}.bak" # Commit the renamed and updated changelog From 1d60a733319efb7d8883c5267e7dec332def73ce Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Wed, 18 Feb 2026 22:16:47 +0000 Subject: [PATCH 08/31] build: bump version to 0.15.3 [skip ci] --- client/package.json | 2 +- package-lock.json | 8 ++++---- package.json | 2 +- server/package.json | 2 +- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/client/package.json b/client/package.json index 7e8b19f8..857a6c6f 100644 --- a/client/package.json +++ b/client/package.json @@ -1,6 +1,6 @@ { "name": "portos-client", - "version": "0.15.2", + "version": "0.15.3", "private": true, "type": "module", "scripts": { diff --git a/package-lock.json b/package-lock.json index 311c637e..03ede312 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "portos", - "version": "0.15.2", + "version": "0.15.3", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "portos", - "version": "0.15.2", + "version": "0.15.3", "license": "MIT", "workspaces": [ "packages/*", @@ -29,7 +29,7 @@ }, "client": { "name": "portos-client", - "version": "0.15.2", + "version": "0.15.3", "dependencies": { "@dnd-kit/core": "^6.3.1", "@dnd-kit/sortable": "^10.0.0", @@ -9368,7 +9368,7 @@ }, "server": { "name": "portos-server", - "version": "0.15.2", + "version": "0.15.3", "dependencies": { "axios": "^1.7.9", 
"cors": "^2.8.5", diff --git a/package.json b/package.json index 6c0e685b..59f492b5 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "portos", - "version": "0.15.2", + "version": "0.15.3", "private": true, "description": "Local dev machine App OS portal", "author": "Adam Eivy (@antic|@atomantic)", diff --git a/server/package.json b/server/package.json index c7358ed9..e55f0e66 100644 --- a/server/package.json +++ b/server/package.json @@ -1,6 +1,6 @@ { "name": "portos-server", - "version": "0.15.2", + "version": "0.15.3", "private": true, "type": "module", "scripts": { From 76739ebf37de68be73a66b6cb60cc54d00d21de2 Mon Sep 17 00:00:00 2001 From: Adam Eivy Date: Wed, 18 Feb 2026 14:42:34 -0800 Subject: [PATCH 09/31] =?UTF-8?q?fix:=20DRY/YAGNI/SOLID=20audit=20?= =?UTF-8?q?=E2=80=94=20fix=20bugs,=20eliminate=20duplication,=20remove=20d?= =?UTF-8?q?ead=20code?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fix 10 bugs including index OOB in Windows process parsing, non-unique React keys, missing null guards, unsafe JSON.parse, dependency version mismatches, and hardcoded localhost in vite proxy. Extract loadApp middleware (11x DRY), spawnPm2Cli helper (3x DRY), useAutoRefetch hook (4x DRY), pickScheduleSettings (2x DRY), and reuse existing extractJSONArray/safeJSONParse utilities. Delete 3 dead .old.js files, unused command functions, and forbidden pm2:kill script. Net -516 lines. --- .changelog/v0.15.x.md | 24 +++ .gitignore | 1 - client/src/components/CosDashboardWidget.jsx | 42 ++-- client/src/components/DecisionLogWidget.jsx | 22 +- client/src/components/GoalProgressWidget.jsx | 22 +- client/src/components/UpcomingTasksWidget.jsx | 32 +-- client/src/hooks/useAutoRefetch.js | 28 +++ client/src/utils/formatters.js | 2 + client/vite.config.js | 7 +- package.json | 7 +- server/routes/agentTools.js | 2 +- server/routes/apps.js | 98 ++++----- server/routes/cos.js | 43 ++-- server/routes/prompts.old.js | 87 -------- server/routes/providers.old.js | 134 ------------ server/routes/runs.old.js | 191 ------------------ server/services/agents.js | 2 +- server/services/commands.js | 19 -- server/services/pm2.js | 89 +++----- 19 files changed, 182 insertions(+), 670 deletions(-) create mode 100644 client/src/hooks/useAutoRefetch.js delete mode 100644 server/routes/prompts.old.js delete mode 100644 server/routes/providers.old.js delete mode 100644 server/routes/runs.old.js diff --git a/.changelog/v0.15.x.md b/.changelog/v0.15.x.md index 936e852f..c31c994a 100644 --- a/.changelog/v0.15.x.md +++ b/.changelog/v0.15.x.md @@ -19,6 +19,30 @@ Plan cleanup and documentation reorganization. 
- **Added `.catch()` handler** on fire-and-forget `cloneRepoInBackground()` in `brain.js` to prevent unhandled promise rejections - **Added sensitive output redaction** in `scriptRunner.js` -- env vars containing keys, tokens, passwords, and secrets are now `[REDACTED]` before persisting to `scripts-state.json` +### Bugs Fixed +- **Fixed index out of bounds** in `agents.js` Windows process parsing -- checked `parts.length >= 6` but accessed `parts[6]` (needs `>= 7`) +- **Fixed non-unique React keys** in `UpcomingTasksWidget` -- `task.taskType` isn't unique, now uses compound keys +- **Fixed missing optional chaining** in `agentTools.js` -- `commentsResponse.comments` would throw if null +- **Fixed falsy value bug** in `apps.js` routes -- used `||` instead of `??` for PM2 process fallback +- **Fixed unsafe `JSON.parse`** in `pm2.js` `listProcesses` -- now uses `safeJSONParse` with fallback +- **Fixed silent error swallowing** in `cos.js` actionable insights -- `Promise.all` catches now log errors +- **Fixed missing date validation** in `formatters.js` -- `formatTime()` now guards against null/invalid timestamps +- **Fixed hardcoded localhost** in `vite.config.js` proxy -- now configurable via `VITE_API_HOST` env var +- **Fixed dependency version mismatches** -- aligned Express, PM2, and portos-ai-toolkit between root and server `package.json` +- **Removed forbidden `pm2:kill`** npm script that violated project rules + +### DRY Refactors +- **Extracted `spawnPm2Cli` helper** in `pm2.js` -- eliminated 3x duplicated spawn+stderr+close blocks for stop/restart/delete +- **Extracted `loadApp` middleware** in `apps.js` routes -- eliminated 11 repeated `getAppById` + null check patterns +- **Used existing `extractJSONArray`/`safeJSONParse`** in `pm2.js` -- removed duplicated JSON extraction logic +- **Created `useAutoRefetch` hook** -- consolidated identical fetch+interval `useEffect` pattern across 4 dashboard widgets +- **Extracted `pickScheduleSettings`** in `cos.js` -- eliminated duplicated `if (X !== undefined)` settings pattern +- **Removed duplicate `.pm2` entry** from `.gitignore` + +### YAGNI Cleanup +- **Deleted 3 dead `.old.js` route files** -- `prompts.old.js`, `providers.old.js`, `runs.old.js` +- **Removed unused `addAllowedCommand`/`removeAllowedCommand`** from `commands.js` -- never called from any route + ## Improvements ### Plan & Documentation Cleanup diff --git a/.gitignore b/.gitignore index 61c66341..b389bbc1 100644 --- a/.gitignore +++ b/.gitignore @@ -39,4 +39,3 @@ Thumbs.db # Browser service node_modules (code is committed) browser/node_modules/ -.pm2 diff --git a/client/src/components/CosDashboardWidget.jsx b/client/src/components/CosDashboardWidget.jsx index 7a0295d0..176d5092 100644 --- a/client/src/components/CosDashboardWidget.jsx +++ b/client/src/components/CosDashboardWidget.jsx @@ -1,4 +1,4 @@ -import { useState, useEffect, memo } from 'react'; +import { useState, memo } from 'react'; import { Link } from 'react-router-dom'; import { CheckCircle, @@ -15,40 +15,26 @@ import { Activity } from 'lucide-react'; import * as api from '../services/api'; +import { useAutoRefetch } from '../hooks/useAutoRefetch'; /** * CosDashboardWidget - Compact CoS status widget for the main Dashboard * Shows today's progress, streak status, learning health, CoS running state, and recent tasks */ const CosDashboardWidget = memo(function CosDashboardWidget() { - const [summary, setSummary] = useState(null); - const [learningSummary, setLearningSummary] = useState(null); - const [recentTasks, 
setRecentTasks] = useState(null); - const [activityCalendar, setActivityCalendar] = useState(null); - const [loading, setLoading] = useState(true); - const [tasksExpanded, setTasksExpanded] = useState(false); - - useEffect(() => { - const loadData = async () => { - const silent = { silent: true }; - const [quickData, learningData, tasksData, calendarData] = await Promise.all([ - api.getCosQuickSummary(silent).catch(() => null), - api.getCosLearningSummary(silent).catch(() => null), - api.getCosRecentTasks(5, silent).catch(() => null), - api.getCosActivityCalendar(8, silent).catch(() => null) - ]); - setSummary(quickData); - setLearningSummary(learningData); - setRecentTasks(tasksData); - setActivityCalendar(calendarData); - setLoading(false); - }; + const { data: dashData, loading } = useAutoRefetch(async () => { + const silent = { silent: true }; + const [summary, learningSummary, recentTasks, activityCalendar] = await Promise.all([ + api.getCosQuickSummary(silent).catch(() => null), + api.getCosLearningSummary(silent).catch(() => null), + api.getCosRecentTasks(5, silent).catch(() => null), + api.getCosActivityCalendar(8, silent).catch(() => null) + ]); + return { summary, learningSummary, recentTasks, activityCalendar }; + }, 30000); - loadData(); - // Refresh every 30 seconds - const interval = setInterval(loadData, 30000); - return () => clearInterval(interval); - }, []); + const { summary, learningSummary, recentTasks, activityCalendar } = dashData ?? {}; + const [tasksExpanded, setTasksExpanded] = useState(false); // Don't render while loading if (loading) { diff --git a/client/src/components/DecisionLogWidget.jsx b/client/src/components/DecisionLogWidget.jsx index 9c44fae7..9e9728c0 100644 --- a/client/src/components/DecisionLogWidget.jsx +++ b/client/src/components/DecisionLogWidget.jsx @@ -1,4 +1,4 @@ -import { useState, useEffect, memo } from 'react'; +import { useState, memo } from 'react'; import { Link } from 'react-router-dom'; import { Eye, @@ -12,29 +12,19 @@ import { Zap } from 'lucide-react'; import * as api from '../services/api'; +import { useAutoRefetch } from '../hooks/useAutoRefetch'; /** * DecisionLogWidget - Shows transparency into CoS decision-making * Displays why tasks were skipped, intervals adjusted, or alternatives chosen */ const DecisionLogWidget = memo(function DecisionLogWidget() { - const [summary, setSummary] = useState(null); - const [loading, setLoading] = useState(true); + const { data: summary, loading } = useAutoRefetch( + () => api.getCosDecisionSummary({ silent: true }).catch(() => null), + 60000 + ); const [expanded, setExpanded] = useState(false); - useEffect(() => { - const loadData = async () => { - const data = await api.getCosDecisionSummary({ silent: true }).catch(() => null); - setSummary(data); - setLoading(false); - }; - - loadData(); - // Refresh every 60 seconds - const interval = setInterval(loadData, 60000); - return () => clearInterval(interval); - }, []); - // Don't render while loading or if no data if (loading || !summary) { return null; diff --git a/client/src/components/GoalProgressWidget.jsx b/client/src/components/GoalProgressWidget.jsx index abfd93ed..3b3ef676 100644 --- a/client/src/components/GoalProgressWidget.jsx +++ b/client/src/components/GoalProgressWidget.jsx @@ -1,4 +1,4 @@ -import { useState, useEffect, memo } from 'react'; +import { memo } from 'react'; import { Link } from 'react-router-dom'; import { Target, @@ -7,27 +7,17 @@ import { AlertTriangle } from 'lucide-react'; import * as api from '../services/api'; 
+import { useAutoRefetch } from '../hooks/useAutoRefetch'; /** * GoalProgressWidget - Shows progress toward user goals on the dashboard * Maps completed CoS tasks to goal categories from COS-GOALS.md */ const GoalProgressWidget = memo(function GoalProgressWidget() { - const [progress, setProgress] = useState(null); - const [loading, setLoading] = useState(true); - - useEffect(() => { - const loadData = async () => { - const data = await api.getCosGoalProgressSummary({ silent: true }).catch(() => null); - setProgress(data); - setLoading(false); - }; - - loadData(); - // Refresh every 60 seconds - const interval = setInterval(loadData, 60000); - return () => clearInterval(interval); - }, []); + const { data: progress, loading } = useAutoRefetch( + () => api.getCosGoalProgressSummary({ silent: true }).catch(() => null), + 60000 + ); // Don't render while loading or if no goals if (loading || !progress?.goals?.length) { diff --git a/client/src/components/UpcomingTasksWidget.jsx b/client/src/components/UpcomingTasksWidget.jsx index a38065c0..984e4044 100644 --- a/client/src/components/UpcomingTasksWidget.jsx +++ b/client/src/components/UpcomingTasksWidget.jsx @@ -1,4 +1,4 @@ -import { useState, useEffect, memo } from 'react'; +import { memo } from 'react'; import { Link } from 'react-router-dom'; import { Clock, @@ -10,30 +10,20 @@ import { Sparkles } from 'lucide-react'; import * as api from '../services/api'; +import { useAutoRefetch } from '../hooks/useAutoRefetch'; /** * UpcomingTasksWidget - Shows a preview of upcoming scheduled tasks * Helps users understand what the CoS will work on next */ const UpcomingTasksWidget = memo(function UpcomingTasksWidget() { - const [upcoming, setUpcoming] = useState([]); - const [loading, setLoading] = useState(true); - - useEffect(() => { - const loadData = async () => { - const data = await api.getCosUpcomingTasks(6).catch(() => []); - setUpcoming(data); - setLoading(false); - }; - - loadData(); - // Refresh every 60 seconds - const interval = setInterval(loadData, 60000); - return () => clearInterval(interval); - }, []); + const { data: upcoming, loading } = useAutoRefetch( + () => api.getCosUpcomingTasks(6).catch(() => []), + 60000 + ); // Don't render while loading or if no upcoming tasks - if (loading || !upcoming.length) { + if (loading || !upcoming?.length) { return null; } @@ -103,9 +93,9 @@ const UpcomingTasksWidget = memo(function UpcomingTasksWidget() { {/* Task List */}
{/* Ready Tasks */} - {readyTasks.slice(0, 3).map((task) => ( + {readyTasks.slice(0, 3).map((task, index) => (