Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
100 changes: 96 additions & 4 deletions dhee/adapters/base.py
Original file line number Diff line number Diff line change
Expand Up @@ -90,6 +90,9 @@ def __init__(
from dhee.core.session_tracker import SessionTracker
self._tracker = SessionTracker()

# Hook registry for harness integration
self._hooks: Dict[str, List[Callable]] = self._init_hooks()

# Session tracking (kept for backward compat with session_start/session_end)
self._session_id: Optional[str] = None
self._session_start_time: Optional[float] = None
Expand All @@ -110,6 +113,46 @@ def provider(self) -> str:
def buddhi(self):
return self._buddhi

@property
def kernel(self):
    """Expose the underlying CognitionKernel so callers can read or mutate cognitive state directly."""
    return self._kernel

# ------------------------------------------------------------------
# Hook registry
# ------------------------------------------------------------------

# Every lifecycle operation gets a pre- and a post- hook event.
_HOOK_EVENTS = frozenset(
    f"{phase}_{op}"
    for phase in ("pre", "post")
    for op in ("remember", "recall", "context", "checkpoint")
)

def _init_hooks(self) -> Dict[str, List[Callable]]:
    """Build an empty hook registry: one independent callback list per known event."""
    registry: Dict[str, List[Callable]] = {}
    # NOTE: each event needs its OWN list (dict.fromkeys would alias one list).
    for event in self._HOOK_EVENTS:
        registry[event] = []
    return registry

def register_hook(self, event: str, callback: Callable) -> None:
    """Attach *callback* to one of the adapter lifecycle events.

    Supported events: pre_remember, post_remember, pre_recall, post_recall,
    pre_context, post_context, pre_checkpoint, post_checkpoint.

    Pre-hooks receive the arguments dict. Post-hooks receive the result dict.
    """
    known = self._hooks
    if event in known:
        known[event].append(callback)
        return
    raise ValueError(f"Unknown hook event: {event}. Valid: {list(known.keys())}")

def _fire_hooks(self, event: str, data: Any) -> None:
    """Fire all registered hooks for an event.

    Hook failures are isolated: an exception raised by one callback is
    logged at debug level and does not stop later callbacks or the caller.
    """
    for callback in self._hooks.get(event, []):
        try:
            callback(data)
        except Exception:
            # exc_info=True records the traceback; without it a failing hook
            # was completely undiagnosable from the logs.
            logger.debug("Hook %s failed", event, exc_info=True)

# ------------------------------------------------------------------
# Tool 1: remember
# ------------------------------------------------------------------
Expand All @@ -126,6 +169,7 @@ def remember(
checks for "remember to X when Y" patterns.
"""
uid = user_id or self._user_id
self._fire_hooks("pre_remember", {"content": content, "user_id": uid, "metadata": metadata})

# Auto-tier memory content
from dhee.core.session_tracker import classify_tier
Expand Down Expand Up @@ -157,6 +201,7 @@ def remember(
if intention:
response["detected_intention"] = intention.to_dict()

self._fire_hooks("post_remember", response)
return response

# ------------------------------------------------------------------
Expand All @@ -171,6 +216,7 @@ def recall(
) -> List[Dict[str, Any]]:
"""Search memory for relevant facts. 0 LLM calls. 1 embedding."""
uid = user_id or self._user_id
self._fire_hooks("pre_recall", {"query": query, "user_id": uid, "limit": limit})
results = self._engram.search(query, user_id=uid, limit=limit)
formatted = [
{
Expand All @@ -185,6 +231,7 @@ def recall(
signals = self._tracker.on_recall(query, formatted)
self._handle_tracker_signals(signals, uid)

self._fire_hooks("post_recall", formatted)
return formatted

# ------------------------------------------------------------------
Expand All @@ -203,15 +250,21 @@ def context(
operational: If True, return compact actionable-only format.
"""
uid = user_id or self._user_id
self._fire_hooks("pre_context", {
"task_description": task_description, "user_id": uid, "operational": operational,
})
self._tracker.on_context(task_description)
hyper_ctx = self._buddhi.get_hyper_context(
user_id=uid,
task_description=task_description,
memory=self._engram._memory,
)
if operational:
return hyper_ctx.to_operational_dict()
return hyper_ctx.to_dict()
result = hyper_ctx.to_operational_dict()
else:
result = hyper_ctx.to_dict()
self._fire_hooks("post_context", result)
return result

# ------------------------------------------------------------------
# Tool 4: checkpoint
Expand Down Expand Up @@ -253,6 +306,10 @@ def checkpoint(
8. Selective forgetting → utility-based cleanup
"""
uid = user_id or self._user_id
self._fire_hooks("pre_checkpoint", {
"summary": summary, "user_id": uid, "task_type": task_type,
"outcome_score": outcome_score, "status": status,
})
self._tracker.on_checkpoint()

# Auto-fill task_type if not provided
Expand All @@ -269,6 +326,7 @@ def checkpoint(
what_worked = outcome.get("what_worked")

result: Dict[str, Any] = {}
score = max(0.0, min(1.0, float(outcome_score))) if outcome_score is not None else None

# 1. Session digest
try:
Expand Down Expand Up @@ -298,8 +356,7 @@ def checkpoint(
pass

# 3. Outcome recording
if task_type and outcome_score is not None:
score = max(0.0, min(1.0, float(outcome_score)))
if task_type and score is not None:
insight = self._buddhi.record_outcome(
user_id=uid, task_type=task_type, score=score,
)
Expand Down Expand Up @@ -351,6 +408,7 @@ def checkpoint(
forget_result = self._kernel.selective_forget(uid)
result.update(forget_result)

self._fire_hooks("post_checkpoint", result)
return result

# ------------------------------------------------------------------
Expand Down Expand Up @@ -433,6 +491,40 @@ def _handle_tracker_signals(self, signals: Dict[str, Any], user_id: str) -> None
except Exception:
pass

# ------------------------------------------------------------------
# Cognition health (harness monitoring)
# ------------------------------------------------------------------

def cognition_health(self, user_id: Optional[str] = None) -> Dict[str, Any]:
    """Health status of all cognitive subsystems.

    Args:
        user_id: User to inspect; defaults to the adapter's bound user.

    Returns:
        Dict with "kernel" and "buddhi" stats plus a "warnings" list of
        human-readable degradation notes. Zero LLM calls.
        Useful for harness dashboards and monitoring.
    """
    uid = user_id or self._user_id
    health: Dict[str, Any] = {}

    health["kernel"] = self._kernel.get_stats()
    health["buddhi"] = self._buddhi.get_stats()

    warnings: List[str] = []
    # Each check is guarded independently so a failure in one subsystem
    # no longer suppresses warnings from the others (previously a single
    # try wrapped all three and swallowed everything silently).
    try:
        policies = self._kernel.policies.get_user_policies(uid)
        low_util = [p for p in policies if p.utility < -0.2 and p.apply_count >= 3]
        if low_util:
            warnings.append(f"{len(low_util)} policies with negative utility")
    except Exception:
        logger.debug("cognition_health: policy check failed", exc_info=True)
    try:
        active_intentions = self._kernel.intentions.get_active(uid)
        if len(active_intentions) > 20:
            warnings.append(f"{len(active_intentions)} active intentions (consider cleanup)")
    except Exception:
        logger.debug("cognition_health: intention check failed", exc_info=True)
    try:
        contradictions = self._kernel.beliefs.get_contradictions(uid)
        if len(contradictions) > 5:
            warnings.append(f"{len(contradictions)} unresolved belief contradictions")
    except Exception:
        logger.debug("cognition_health: belief check failed", exc_info=True)

    health["warnings"] = warnings
    return health

# ------------------------------------------------------------------
# Phase 3: Belief management
# ------------------------------------------------------------------
Expand Down
151 changes: 29 additions & 122 deletions dhee/core/buddhi.py
Original file line number Diff line number Diff line change
Expand Up @@ -1024,63 +1024,22 @@ def reflect(
except Exception:
pass

# Delegate cross-structure learning to kernel
# Kernel handles: policy outcomes, step extraction, belief-policy decay,
# intention feedback, episode connections
success = what_worked is not None
if what_worked:
try:
matched = self._kernel.policies.match_policies(
user_id, task_type, f"{task_type} task",
)
for policy in matched:
self._kernel.policies.record_outcome(
policy.id,
success=True,
baseline_score=baseline_score,
actual_score=outcome_score,
)

completed = self._kernel.tasks.get_tasks_by_type(
user_id, task_type, limit=10,
)
if len(completed) >= 3:
task_dicts = [t.to_dict() for t in completed]
self._kernel.policies.extract_from_tasks(
user_id, task_dicts, task_type,
)
# Step-level policy extraction from failure patterns
self._kernel.policies.extract_step_policies(
user_id, task_dicts, task_type,
)
except Exception:
pass

self._kernel.record_learning_outcomes(
user_id, task_type, success=True,
baseline_score=baseline_score, actual_score=outcome_score,
)
if what_failed:
try:
matched = self._kernel.policies.match_policies(
user_id, task_type, f"{task_type} task",
)
for policy in matched:
self._kernel.policies.record_outcome(
policy.id,
success=False,
baseline_score=baseline_score,
actual_score=outcome_score,
)
except Exception:
pass

# Extract step policies from failure patterns
try:
completed = self._kernel.tasks.get_tasks_by_type(
user_id, task_type, limit=10,
)
if len(completed) >= 3:
task_dicts = [t.to_dict() for t in completed]
self._kernel.policies.extract_step_policies(
user_id, task_dicts, task_type,
)
except Exception:
pass
self._kernel.record_learning_outcomes(
user_id, task_type, success=False,
baseline_score=baseline_score, actual_score=outcome_score,
)

# Update beliefs based on outcomes (via kernel)
# Update beliefs based on outcomes (buddhi-owned: text-based matching)
if what_worked:
try:
relevant = self._kernel.beliefs.get_relevant_beliefs(
Expand All @@ -1105,70 +1064,38 @@ def reflect(
except Exception:
pass

# Cross-structure: when beliefs are challenged, check dependent policies
try:
for belief in relevant:
if belief.confidence < 0.3:
claim_words = set(belief.claim.lower().split()[:5])
for policy in self._kernel.policies._policies.values():
if policy.user_id != user_id:
continue
approach_words = set(policy.action.approach.lower().split())
if len(claim_words & approach_words) >= 2:
policy.utility *= 0.8
policy.updated_at = time.time()
except Exception:
pass

# Record outcomes on matched insights (utility tracking)
if what_worked:
try:
matched_insights = self._get_relevant_insights(
user_id, f"{task_type} task",
)
for insight in matched_insights[:5]:
insight.record_outcome(
success=True,
baseline_score=baseline_score,
actual_score=outcome_score,
)
self._save_insights()
except Exception:
pass

if what_failed:
try:
matched_insights = self._get_relevant_insights(
user_id, f"{task_type} task",
# Buddhi-owned: insight utility tracking (buddhi owns insights)
try:
matched_insights = self._get_relevant_insights(
user_id, f"{task_type} task",
)
for insight in matched_insights[:5]:
insight.record_outcome(
success=success,
baseline_score=baseline_score,
actual_score=outcome_score,
)
for insight in matched_insights[:5]:
insight.record_outcome(
success=False,
baseline_score=baseline_score,
actual_score=outcome_score,
)
self._save_insights()
except Exception:
pass
self._save_insights()
except Exception:
pass

# Record outcomes on matched contrastive pairs
# Buddhi-owned: contrastive pair utility (buddhi owns contrastive store)
try:
store = self._get_contrastive()
matched_pairs = store.retrieve_contrasts(
f"{task_type} task", user_id=user_id, limit=5,
)
task_succeeded = what_worked is not None
for pair in matched_pairs:
pair.record_outcome(
success=task_succeeded,
success=success,
baseline_score=baseline_score,
actual_score=outcome_score,
)
store._save_all()
except Exception:
pass

# Cross-structure: positive policy delta -> reinforce related heuristics
# Buddhi-owned: heuristic reinforcement from positive policy deltas
if what_worked:
try:
matched_policies = self._kernel.policies.match_policies(
Expand All @@ -1190,26 +1117,6 @@ def reflect(
except Exception:
pass

# Record outcomes on recently triggered intentions
try:
triggered = [
i for i in self._kernel.intentions._intentions.values()
if i.user_id == user_id
and i.status == "triggered"
and i.was_useful is None
]
task_succeeded = what_worked is not None and (
outcome_score is None or outcome_score >= 0.5
)
for intention in triggered:
self._kernel.intentions.record_outcome(
intention.id,
useful=task_succeeded,
outcome_score=outcome_score,
)
except Exception:
pass

return new_insights

def _validate_used_heuristics(
Expand Down
Loading
Loading