# mvi-ai-engine/core/response_engine.py  (commit 46c2aba)
import torch
from typing import Optional, Dict, List, Any
from reasoning.scraper import scrape_social_knowledge
# Prefer GPU when one is visible to torch.
# NOTE(review): DEVICE is not referenced anywhere in this chunk — confirm
# downstream use (e.g. by the responder model) or remove.
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
class ResponseEngine:
    """
    Cognitive response builder for MVI-AI.

    IMPORTANT:
    - Does NOT generate final user text
    - Builds structured context for the responder model
    - Provides fallback if responder fails
    """

    def __init__(self, ltm=None, registry=None):
        """
        Args:
            ltm: optional long-term-memory store; only ``retrieve_text(text, k)``
                is called on it (duck-typed).
            registry: optional model registry handle. Stored but never read in
                this chunk — NOTE(review): confirm callers rely on the attribute.
        """
        self.ltm = ltm
        self.registry = registry

    # ================= MEMORY =================
    def _memory_reasoning(self, text: str) -> List[str]:
        """Retrieve up to 5 memories relevant to ``text`` (best-effort).

        Returns an empty list when no LTM is configured or retrieval fails;
        memory is optional context and must never break generation.
        """
        if not self.ltm:
            return []
        try:
            memories = self.ltm.retrieve_text(text, k=5)
        except Exception:
            memories = []
        # Normalize falsy results (None, empty) to a list for callers.
        return memories or []

    # ================= MODEL INSIGHTS =================
    def _registry_reasoning(self, registry_outputs) -> List[str]:
        """Summarize per-model tensor outputs as "name relevance x.xxx" strings.

        Non-tensor values and tensors whose mean cannot be computed are
        silently skipped; near-zero signals (|mean| <= 0.01) are filtered out.
        """
        if not registry_outputs:
            return []
        insights: List[str] = []
        for name, tensor in registry_outputs.items():
            if not isinstance(tensor, torch.Tensor):
                continue
            # Keep the try body minimal: only the reduction can raise
            # (e.g. empty tensor, unsupported dtype).
            try:
                score = torch.mean(tensor).item()
            except Exception:
                continue
            # Filter weak signals
            if abs(score) > 0.01:
                insights.append(f"{name} relevance {score:.3f}")
        return insights

    # ================= SOCIAL KNOWLEDGE =================
    def _social_learning(self, text: str) -> List[str]:
        """Scrape external social knowledge as fallback context (best-effort).

        Accepts either dicts (with a "text" key) or plain items from the
        scraper; keeps at most 5 results and drops snippets of <= 3 words.
        """
        try:
            scraped = scrape_social_knowledge(text)
        except Exception:
            # Scraper is an optional enrichment; degrade to no extra context.
            scraped = []
        if not scraped:
            return []
        cleaned: List[str] = []
        for item in scraped[:5]:
            txt = item.get("text", "") if isinstance(item, dict) else str(item)
            # Basic filtering: skip empty / trivially short snippets.
            if txt and len(txt.split()) > 3:
                cleaned.append(txt)
        return cleaned

    # ================= RESPONSE GENERATION =================
    def generate(
        self,
        text: str,
        intent: Optional[str] = None,
        emotion: Optional[str] = None,
        model_outputs: Optional[Dict[str, torch.Tensor]] = None,
        cognitive_state: Optional[torch.Tensor] = None,  # compatibility; unused
        system_prompt: Optional[str] = None
    ) -> Dict[str, str]:
        """Build the structured context handed to the responder model.

        Args:
            text: raw user input.
            intent / emotion: optional cognitive signals (model-facing only).
            model_outputs: per-model tensors summarized into relevance lines.
            cognitive_state: accepted for interface compatibility; not read.
            system_prompt: optional instruction placed first in the context.

        Returns:
            {"context": newline-joined prompt, "fallback": echo of ``text``
            for use when the responder model fails}.
        """
        reasoning_blocks: List[Any] = []
        # ===== MEMORY =====
        reasoning_blocks.extend(self._memory_reasoning(text))
        # ===== MODEL INSIGHTS =====
        reasoning_blocks.extend(self._registry_reasoning(model_outputs))
        # ===== SOCIAL FALLBACK =====
        # Only hit the scraper when local knowledge is thin (< 2 blocks).
        if len(reasoning_blocks) < 2:
            reasoning_blocks.extend(self._social_learning(text))

        # ================= CONTEXT BUILD =================
        context_chunks: List[str] = []
        # Optional system instruction comes first.
        if system_prompt:
            context_chunks.append(system_prompt)
        # Core input
        context_chunks.append(f"User: {text}")
        # Cognitive signals (for model only, not user)
        if intent:
            context_chunks.append(f"Intent: {intent}")
        if emotion:
            context_chunks.append(f"Emotion: {emotion}")
        # Add reasoning knowledge; memory items may be dicts.
        for block in reasoning_blocks:
            if isinstance(block, dict):
                context_chunks.append(block.get("text", ""))
            else:
                context_chunks.append(str(block))
        # Closing instruction boost for the responder model.
        context_chunks.append("Respond clearly and professionally.")

        structured_context = "\n".join(context_chunks)
        # Fallback (if responder fails): echo the user's text unchanged.
        # Was f"{text}" — an identity f-string; text is already a str parameter.
        fallback_response = text
        return {
            "context": structured_context,
            "fallback": fallback_response
        }