import json
import traceback
from typing import Dict, Any, TYPE_CHECKING

from .schemas import TraceLog, ReflectorOutput
from .memory_store import MemoryStore

if TYPE_CHECKING:
    from LLM import LLMService


class Reflector:
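    """Post-trade analysis agent of the Learning Hub: rebuilds the decision trace of a
    closed trade, asks the LLM for a root-cause reflection, and stores the suggested
    rule ("Delta") in the MemoryStore under the matching domain."""
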
    def __init__(self, llm_service: 'LLMService', memory_store: MemoryStore):
        self.llm_service = llm_service
        self.memory_store = memory_store
        print("✅ Learning Hub: Reflector (Hybrid Aware) loaded")

    async def analyze_trade_outcome(self, trade_object: Dict[str, Any], close_reason: str):
        """Reconstruct the decision trace of a closed trade, ask the LLM for a
        root-cause reflection, and persist the resulting Delta."""
        try:
            decision_data = trade_object.get('decision_data', {})

            # Rebuild the full decision trace as it looked at entry time.
            trace_log = TraceLog(
                decision_context=decision_data,
                market_context_at_decision=decision_data.get('market_context_at_decision', {}),
                indicators_at_decision=decision_data.get('indicators_at_decision', {}),
                hybrid_weights_used=decision_data.get('hybrid_weights_at_entry', {}),
                closed_trade_object=trade_object,
                actual_outcome_reason=close_reason
            )

            prompt = self._create_reflector_prompt(trace_log, decision_data)
            response_text = await self.llm_service._call_llm(prompt)
            if not response_text: raise ValueError("LLM returned no response.")

            reflector_json = self.llm_service._parse_llm_response_enhanced(
                response_text, fallback_strategy="reflection", symbol=trade_object.get('symbol', 'N/A')
            )
            if not reflector_json: raise ValueError("Failed to parse LLM response")

            # Validate the reflection and store it as a new Delta in the matching domain.
            reflector_output = ReflectorOutput(**reflector_json)
            domain = self._determine_domain(trade_object.get('strategy', 'general'), reflector_output.error_mode)

            await self.memory_store.save_new_delta(reflector_output, trade_object, domain)
            print(f"✅ [Reflector] Analyzed {trade_object.get('symbol')}. New Delta created.")

        except Exception as e:
            print(f"❌ [Reflector] Analysis failed: {e}")
            traceback.print_exc()

    def _determine_domain(self, strategy: str, error_mode: str) -> str:
        """Map the Reflector's error-mode label to the memory domain the Delta is stored under."""
        em = error_mode.lower()
        if "pattern" in em: return "pattern"
        if "indicator" in em or "rsi" in em: return "indicator"
        if "monte_carlo" in em: return "monte_carlo"
        if "titan" in em or "model" in em: return "strategy"
        if "news" in em: return "general"
        return "strategy"

    def _create_reflector_prompt(self, trace_log: TraceLog, decision_data: Dict) -> str:
        """Build the reflection prompt from the trace log and the original decision data."""
        trade = trace_log.closed_trade_object
        pnl = trade.get('pnl_percent', 0)
        is_success = pnl > 0.1

        def fmt(value, spec: str) -> str:
            # Missing hybrid components should read as 'N/A' rather than crash the float formatting.
            try:
                return format(float(value), spec)
            except (TypeError, ValueError):
                return 'N/A'

        comps = decision_data.get('components', {})
        hybrid_context = f"""
* Hybrid Score Breakdown (Total: {fmt(trade.get('score'), '.4f')}):
  - Titan Model Score: {fmt(comps.get('titan_score'), '.2f')}
  - Chart Patterns Score: {fmt(comps.get('patterns_score'), '.2f')}
  - Monte Carlo Score: {fmt(comps.get('mc_score'), '.2f')}
* Weights Used at Entry: {json.dumps(trace_log.hybrid_weights_used)}
"""
        news_context = f"""
* News VADER Score: {decision_data.get('news_score', 0.0):.4f}
* News Text: {decision_data.get('news_text', 'N/A')}
"""

        return f"""
SYSTEM: You are an expert trading analyst Reflector. Analyze this "Trace" of a hybrid AI trading decision.
Did the primary model (Titan) mislead us, or did the secondary models (Patterns/MC) cause a false positive?

--- TRACE LOG START ---
1. DECISION CONTEXT:
* Strategy: {trade.get('strategy', 'N/A')}
{hybrid_context}
* Entry Reasoning: {decision_data.get('reasoning', 'N/A')[:300]}...

2. NEWS CONTEXT:
{news_context}

3. OUTCOME:
* Final PnL: {pnl:+.2f}%
* Close Reason: {trace_log.actual_outcome_reason}

--- TRACE LOG END ---

TASK: Identify the root cause of success/failure. Focus on which hybrid component was most accurate/inaccurate.
SUGGEST RULE (Delta): Max 25 words. E.g., "If Titan > 0.95 but Patterns < 0.60, reduce position size."

OUTPUT JSON:
{{
"success": {str(is_success).lower()},
"score": 0.0,
"error_mode": "Short description (e.g., 'titan_false_positive_ignored_patterns')",
"suggested_rule": "Concise 25-word rule.",
"confidence": 0.0
}}
"""
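
# Example wiring (sketch only; how LLMService and MemoryStore are constructed, and the
# actual close_reason values, depend on the host application):
#
#     reflector = Reflector(llm_service, memory_store)
#     await reflector.analyze_trade_outcome(closed_trade, close_reason="stop_loss")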