import json
import re
import time
from typing import Any, Dict

from .llm_client import llm
from .prompts import (
    ASSESSMENT_PROMPT,
    REACTIVE_PROMPT,
    DATA_COLLECTION_PROMPT,
    ANALYSIS_PROMPT,
    RECOMMENDATION_PROMPT,
)
from .schemas import WealthAdvisorState
from .tools import query_shanghai_index

# Small helper: when llm is not available, use simple deterministic mocks
def _mock_assess_query(user_query: str) -> Dict[str, str]:
    # Heuristic: if contains words like '现在' or '当前' or '涨' => reactive/emergency
    text = user_query or ""
    # check both Chinese and common English keywords
    if any(k in text for k in ["现在", "当前", "涨", "跌", "行情", "market", "trend", "current"]):
        return {"query_type": "informational", "processing_mode": "reactive", "reasoning": "包含市场/行情相关关键词，适合快速响应"}
    if any(k in text for k in ["分析", "优化", "长期", "analyze", "analysis", "optimi", "long-term"]):
        return {"query_type": "analytical", "processing_mode": "deliberative", "reasoning": "需要深度分析/长期规划"}
    return {"query_type": "informational", "processing_mode": "reactive", "reasoning": "默认快速响应"}

# Phase 1: situation assessment - determine query type and processing mode.
def assess_query(state: WealthAdvisorState) -> WealthAdvisorState:
    """Assess the user query; decide its type and processing mode.

    Reads ``user_query`` from *state* and writes back ``query_type``
    (``emergency``/``informational``/``analytical``) and
    ``processing_mode`` (``reactive``/``deliberative``).  Falls back to
    the keyword heuristic :func:`_mock_assess_query` when no LLM is
    configured, the LLM call fails, or its output is not parseable JSON.
    On an unexpected error the state gets ``error`` and a canned
    ``final_response`` instead.
    """
    print("[DEBUG] 进入节点: assess_query")
    try:
        user_query = state.get("user_query", "")
        prompt_text = ASSESSMENT_PROMPT.format(user_query=user_query)

        result = None
        if llm is not None:
            try:
                raw = llm.generate(prompt_text, max_length=512)
                # Try strict JSON first, then the first {...} span in the
                # text (models often wrap JSON in prose or code fences).
                try:
                    result = json.loads(raw)
                except Exception:
                    m = re.search(r"\{[\s\S]*\}", raw)
                    if m:
                        try:
                            result = json.loads(m.group(0))
                        except Exception:
                            result = None
            except Exception as e:
                print(f"[DEBUG] assess_query llm 调用失败: {e}")
        if result is None:
            result = _mock_assess_query(user_query)

        print("[DEBUG] 评估输出:", result)
        # Normalise model output: anything outside the expected vocabulary
        # falls back to a safe default.
        processing_mode = result.get("processing_mode", "reactive")
        if processing_mode not in ["reactive", "deliberative"]:
            processing_mode = "reactive"
        query_type = result.get("query_type", "informational")
        if query_type not in ["emergency", "informational", "analytical"]:
            query_type = "informational"

        return {
            **state,
            "query_type": query_type,
            "processing_mode": processing_mode,
        }
    except Exception as e:
        return {
            **state,
            "error": f"评估阶段出错: {str(e)}",
            "final_response": "评估查询时发生错误，无法处理您的请求。"
        }

# Reactive processing - fast responses for simple queries.
def reactive_processing(state: WealthAdvisorState) -> WealthAdvisorState:
    """Reactive mode: produce a quick response, with tool-call support.

    Index/quote style questions are answered directly via
    :func:`query_shanghai_index`; other queries go to the LLM when one is
    available, otherwise a static fallback message is returned.  The
    answer is written to ``final_response``.
    """
    print("[DEBUG] 进入节点: reactive_processing")
    try:
        q = state.get("user_query", "")
        # Quote/index questions: call the market-data tool directly.
        if any(k in q for k in ["上证", "指数", "行情", "当前", "点位"]):
            resp = query_shanghai_index(q)
            return {**state, "final_response": resp}

        # Otherwise try a quick LLM-generated answer.
        if llm is not None:
            try:
                profile_json = json.dumps(state.get("customer_profile", {}), ensure_ascii=False)
                prompt_text = REACTIVE_PROMPT.format(user_query=q, customer_profile=profile_json)
                raw = llm.generate(prompt_text, max_length=256)
                # If the reply is JSON-like, prefer an explicit answer field.
                try:
                    parsed = json.loads(raw)
                    if isinstance(parsed, dict):
                        direct = parsed.get("direct_answer") or parsed.get("answer") or parsed.get("response")
                        if direct:
                            return {**state, "final_response": direct}
                except Exception:
                    pass
                return {**state, "final_response": raw}
            except Exception as e:
                print(f"[DEBUG] reactive_processing llm 调用失败: {e}")
                return {**state, "final_response": "抱歉，暂时无法获取智能回复。"}

        # No LLM available: static fallback answer.
        return {**state, "final_response": "抱歉，我现在无法访问模型服务，无法提供详细回答。"}
    except Exception as e:
        return {
            **state,
            "error": f"反应式处理出错: {str(e)}",
            "final_response": "处理您的查询时发生错误，无法提供响应。"
        }

# Data collection - gather the data required for deep analysis.
def collect_data(state: WealthAdvisorState) -> WealthAdvisorState:
    """Collect market data and customer info for the deliberative path.

    On success stores the LLM's ``collected_data`` object (or simulated
    data when no LLM is configured) in ``market_data`` and advances
    ``current_phase`` to ``"analyze"``.  On failure records ``error`` and
    keeps the phase at ``"collect_data"`` so the graph can retry.
    """
    print("[DEBUG] 进入节点: collect_data")
    try:
        profile_json = json.dumps(state.get("customer_profile", {}), ensure_ascii=False, indent=2)

        if llm is not None:
            prompt_text = DATA_COLLECTION_PROMPT.format(
                user_query=state.get("user_query", ""),
                customer_profile=profile_json,
            )
            raw = llm.generate(prompt_text, max_length=512)
            # Strict JSON first, then the first {...} span.  Malformed JSON
            # is treated the same as "no JSON found" (parsed stays None)
            # instead of aborting the whole node.
            parsed = None
            try:
                parsed = json.loads(raw)
            except Exception:
                m = re.search(r"\{[\s\S]*\}", raw)
                if m:
                    try:
                        parsed = json.loads(m.group(0))
                    except Exception:
                        parsed = None
            collected = parsed.get("collected_data", {}) if isinstance(parsed, dict) else {}
            return {**state, "market_data": collected, "current_phase": "analyze"}

        # No LLM: return simulated data so the pipeline can proceed.
        simulated = {"shanghai_index": {"price": 3125.62, "change": 6.32}}
        return {**state, "market_data": simulated, "current_phase": "analyze"}
    except Exception as e:
        return {
            **state,
            "error": f"数据收集阶段出错: {str(e)}",
            "current_phase": "collect_data"  # stay in this phase for retry
        }

# Deep analysis - analyse the data and the customer's situation.
def analyze_data(state: WealthAdvisorState) -> WealthAdvisorState:
    """Perform the deep investment-analysis step.

    Requires ``market_data`` in *state*; if it is missing the phase is
    sent back to ``"collect_data"``.  Stores the LLM output in
    ``analysis_results`` (parsed JSON when possible, otherwise wrapped as
    ``{"text": raw}``) and advances the phase to ``"recommend"``.
    """
    print("[DEBUG] 进入节点: analyze_data")
    try:
        # Guard: analysis is meaningless without collected market data.
        if not state.get("market_data"):
            return {
                **state,
                "error": "分析阶段缺少市场数据",
                "current_phase": "collect_data"  # go back to data collection
            }

        if llm is not None:
            prompt_text = ANALYSIS_PROMPT.format(
                user_query=state.get("user_query", ""),
                customer_profile=json.dumps(state.get("customer_profile", {}), ensure_ascii=False, indent=2),
                market_data=json.dumps(state.get("market_data", {}), ensure_ascii=False, indent=2),
            )
            raw = llm.generate(prompt_text, max_length=1024)
            try:
                parsed = json.loads(raw)
            except Exception:
                # Keep the raw text so downstream steps still get something.
                parsed = {"text": raw}
            return {**state, "analysis_results": parsed, "current_phase": "recommend"}

        # No LLM: minimal canned analysis so the pipeline can continue.
        simple = {"market_assessment": "市场平稳", "portfolio_analysis": {}, "recommendations": [{"text": "保持观察"}], "risk_analysis": {}, "expected_outcomes": {}}
        return {**state, "analysis_results": simple, "current_phase": "recommend"}
    except Exception as e:
        return {
            **state,
            "error": f"分析阶段出错: {str(e)}",
            "current_phase": "analyze"  # stay in this phase
        }

# Recommendation generation - turn analysis results into investment advice.
def generate_recommendations(state: WealthAdvisorState) -> WealthAdvisorState:
    """Generate investment recommendations and an action plan.

    Requires ``analysis_results`` in *state*; if missing the phase is sent
    back to ``"analyze"``.  Writes the recommendation text to
    ``final_response`` and advances the phase to ``"respond"``; a static
    fallback recommendation is used when the LLM is unavailable or fails
    (in the latter case an ``error`` is also recorded).
    """
    print("[DEBUG] 进入节点: generate_recommendations")
    try:
        # Guard: cannot recommend without analysis results.
        if not state.get("analysis_results"):
            return {
                **state,
                "error": "建议生成阶段缺少分析结果",
                "current_phase": "analyze"  # go back to analysis
            }

        fallback = "基于分析，建议保持现有配置，逐步增配权益类资产，关注风险控制。"

        if llm is not None:
            # Build the prompt only when it will actually be used.
            prompt_text = RECOMMENDATION_PROMPT.format(
                user_query=state.get("user_query", ""),
                customer_profile=json.dumps(state.get("customer_profile", {}), ensure_ascii=False, indent=2),
                analysis_results=json.dumps(state.get("analysis_results", {}), ensure_ascii=False, indent=2),
            )
            try:
                raw = llm.generate(prompt_text, max_length=512)
                # Use the model's text directly as the final answer.
                return {**state, "final_response": raw, "current_phase": "respond"}
            except Exception as e:
                return {**state, "final_response": fallback, "current_phase": "respond", "error": f"LLM生成失败: {str(e)}"}

        # No LLM: static fallback recommendation.
        return {**state, "final_response": fallback, "current_phase": "respond"}
    except Exception as e:
        return {
            **state,
            "error": f"建议生成阶段出错: {str(e)}",
            "current_phase": "recommend"  # stay in this phase
        }