"""增强的聊天路由"""

from __future__ import annotations

import asyncio
import json
import uuid
from typing import AsyncGenerator, Dict, Any

from fastapi import APIRouter, Request
from fastapi.responses import StreamingResponse

from core.graph.chat_graph import build_chat_graph, IntentType
from core.services.llm_service import stream_chat_completion

# Router for the enhanced chat endpoints; mounted by the application factory.
router = APIRouter()
# Compile the LangGraph chat graph once at import time so every request reuses it.
compiled_graph = build_chat_graph()


async def sse_event(data: Dict) -> bytes:
    """Encode *data* as a single UTF-8 SSE ``data:`` frame.

    The payload is serialized as JSON with non-ASCII characters kept
    verbatim (``ensure_ascii=False``) and terminated by the blank line
    that delimits server-sent events.
    """
    payload = json.dumps(data, ensure_ascii=False)
    frame = "data: " + payload + "\n\n"
    return frame.encode("utf-8")


def _should_use_llm_generation(state: Dict, final_answer: str) -> bool:
    """判断是否应该使用LLM生成动态答案"""
    # 检查知识库质量
    knowledge_items = state.get("retrieved_knowledge", [])
    user_text = state.get("messages", [{}])[-2].get("content", "") if len(state.get("messages", [])) >= 2 else ""
    
    # 如果知识库内容不足，使用LLM
    if not knowledge_items or len(knowledge_items) == 0:
        return True
    
    # 检查是否是通用问答（非代码分析、错误诊断等）
    user_intent = state.get("user_intent")
    if user_intent and user_intent.value in ["qa", "other"]:
        # 对于通用问答，优先使用LLM生成更自然的回答
        return True
    
    # 检查答案是否过于模板化
    template_indicators = ["# 💡 Cairo 助手回答", "## 🧠 专业分析", "## 💡 通用建议"]
    if any(indicator in final_answer for indicator in template_indicators):
        return True
    
    return False


def _build_context_messages(state: Dict) -> List[Dict[str, Any]]:
    """构建包含上下文的消息列表"""
    messages = []
    
    # 添加系统消息
    system_prompt = """你是一个专业的Cairo智能合约开发助手。请根据用户的问题提供准确、详细且实用的回答。

特别注意：
1. 如果用户询问Cairo语言相关问题，请提供准确的技术信息
2. 对比其他语言时，请给出具体的差异点
3. 提供代码示例时，确保语法正确
4. 回答要专业但易懂，适合不同水平的开发者

请直接回答用户的问题，不需要使用特殊格式或模板。"""
    
    messages.append({"role": "system", "content": system_prompt})
    
    # 添加知识库上下文（如果有）
    knowledge_items = state.get("retrieved_knowledge", [])
    if knowledge_items:
        context_content = "参考信息：\n"
        for item in knowledge_items[:3]:  # 只取前3个最相关的
            content = item.get("content", "")
            if content:
                context_content += f"- {content[:200]}...\n"
        
        messages.append({"role": "system", "content": context_content})
    
    # 添加用户消息
    user_messages = state.get("messages", [])
    for msg in user_messages:
        if msg.get("role") == "user":
            messages.append(msg)
    
    return messages


@router.post("/chat/stream")
async def chat_stream(request: Request) -> StreamingResponse:
    """Enhanced streaming chat endpoint.

    Reads a JSON body with ``message`` (user text), ``deep_research``
    (bool) and an optional ``session_id``, runs the compiled chat graph
    once, then streams SSE frames: status / node / analysis events,
    answer chunks (or one structured-response frame), and a final
    ``done`` frame carrying run metadata.

    Returns:
        StreamingResponse with media type ``text/event-stream``.
    """
    payload = await request.json()
    message = payload.get("message", "")
    deep_research = payload.get("deep_research", False)
    session_id = payload.get("session_id", str(uuid.uuid4()))

    # Initial graph state; the graph nodes mutate/extend this dict.
    state = {
        "messages": [{"role": "user", "content": message}],
        "session_id": session_id,
        "user_intent": None,
        "confidence_score": 0.0,
        "cairo_context": {
            "has_code": False,
            "code_blocks": [],
            "code_type": "none",
            "syntax_errors": [],
            "ast_analysis": None,
            "complexity_score": 0.0,
            "dependencies": [],
            "security_issues": []
        },
        "error_diagnosis": None,
        "research_context": None,
        "research_depth": 3 if deep_research else 1,  # deeper graph research when requested
        "retrieved_knowledge": [],
        "knowledge_relevance_scores": [],
        "next_action": None,
        "loop_count": 0,
        "max_loops": 5,
        "response_format": "structured",
        "include_code_examples": True,
        "include_references": True
    }

    async def event_generator() -> AsyncGenerator[bytes, None]:
        """Yield SSE frames describing graph progress and the final answer."""
        try:
            # Tell the client processing has started.
            yield await sse_event({
                "type": "status", 
                "content": "开始处理...",
                "session_id": session_id
            })
            
            # Run the enhanced chat graph.
            step_count = 0
            
            # Execute the graph.
            # NOTE(review): compiled_graph.invoke is synchronous and blocks
            # the event loop while the graph runs — consider ainvoke/astream
            # if the compiled graph supports it.
            try:
                # Raise the recursion limit so deeper graphs can complete.
                config = {
                    "recursion_limit": 50,  # raised recursion limit
                }
                
                # Run to completion and merge the final state back in.
                final_result = compiled_graph.invoke(state, config=config)
                state.update(final_result)
                
                # Report any external tool searches the graph performed.
                external_search_info = state.get("external_search_info", {})
                if external_search_info.get("used_tools"):
                    used_tools = external_search_info["used_tools"]
                    # NOTE(review): after the graph ran, messages[-1] may be
                    # the assistant reply rather than the user query — confirm.
                    search_query = state.get("messages", [{}])[-1].get("content", "相关内容")[:50]
                    
                    # Map tool names onto the per-source status keys the
                    # frontend expects.
                    source_status = {}
                    for tool in used_tools:
                        if tool == "cairo_docs":
                            source_status["cairo_docs_status"] = "completed"
                        elif tool == "github":
                            source_status["github_status"] = "completed"
                        elif tool == "stackoverflow":
                            source_status["stackoverflow_status"] = "completed"
                    
                    yield await sse_event({
                        "type": "external_search",
                        "content": f"已搜索外部资源: {', '.join(used_tools)}",
                        "sources": used_tools,
                        "query": search_query,
                        "stage": "completed",
                        **source_status
                    })
                
                # Emit a node-progress frame for the answer-generation stage.
                yield await sse_event({
                    "type": "node", 
                    "name": "answer_generation",
                    "step": 2
                })
                
                step_count = 1
                
            except Exception as e:
                yield await sse_event({
                    "type": "error",
                    "content": f"节点执行错误: {str(e)}"
                })
            
            # Emit the analysis results gathered by the graph.
            if state.get("user_intent"):
                intent_name = state["user_intent"].value if hasattr(state["user_intent"], 'value') else str(state["user_intent"])
                yield await sse_event({
                    "type": "analysis",
                    "content": f"意图识别: {intent_name}",
                    "confidence": state.get("confidence_score", 0)
                })
            
            if state.get("cairo_context", {}).get("has_code"):
                yield await sse_event({
                    "type": "analysis",
                    "content": f"代码类型: {state['cairo_context'].get('code_type', 'unknown')}"
                })
            
            if state.get("error_diagnosis"):
                yield await sse_event({
                    "type": "analysis", 
                    "content": f"错误类型: {state['error_diagnosis'].get('error_type', 'unknown')}"
                })
            
            # External-search status was already sent above; do not repeat it.
            
            # Fetch the final answer produced by the graph, if any.
            final_messages = state.get("messages", [])
            if final_messages and final_messages[-1].get("role") == "assistant":
                final_answer = final_messages[-1].get("content", "")
                
                # Regenerate via LLM when the graph left a placeholder,
                # set an explicit flag, or the heuristic says so.
                use_llm = (final_answer == "LLM_GENERATION_PLACEHOLDER" or 
                          state.get("use_llm_generation", False) or
                          _should_use_llm_generation(state, final_answer))
                
                if use_llm:
                    # Stream a dynamically generated answer from the LLM.
                    yield await sse_event({
                        "type": "status",
                        "content": "正在生成智能回答..."
                    })
                    
                    # Build messages with system prompt + knowledge context.
                    context_messages = _build_context_messages(state)
                    
                    async for chunk in stream_chat_completion(
                        context_messages,
                        deep_research=deep_research
                    ):
                        # Stop streaming as soon as the client goes away.
                        if await request.is_disconnected():
                            break
                        if chunk:
                            yield await sse_event({"type": "chunk", "content": chunk})
                else:
                    # Send the pre-built structured answer in one frame.
                    yield await sse_event({
                        "type": "structured_response",
                        "content": final_answer
                    })
            else:
                # No answer from the graph: fall back to direct LLM streaming
                # over the raw conversation.
                async for chunk in stream_chat_completion(
                    state["messages"],
                    deep_research=deep_research
                ):
                    if await request.is_disconnected():
                        break
                    if chunk:
                        yield await sse_event({"type": "chunk", "content": chunk})

            # Final frame with run metadata.
            yield await sse_event({
                "type": "done",
                "session_id": session_id,
                "metadata": {
                    "intent": state.get("user_intent").value if state.get("user_intent") else "unknown",
                    "confidence": state.get("confidence_score", 0),
                    "has_code": state.get("cairo_context", {}).get("has_code", False),
                    "research_depth": state.get("research_depth", 0),
                    "steps_executed": step_count
                }
            })

        except Exception as e:
            # Last-resort guard: surface any unexpected failure as an SSE error frame.
            yield await sse_event({
                "type": "error",
                "content": f"处理过程中发生错误: {str(e)}"
            })

    return StreamingResponse(event_generator(), media_type="text/event-stream")


@router.get("/chat/health")
async def chat_health():
    """Health check for the chat service.

    Attempts a fresh graph compilation and reports whether it succeeded,
    including the node count when the compiled graph exposes one.
    """
    try:
        probe_graph = build_chat_graph()
        node_total = len(probe_graph.nodes) if hasattr(probe_graph, 'nodes') else 0
        return {
            "status": "healthy",
            "graph_compiled": True,
            "nodes_count": node_total
        }
    except Exception as exc:
        return {
            "status": "unhealthy",
            "error": str(exc)
        }
