"""
对话管理路由
"""

import asyncio
from datetime import datetime
from typing import List, Dict, Any, Optional, AsyncGenerator, Union

from fastapi import APIRouter, HTTPException, Depends
from fastapi.responses import StreamingResponse
from pydantic import BaseModel
from sqlalchemy.orm import Session
from langgraph.types import Command

# 导入AI Agent相关模块
from AIAgents import (
    AgentFactory
)
from config.database import get_db_session
# 导入记忆管理相关模块
from memory.MemoryFactory import MemoryFactory
from pojo.ai_web_mcp_tools_model import AIWebMCPToolsModel
from utils.SendResponseDTO import SendResponseDTO

router = APIRouter()

# 全局变量存储当前使用的agent
current_agent = None


class ChatMessage(BaseModel):
    """A single message in a conversation history."""
    role: str  # message originator tag; exact vocabulary set by callers (e.g. user/assistant) — TODO confirm
    content: str  # message text
    timestamp: str  # string timestamp; format defined by producers


class ChatResponse(BaseModel):
    """A stored conversation with its ordered message list."""
    id: int  # conversation identifier
    title: str  # conversation title
    messages: List[ChatMessage]  # ordered message history
    created_at: str  # creation time as a string


class AgentInitRequest(BaseModel):
    """Request model for initializing an Agent."""
    agent_id: Optional[str] = None
    agent_code: str = "chat"  # agent type code, defaults to "chat"
    agent_name: str = "智能助手"  # default display name
    scene_id: Optional[str] = None
    tenant_id: str = "default"  # default tenant
    llm_code: Optional[str] = None
    embedding_code: Optional[str] = None
    sys_prompt: Optional[str] = None
    status: str = "1"  # default enabled status
    version: str = "1.0.0"  # default version
    tools_list: Optional[List[Union[str, Dict[str, Any]]]] = None  # tool ids or tool descriptors
    mcp_list: Optional[List[Union[str, Dict[str, Any]]]] = None  # MCP ids or descriptors; validated against DB
    rag_list: Optional[List[Dict[str, Any]]] = None
    memory_config: Optional[Dict[str, Any]] = None  # passed through MemoryFactory.validate_config
    llm_params: Optional[Dict[str, Any]] = None
    embedding_params: Optional[Dict[str, Any]] = None

class ChatMessageRequest(BaseModel):
    """Request model for sending a chat message."""
    message: str  # the user's question / input text
    session_id: Optional[str] = None
    scene_id: Optional[str] = None  # when set and different, the agent's scene is switched


class HumanInteractionResponse(BaseModel):
    """Human-in-the-loop interaction response model."""
    interaction_id: str  # identifies the pending interaction being answered
    approved: bool  # True = user approved the action
    message: Optional[str] = None  # optional free-text note from the user


class ChatAnswerResponse(BaseModel):
    """Answer response model for non-streaming replies."""
    success: bool
    response: str  # the agent's answer text
    timestamp: str
    agent_name: str
    session_id: Optional[str] = None
    analysis: Optional[Dict[str, Any]] = None
    tool_results: Optional[List[Dict[str, Any]]] = None
    rag_results: Optional[List[Dict[str, Any]]] = None
    error: Optional[str] = None  # set when success is False


class StreamMessage(BaseModel):
    """Streaming (SSE) message model."""
    type: str  # one of: message, think, tool, rag, error, done
    content: str
    timestamp: str
    data: Optional[Dict[str, Any]] = None  # optional structured payload


# 配置验证通用方法
def validate_agent_config(config: Dict[str, Any]) -> Dict[str, Any]:
    """验证Agent配置的通用方法"""
    required_fields = ['agent_name', 'llm_code']
    missing_fields = [field for field in required_fields if not config.get(field)]

    if missing_fields:
        # 如果缺少必要字段，抛出异常
        raise HTTPException(
            status_code=400,
            detail=f"缺少必要的配置字段: {', '.join(missing_fields)}"
        )
    return config


def validate_mcp_tools_in_chat(db: Session, mcp_list: List[Union[str, Dict[str, Any]]]) -> List[str]:
    """Validate and normalize an MCP tool list (Chat-module specific).

    Accepts a mix of raw id strings and descriptor dicts, extracts candidate
    ids, and keeps only those that exist in the database with online status.
    Unknown or offline ids are logged and dropped.
    """
    if not mcp_list:
        return []

    # Normalize the heterogeneous input into a flat list of candidate ids.
    candidate_ids = []
    for entry in mcp_list:
        if isinstance(entry, str):
            candidate_ids.append(entry)
        elif isinstance(entry, dict):
            # Descriptors may carry the id under several keys.
            extracted = entry.get('id') or entry.get('mcp_id') or entry.get('value')
            if extracted:
                candidate_ids.append(extracted)

    # Keep only candidates that exist and are marked online (status == 1).
    validated = []
    for candidate in candidate_ids:
        record = db.query(AIWebMCPToolsModel).filter(
            AIWebMCPToolsModel.id == candidate,
            AIWebMCPToolsModel.status == 1  # online
        ).first()

        if record is None:
            print(f"警告：MCP工具 {candidate} 不存在或未上线，已跳过")
        else:
            validated.append(candidate)

    return validated


@router.post("/init_agent")
async def init_agent(request: AgentInitRequest, db: Session = Depends(get_db_session)):
    """Initialize the global Agent from the request configuration.

    Validates the requested MCP tools against the database, assembles and
    validates a memory configuration, builds the base config, and creates the
    agent via AgentFactory, storing it in the module-level ``current_agent``.

    Raises:
        HTTPException: 400 when required config fields are missing or the
            agent type cannot be created; 500 on unexpected failures.
    """
    global current_agent

    try:
        print(f"收到初始化Agent请求 - agent_code: {request.agent_code}, agent_name: {request.agent_name}")

        # Validate and normalize the MCP tool list (unknown/offline ids are dropped).
        valid_mcp_ids = validate_mcp_tools_in_chat(db, request.mcp_list or [])

        # Work on a copy so the request's memory_config dict is not mutated in place.
        memory_config = dict(request.memory_config or {})
        memory_config.update({
            "scene_id": request.scene_id or memory_config.get("scene_id", ""),
            "agent_id": request.agent_id or memory_config.get("agent_id", f"agent_{int(datetime.now().timestamp())}"),
            "user_id": memory_config.get("user_id", "1")
        })

        # Generate a session_id from the RESOLVED agent id — using the raw
        # request.agent_id produced "session_None_..." when it was omitted.
        if not memory_config.get("session_id"):
            memory_config["session_id"] = f"session_{memory_config['agent_id']}_{int(datetime.now().timestamp())}"

        # Validate the memory configuration.
        validated_memory_config = MemoryFactory.validate_config(memory_config)

        # Assemble the base configuration parameters.
        base_config = {
            "agent_name": request.agent_name or "智能助手",
            "agent_id": request.agent_id,
            "scene_id": request.scene_id,
            "llm_code": request.llm_code or "deepseek-chat",
            "embedding_code": request.embedding_code,
            "sys_prompt": request.sys_prompt,
            "memory_config": validated_memory_config,
            "llm_params": request.llm_params,
            "embedding_params": request.embedding_params,
            "tools_list": request.tools_list or [],
            "mcp_list": valid_mcp_ids,
            "rag_list": request.rag_list or []
        }

        # Validate the assembled configuration (raises HTTPException(400) if invalid).
        config = validate_agent_config(base_config)

        # Create the agent through the factory.
        agent = AgentFactory.create(agent_code=request.agent_code, **config)

        if agent is None:
            raise HTTPException(
                status_code=400,
                detail=f"无法创建Agent类型: {request.agent_code}"
            )

        # Install as the global agent.
        current_agent = agent

        # Best-effort: collect memory-manager statistics for the response.
        memory_stats = {}
        if hasattr(agent, 'memory_manager') and agent.memory_manager:
            try:
                memory_stats = agent.memory_manager.get_memory_stats()
            except Exception as e:
                print(f"获取记忆统计信息失败: {e}")
                memory_stats = {}

        return {
            "success": True,
            "message": f"Agent '{request.agent_name}' 初始化成功",
            "agent_id": request.agent_id,
            "agent_name": request.agent_name,
            "agent_code": request.agent_code,
            "session_id": validated_memory_config["session_id"],
            "agent_config": {
                "llm_code": request.llm_code,
                "embedding_code": request.embedding_code,
                "tools_count": len(request.tools_list or []),
                "mcp_count": len(valid_mcp_ids),
                "rag_count": len(request.rag_list or []),
                "memory_type": validated_memory_config.get("type", "buffer"),
                "memory_stats": memory_stats
            },
            "timestamp": datetime.now().isoformat()
        }

    except HTTPException:
        # BUG FIX: deliberate HTTP errors (e.g. the 400s raised above) were
        # previously swallowed by the generic handler and re-raised as 500s.
        raise
    except Exception as e:
        print(f"初始化Agent失败: {e}")
        raise HTTPException(
            status_code=500,
            detail=f"初始化Agent失败: {str(e)}"
        )


@router.get("/agent_status")
async def get_agent_status():
    """Report whether the global agent exists and summarize its components."""
    global current_agent

    agent = current_agent
    if agent is None:
        return {
            "initialized": False,
            "message": "未初始化Agent"
        }

    def _count(items):
        # Length of an optional collection, treating None/empty as zero.
        return len(items) if items else 0

    return {
        "initialized": True,
        "agent_name": agent.agent_name,
        "agent_type": type(agent).__name__,
        "has_llm": agent.llm is not None,
        "has_embedding": agent.embedding is not None,
        "has_memory": agent.memory is not None,
        "tools_count": _count(agent.tools),
        "mcps_count": _count(agent.mcps),
        "rags_count": _count(agent.rags)
    }


@router.post("/reset_agent")
async def reset_agent():
    """Drop the globally cached agent so a fresh one can be initialized."""
    global current_agent
    current_agent = None
    return {"success": True, "message": "Agent已重置"}


@router.get("/human_interaction_response")
async def handle_human_interaction_response(
        interaction_id: str,
        approved: str,  # query-string parameter, hence str rather than bool
        message: Optional[str] = None
):
    """Process the user's human-in-the-loop decision and stream the resumed run.

    Forwards the approval/rejection to the current agent; for graph-capable
    agents the interrupted LangGraph execution is resumed via
    ``Command(resume=...)`` and its updates are relayed as SSE events.
    Agents without a graph get a simple final-result message instead.
    """
    global current_agent

    # The query string carries "true"/"false"; anything else is treated as False.
    approved_bool = approved.lower() == 'true'

    async def generate_stream() -> AsyncGenerator[str, None]:
        import json
        try:
            print(f"收到人机交互响应 - 交互ID: {interaction_id}, 批准: {approved_bool}")

            # An agent must be initialized before a response can be handled.
            if current_agent is None:
                error_msg = SendResponseDTO.create_simple_message(
                    "error", "请先初始化Agent后再发送响应"
                )
                yield f"data: {json.dumps(error_msg, ensure_ascii=False)}\n\n"
                return

            # The agent must implement the human-interaction protocol.
            if not hasattr(current_agent, 'handle_user_response'):
                error_msg = SendResponseDTO.create_simple_message(
                    "error", "当前Agent不支持人机交互功能"
                )
                yield f"data: {json.dumps(error_msg, ensure_ascii=False)}\n\n"
                return

            # Hand the decision to the agent; False means it could not be applied.
            success = await current_agent.handle_user_response(
                interaction_id=interaction_id,
                approved=approved_bool,
                message=message
            )

            if not success:
                error_msg = SendResponseDTO.create_simple_message(
                    "error", "处理用户响应失败"
                )
                yield f"data: {json.dumps(error_msg, ensure_ascii=False)}\n\n"
                return

            print(f"✅ 用户响应处理完成，准备恢复执行")

            # Resume the interrupted execution when the agent exposes a graph.
            if hasattr(current_agent, 'graph') and hasattr(current_agent, 'current_config'):
                print(f"使用Command(resume={approved_bool})恢复执行")

                try:
                    graph_stream = current_agent.graph.astream(
                        Command(resume=approved_bool),
                        config=current_agent.current_config,
                        stream_mode="updates"
                    )

                    # Prefer the agent's own stream post-processor when available.
                    if hasattr(current_agent, '_process_graph_stream'):
                        async for chunk in current_agent._process_graph_stream(graph_stream, current_agent.current_config):
                            yield f"data: {json.dumps(chunk, ensure_ascii=False)}\n\n"
                            await asyncio.sleep(0.01)
                    else:
                        # Otherwise relay the raw graph stream node by node.
                        async for chunk in graph_stream:
                            if chunk:
                                for node_name, node_result in chunk.items():
                                    # Skip internal bookkeeping entries (double-underscore names).
                                    if node_name.startswith('__'):
                                        continue

                                    node_update = await current_agent._send_node_update(node_name, node_result)
                                    if node_update:
                                        yield f"data: {json.dumps(node_update, ensure_ascii=False)}\n\n"
                                        await asyncio.sleep(0.01)

                except Exception as e:
                    print(f"恢复执行失败: {e}")
                    error_msg = SendResponseDTO.create_simple_message(
                        "error", f"恢复执行失败: {str(e)}"
                    )
                    yield f"data: {json.dumps(error_msg, ensure_ascii=False)}\n\n"
            else:
                # Graph-less agents cannot be resumed; report the outcome directly.
                if approved_bool:
                    final_msg = SendResponseDTO.create_final_result_message(
                        success=True,
                        response="操作已确认执行",
                        agent_name=getattr(current_agent, 'agent_name', 'Agent')
                    )
                else:
                    final_msg = SendResponseDTO.create_final_result_message(
                        success=False,
                        response="用户取消了操作执行",
                        agent_name=getattr(current_agent, 'agent_name', 'Agent')
                    )
                yield f"data: {json.dumps(final_msg, ensure_ascii=False)}\n\n"

            # Terminate the SSE stream with an explicit done event.
            done_msg = SendResponseDTO.create_simple_message("done", "")
            yield f"data: {json.dumps(done_msg, ensure_ascii=False)}\n\n"

        except Exception as e:
            print(f"处理人机交互响应失败: {e}")
            error_msg = SendResponseDTO.create_simple_message(
                "error", f"处理响应失败: {str(e)}"
            )
            yield f"data: {json.dumps(error_msg, ensure_ascii=False)}\n\n"

    return StreamingResponse(
        generate_stream(),
        media_type="text/event-stream",
        headers={
            "Cache-Control": "no-cache",
            "Connection": "keep-alive",
            "Access-Control-Allow-Origin": "*",
            "Access-Control-Allow-Headers": "*"
        }
    )


@router.post("/answer_stream")
async def answer_stream(request: ChatMessageRequest):
    """Answer a question with the initialized Agent, streamed as SSE events.

    Each event is a JSON payload on a ``data:`` line. The stream ends with a
    ``done`` message on success, or an ``error`` message on failure.
    """
    global current_agent

    async def generate_stream() -> AsyncGenerator[str, None]:
        import json  # hoisted: previously re-imported on every loop iteration
        try:
            # An agent must be initialized before messages can be answered.
            if current_agent is None:
                error_msg = SendResponseDTO.create_simple_message(
                    "error", "请先初始化Agent后再发送消息"
                )
                # BUG FIX: serialize to JSON like every other SSE payload —
                # previously the raw dict repr was interpolated into the stream.
                yield f"data: {json.dumps(error_msg, ensure_ascii=False)}\n\n"
                return

            # Switch scenes when the request carries a different scene_id.
            if request.scene_id and hasattr(current_agent, 'scene_id'):
                if current_agent.scene_id != request.scene_id:
                    print(f"场景ID发生变化: {current_agent.scene_id} -> {request.scene_id}")
                    if hasattr(current_agent, 'setSceneId'):
                        try:
                            current_agent.setSceneId(request.scene_id)
                            print(f"场景ID已更新为: {request.scene_id}")
                        except Exception as e:
                            # Best-effort: a failed scene switch should not kill the stream.
                            print(f"更新场景ID失败: {e}")

            # Stream the agent's response chunk by chunk.
            if hasattr(current_agent, 'process_stream') and callable(getattr(current_agent, 'process_stream')):
                async for chunk in current_agent.process_stream({"input": request.message}):
                    yield f"data: {json.dumps(chunk, ensure_ascii=False)}\n\n"
                    await asyncio.sleep(0.01)  # small pause so slow clients can keep up

                # Signal normal completion.
                done_msg = SendResponseDTO.create_simple_message("done", "")
                yield f"data: {json.dumps(done_msg, ensure_ascii=False)}\n\n"
            else:
                # BUG FIX: agents without process_stream previously produced an
                # empty stream — no error and no done signal. Tell the client.
                error_msg = SendResponseDTO.create_simple_message(
                    "error", "当前Agent不支持流式响应"
                )
                yield f"data: {json.dumps(error_msg, ensure_ascii=False)}\n\n"

        except Exception as e:
            print(f"流式回答问题失败: {e}")
            error_msg = SendResponseDTO.create_simple_message(
                "error", f"处理消息时发生错误: {str(e)}"
            )
            yield f"data: {json.dumps(error_msg, ensure_ascii=False)}\n\n"

    return StreamingResponse(
        generate_stream(),
        media_type="text/event-stream",
        headers={
            "Cache-Control": "no-cache",
            "Connection": "keep-alive",
            "Access-Control-Allow-Origin": "*",
            "Access-Control-Allow-Headers": "*"
        }
    )
