# backend/graphs/multi_agent_graph.py
"""
多智能体协作图工作流
"""

from typing import Dict, Any, List, TypedDict, Annotated
from langgraph.graph import StateGraph, END
from langgraph.prebuilt import ToolExecutor
from langchain.schema import BaseMessage, HumanMessage, AIMessage
from langchain_core.agents import AgentAction, AgentFinish
from ..models import ModelFactory, AIModelResponse
from ..agents.chat_agent import ChatAgent
from ..agents.coordinator import CoordinatorAgent
import asyncio
import logging

logger = logging.getLogger(__name__)

class AgentState(TypedDict):
    """Shared state passed between the nodes of the multi-agent workflow."""
    # Conversation history; the Annotated metadata string is descriptive only.
    messages: Annotated[List[BaseMessage], "对话消息列表"]
    # Raw text of the current user request.
    user_input: str
    # Model names chosen by the coordinator node for this request.
    selected_models: List[str]
    # Per-model responses produced by the parallel chat node.
    responses: Dict[str, AIModelResponse]
    # Final synthesized answer returned to the caller.
    final_response: str
    # Free-form metadata attached by the coordinator.
    metadata: Dict[str, Any]
    # Name of the next node; informational — the graph edges are static.
    next_action: str

class MultiAgentGraph:
    """Multi-agent collaboration graph.

    Builds and runs a linear three-node LangGraph workflow::

        coordinator -> parallel_chat -> synthesize -> END

    * ``coordinator``   — asks the CoordinatorAgent which models to use.
    * ``parallel_chat`` — fans the user input out to the selected agents
      concurrently via ``asyncio.gather``.
    * ``synthesize``    — merges the individual responses into one answer.

    Every node degrades gracefully on failure so the workflow always
    produces *some* final response.
    """

    # Models attempted when the caller does not supply an explicit list.
    DEFAULT_MODEL_NAMES = ["gpt-4", "gpt-3.5-turbo", "claude-3"]

    # Maps a model-name prefix to the ``api_keys`` entry that serves it.
    _PROVIDER_BY_PREFIX = {"gpt": "openai", "claude": "anthropic"}

    def __init__(self, api_keys: Dict[str, str], model_names: List[str] = None):
        """Initialize agents for the configured models and compile the graph.

        Args:
            api_keys: Provider name ("openai", "anthropic") -> API key.
            model_names: Optional list of model names to initialize;
                defaults to ``DEFAULT_MODEL_NAMES``. Models whose provider
                key is absent from ``api_keys`` are silently skipped.
        """
        self.api_keys = api_keys
        # Copy so later mutation by the caller cannot affect us.
        self.model_names = list(model_names) if model_names else list(self.DEFAULT_MODEL_NAMES)
        self.agents: Dict[str, ChatAgent] = {}
        self.coordinator = CoordinatorAgent()
        self.graph = None
        self._initialize_agents()
        self._build_graph()

    def _api_key_for(self, model_name: str):
        """Return the API key serving *model_name*, or None if unavailable."""
        for prefix, provider in self._PROVIDER_BY_PREFIX.items():
            if model_name.startswith(prefix) and provider in self.api_keys:
                return self.api_keys[provider]
        return None

    def _initialize_agents(self):
        """Create one ChatAgent per configured model that has a usable key.

        Initialization is best-effort: a model that fails to construct is
        logged and skipped so the remaining agents still come up.
        """
        for model_name in self.model_names:
            api_key = self._api_key_for(model_name)
            if api_key is None:
                continue
            try:
                model = ModelFactory.create_model(model_name, api_key)
                self.agents[model_name] = ChatAgent(model)
                logger.info(f"智能体 {model_name} 初始化成功")
            except Exception as e:
                logger.error(f"智能体 {model_name} 初始化失败: {e}")

    def _build_graph(self):
        """Compile the fixed coordinator -> parallel_chat -> synthesize graph."""
        workflow = StateGraph(AgentState)

        # Nodes: one per workflow stage.
        workflow.add_node("coordinator", self._coordinator_node)
        workflow.add_node("parallel_chat", self._parallel_chat_node)
        workflow.add_node("synthesize", self._synthesize_node)

        # Edges: a strictly linear pipeline.
        workflow.set_entry_point("coordinator")
        workflow.add_edge("coordinator", "parallel_chat")
        workflow.add_edge("parallel_chat", "synthesize")
        workflow.add_edge("synthesize", END)

        self.graph = workflow.compile()
        logger.info("多智能体工作流图构建完成")

    async def _coordinator_node(self, state: AgentState) -> AgentState:
        """Coordinator node — decide which models should answer.

        On coordinator failure, degrades to (at most) the first available
        agent so the workflow can still proceed.
        """
        try:
            analysis = await self.coordinator.analyze_request(
                state["user_input"],
                list(self.agents.keys())
            )

            # Default to the first two agents when the coordinator does
            # not name any models explicitly.
            state["selected_models"] = analysis.get("selected_models", list(self.agents.keys())[:2])
            state["metadata"] = analysis.get("metadata", {})
            state["next_action"] = "parallel_chat"

            logger.info(f"协调器选择模型: {state['selected_models']}")
            return state

        except Exception as e:
            logger.error(f"协调节点错误: {e}")
            state["selected_models"] = list(self.agents.keys())[:1]
            state["next_action"] = "parallel_chat"
            return state

    async def _parallel_chat_node(self, state: AgentState) -> AgentState:
        """Parallel chat node — run the selected models concurrently.

        Failures of individual models are converted into placeholder
        AIModelResponse objects instead of aborting the whole fan-out.
        """
        try:
            # Append the current request to the running conversation.
            messages = state["messages"] + [HumanMessage(content=state["user_input"])]

            # Build one coroutine per selected model that we actually have
            # an agent for (the coordinator may name unavailable models).
            tasks = []
            for model_name in state["selected_models"]:
                if model_name in self.agents:
                    agent = self.agents[model_name]
                    task = agent.generate_response(messages)
                    tasks.append((model_name, task))

            # Await everything at once; return_exceptions keeps one bad
            # model from cancelling the others.
            responses = {}
            if tasks:
                results = await asyncio.gather(
                    *[task for _, task in tasks],
                    return_exceptions=True
                )

                for (model_name, _), result in zip(tasks, results):
                    if isinstance(result, Exception):
                        logger.error(f"模型 {model_name} 响应失败: {result}")
                        responses[model_name] = AIModelResponse(
                            content=f"模型响应失败: {str(result)}",
                            model_name=model_name,
                            provider="unknown"
                        )
                    else:
                        responses[model_name] = result
                        logger.info(f"模型 {model_name} 响应成功")

            state["responses"] = responses
            state["next_action"] = "synthesize"
            return state

        except Exception as e:
            logger.error(f"并行聊天节点错误: {e}")
            state["responses"] = {}
            state["next_action"] = "synthesize"
            return state

    async def _synthesize_node(self, state: AgentState) -> AgentState:
        """Synthesize node — merge per-model responses into one answer.

        Skips the (potentially expensive) coordinator synthesis when
        there is nothing, or only one response, to merge. On synthesis
        failure, falls back to the first successful response.
        """
        try:
            responses = state["responses"]

            if not responses:
                state["final_response"] = "抱歉，所有模型都无法响应您的问题。"
                return state

            # A single response needs no synthesis — return it verbatim.
            if len(responses) == 1:
                response = next(iter(responses.values()))
                state["final_response"] = response.content
                return state

            synthesized = await self.coordinator.synthesize_responses(
                state["user_input"],
                responses
            )

            state["final_response"] = synthesized
            logger.info("响应综合完成")
            return state

        except Exception as e:
            logger.error(f"综合节点错误: {e}")
            # Degrade: hand back the first available response as-is.
            if state["responses"]:
                first_response = next(iter(state["responses"].values()))
                state["final_response"] = first_response.content
            else:
                state["final_response"] = "抱歉，处理您的请求时出现了错误。"
            return state

    async def run(
        self, 
        user_input: str, 
        conversation_history: List[BaseMessage] = None
    ) -> Dict[str, Any]:
        """Run the multi-agent workflow for one user request.

        Args:
            user_input: The user's question/request text.
            conversation_history: Optional prior messages for context.

        Returns:
            Dict with keys ``final_response`` (str), ``selected_models``
            (list of model names), ``individual_responses`` (model name ->
            response text), and ``metadata``. On failure, returns an
            apology message plus the error under ``metadata["error"]``
            instead of raising.
        """
        try:
            initial_state = AgentState(
                messages=conversation_history or [],
                user_input=user_input,
                selected_models=[],
                responses={},
                final_response="",
                metadata={},
                next_action=""
            )

            final_state = await self.graph.ainvoke(initial_state)

            return {
                "final_response": final_state["final_response"],
                "selected_models": final_state["selected_models"],
                "individual_responses": {
                    model: resp.content 
                    for model, resp in final_state["responses"].items()
                },
                "metadata": final_state["metadata"]
            }

        except Exception as e:
            logger.error(f"工作流运行失败: {e}")
            return {
                "final_response": f"抱歉，处理请求时出现错误: {str(e)}",
                "selected_models": [],
                "individual_responses": {},
                "metadata": {"error": str(e)}
            }

