import json
import logging
import asyncio

from typing import List, Annotated, Optional, Literal, Sequence, TypedDict, Union
from pydantic import BaseModel, Field
from langchain_deepseek import ChatDeepSeek
from langchain_qwq import ChatQwQ
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.prompts import PromptTemplate
from langchain_mcp_adapters.client import MultiServerMCPClient
from langgraph.checkpoint.memory import InMemorySaver
from langchain_core.tools import tool
from langchain_core.messages import SystemMessage, HumanMessage, AIMessage, ToolMessage, BaseMessage
from langchain_core.messages.ai import AIMessageChunk, ToolCallChunk
from langchain_core.runnables import RunnableConfig
from langchain_core.messages.tool import ToolMessage
from langgraph.graph import StateGraph, END
from langgraph.graph.message import add_messages
from langgraph.prebuilt import ToolNode, tools_condition
from langgraph.prebuilt import create_react_agent


# ========================= AgentState类 - 定义Agent状态 =========================
class AgentState(TypedDict):
    """Shared state flowing between the LangGraph agent and tools nodes."""
    # Conversation history; the add_messages reducer appends/merges new
    # messages returned by each node instead of overwriting the list.
    messages: Annotated[Sequence[BaseMessage], add_messages]
    # Flag marking whether the agent is currently streaming output.
    streaming: bool


# ========================= AgentGenerator类 - 生成Agent =========================
class AgentGenerator:
    """Factory for LangGraph agents wired to MCP-provided tools.

    Provides helpers to load chat models, MCP tool lists and prompt files,
    plus builders for several agent flavors (a custom parallel tool-calling
    graph and several ``create_react_agent``-based prebuilt agents).
    """

    def __init__(self) -> None:
        pass

    @staticmethod
    def load_model(model_name, api_type='ChatDeepSeek'):
        """Instantiate a chat model.

        Args:
            model_name: Provider-specific model identifier.
            api_type: Either ``'ChatDeepSeek'`` or ``'ChatQwQ'``; any other
                value falls back to DeepSeek (matching historical behavior).

        Returns:
            A LangChain chat model instance.
        """
        if api_type == 'ChatQwQ':
            return ChatQwQ(model=model_name)
        # Default branch: 'ChatDeepSeek' and any unrecognized api_type.
        return ChatDeepSeek(model=model_name)

    @staticmethod
    async def load_tools(servers_config_path):
        """Load the tool list from an MCP servers JSON config file.

        The file is expected to contain a top-level ``"mcp_servers"`` mapping
        (missing key yields an empty server set, hence an empty tool list).
        """
        with open(servers_config_path, "r", encoding="utf-8") as f:
            mcp_servers = json.load(f).get("mcp_servers", {})
        mcp_client = MultiServerMCPClient(mcp_servers)
        return await mcp_client.get_tools()

    @staticmethod
    def load_prompt(prompts_file_path):
        """Read and return the entire prompt file as a UTF-8 string."""
        with open(prompts_file_path, "r", encoding="utf-8") as f:
            return f.read()

    async def create_parallel_tool_call_main_agent(self, model_name, servers_config_path, internal_tools, api_type='ChatDeepSeek'):
        """Build a custom LangGraph agent that executes tool calls in parallel.

        Args:
            model_name: Model identifier passed to :meth:`load_model`.
            servers_config_path: Path to the MCP servers JSON config.
            internal_tools: Accepted for interface parity with the other
                builders; currently NOT bound to this agent (TODO: confirm
                whether they should be merged into the MCP tool list).
            api_type: Model backend selector (see :meth:`load_model`).

        Returns:
            A compiled LangGraph with in-memory checkpointing.
        """
        # Load the LLM, the MCP tools and the conversation checkpointer.
        model = self.load_model(model_name, api_type)
        tools = await self.load_tools(servers_config_path)
        tool_map = {t.name: t for t in tools}
        checkpointer = InMemorySaver()

        model_with_tools = model.bind_tools(tools)

        async def call_model(state: AgentState, config: Optional[RunnableConfig] = None):
            """Agent node: invoke the LLM, which decides which tools to call."""
            response = await model_with_tools.ainvoke(state["messages"], config=config)
            return {"messages": [response]}

        async def _run_tool(tool_call):
            """Run a single tool call; unknown tools yield an error string.

            Returning the error as a normal result (instead of mutating the
            coroutine list, as the original code attempted) keeps one result
            per tool_call so results stay aligned with the calls below.
            """
            selected_tool = tool_map.get(tool_call["name"])
            if selected_tool is None:
                return f"错误：工具 '{tool_call['name']}' 未找到。"
            return await selected_tool.ainvoke(tool_call["args"])

        async def call_tools(state: AgentState):
            """Tools node: execute all requested tool calls concurrently.

            Exceptions from individual tools are captured (``return_exceptions``)
            and converted to error ToolMessages rather than aborting the batch.
            """
            last_message = state["messages"][-1]
            tool_calls = getattr(last_message, "tool_calls", None)
            if not tool_calls:
                return {"messages": []}

            # BUG FIX: the original appended coroutines only for known tools
            # and set `.result` on a coroutine object for unknown ones, which
            # raised AttributeError/IndexError and misaligned the zip of
            # tool_calls with results. Now every call maps to exactly one
            # result, in order.
            tool_results = await asyncio.gather(
                *(_run_tool(tc) for tc in tool_calls),
                return_exceptions=True,
            )

            tool_messages = []
            for tool_call, result in zip(tool_calls, tool_results):
                if isinstance(result, Exception):
                    content = f"调用工具 {tool_call['name']} 时出错: {str(result)}"
                else:
                    content = str(result)
                tool_messages.append(ToolMessage(
                    content=content,
                    name=tool_call['name'],
                    tool_call_id=tool_call['id'],
                ))

            return {"messages": tool_messages}

        # Graph: agent -> (tools_condition) -> tools -> agent, until no more
        # tool calls are requested (tools_condition then routes to END).
        graph_builder = StateGraph(AgentState)
        graph_builder.add_node("agent", call_model)
        graph_builder.add_node("tools", call_tools)
        graph_builder.set_entry_point("agent")
        graph_builder.add_conditional_edges("agent", tools_condition)
        graph_builder.add_edge("tools", "agent")
        # Keyword argument guards against positional-signature changes.
        return graph_builder.compile(checkpointer=checkpointer)

    async def create_click_to_ask_agent(self, model_name, prompts_file_path, servers_config_path='', internal_tools=(),
                                api_type='ChatDeepSeek'):
        """Build a prebuilt ReAct agent for the click-to-ask feature.

        When ``servers_config_path`` is empty, only ``internal_tools`` are
        bound (the original code silently dropped them in that branch).
        """
        model = self.load_model(model_name, api_type)
        prompt = self.load_prompt(prompts_file_path)
        if servers_config_path:
            tools = await self.load_tools(servers_config_path) + list(internal_tools)
        else:
            # FIX: previously internal_tools were discarded without an MCP config.
            tools = list(internal_tools)
        checkpointer = InMemorySaver()
        return create_react_agent(model=model, tools=tools, prompt=prompt, checkpointer=checkpointer)

    async def create_main_agent(self, model_name, prompts_file_path, servers_config_path, internal_tools, api_type='ChatDeepSeek'):
        """Build the main prebuilt ReAct agent with MCP plus internal tools."""
        model = self.load_model(model_name, api_type)
        prompt = self.load_prompt(prompts_file_path)
        # FIX: wrap internal_tools in list() so tuples concatenate with the
        # MCP tool list (list + tuple raises TypeError), matching
        # create_click_to_ask_agent.
        tools = await self.load_tools(servers_config_path) + list(internal_tools)
        checkpointer = InMemorySaver()
        return create_react_agent(model=model, tools=tools, prompt=prompt, checkpointer=checkpointer)

    async def create_assistant_agent(self, model_name, prompt):
        """Build a tool-less assistant agent from an in-memory prompt string."""
        # Route through load_model for consistency with the other builders
        # (same default backend: ChatDeepSeek).
        model = self.load_model(model_name)
        return create_react_agent(model=model, tools=[], prompt=prompt)

