# -*- coding: utf-8 -*-
import asyncio
from typing import TypedDict, Annotated, Sequence

from langchain_core.messages import BaseMessage, ToolMessage, AIMessage, HumanMessage
from langchain_core.tools import ToolException
from langgraph.constants import START, END
from langgraph.graph import StateGraph

from core.base import chat_llm
from mcp_client.MultiClinet import getMCPTools
from util.tools import get_weather

import time
from datetime import datetime
# Agent state definition
class AgentState(TypedDict):
    """Agent state: the running message history for one graph execution.

    NOTE(review): the Annotated metadata here is the literal string
    "add_messages", not the ``add_messages`` reducer function from
    ``langgraph.graph.message`` — LangGraph will therefore apply plain
    overwrite semantics to this key. The nodes compensate by always
    returning the full accumulated list; confirm this is intentional.
    """
    messages: Annotated[Sequence[BaseMessage], "add_messages"]

# Tool registry (name -> tool)
async def get_all_tools():
    """Collect every available tool, keyed by its name.

    Merges the locally defined tools with the tools discovered from the
    MCP client and returns a name -> tool mapping used for dispatch.
    """
    remote_tools = await getMCPTools()
    combined = [get_weather, *remote_tools]
    return {tool.name: tool for tool in combined}
# Reasoning node: the LLM decides whether to call a tool
async def reason_node(state: AgentState) -> AgentState:
    """Reasoning node: let the LLM decide whether tools are needed.

    Binds every available tool to the chat model, invokes it on the
    current message history, and appends the model's reply to the state.

    Args:
        state: Current agent state; ``state["messages"]`` is the history.

    Returns:
        New state dict with the model response appended to the messages.
    """
    model = chat_llm()
    tools = await get_all_tools()
    # Bind the tools so the model can emit tool_calls in its response.
    model = model.bind_tools(tools=tools.values())
    messages = state["messages"]
    # BUGFIX: use the async API — the original called the synchronous
    # ``invoke`` inside an async node, blocking the event loop for the
    # duration of the network round-trip.
    response = await model.ainvoke(messages)
    return {"messages": messages + [response]}


# Tool-call node
async def tool_call_node(state: AgentState) -> AgentState:
    """Tool-execution node: run every tool call requested by the LLM.

    Each requested tool is looked up by name and invoked asynchronously.
    Results — or error text on failure — are appended as ``ToolMessage``s
    so the model can observe them on a later turn.

    Args:
        state: Current agent state; the last message is expected to be
            the model response that may carry ``tool_calls``.

    Returns:
        New state dict with one ToolMessage appended per tool call
        (unchanged state when there are no tool calls).
    """
    messages = state["messages"]
    last_message = messages[-1]

    # ROBUSTNESS: only AI messages carry ``tool_calls``; the original
    # attribute access would raise AttributeError on e.g. a HumanMessage.
    tool_calls = getattr(last_message, "tool_calls", None)
    if not tool_calls:
        return {"messages": messages}

    tools = await get_all_tools()
    tool_results = []
    for tool_call in tool_calls:
        tool_name = tool_call["name"]
        tool_args = tool_call["args"]
        tool_id = tool_call["id"]

        try:
            tool = tools.get(tool_name)
            if tool is None:
                # BUGFIX: the original message had a stray quote
                # ("Tool x' not found."); quotes now balanced.
                raise ToolException(f"Tool '{tool_name}' not found.")
            result = await tool.ainvoke(tool_args)
            content = str(result)
        except Exception as e:
            # Surface the failure to the model instead of crashing the graph.
            content = f"Error: {str(e)}"
        tool_results.append(
            ToolMessage(
                content=content,
                tool_call_id=tool_id,
                name=tool_name,
            )
        )

    return {"messages": messages + tool_results}

# Conditional routing: decide whether to call tools next or finish
def should_call_tool(state: AgentState) -> str:
    """Route after the agent node.

    Returns "tool_call" when the latest message is an AI message that
    requested tool invocations, otherwise "end".
    """
    latest = state["messages"][-1]
    wants_tools = isinstance(latest, AIMessage) and bool(latest.tool_calls)
    return "tool_call" if wants_tools else "end"

# Build the graph
async def create_charts_react_agent():
    """Build and compile the charts ReAct-style agent graph.

    Flow: START -> "agent" (LLM reasoning); from there, either route to
    "tool_call" (tool execution) or finish directly.

    NOTE(review): "tool_call" goes straight to END — a single tool round,
    so the model never sees the tool output. Confirm this is intended
    rather than an edge back to "agent".
    """
    builder = StateGraph(AgentState)

    # Nodes: one for LLM reasoning, one for executing requested tools.
    builder.add_node("agent", reason_node)
    builder.add_node("tool_call", tool_call_node)

    # Edges: begin at the agent, then branch on whether tools were requested.
    builder.add_edge(START, "agent")
    builder.add_conditional_edges(
        "agent",
        should_call_tool,
        {"tool_call": "tool_call", "end": END},
    )
    builder.add_edge("tool_call", END)

    return builder.compile()

# Run the agent
