import os

from dotenv import load_dotenv
from langchain_ollama import ChatOllama
from langchain_tavily import TavilySearch
from langgraph.constants import END
from langgraph.prebuilt import create_react_agent
from langchain_core.messages import convert_to_messages
from langgraph_supervisor import create_supervisor
from langchain.chat_models import init_chat_model
from typing import Annotated
from langchain_core.tools import tool, InjectedToolCallId
from langgraph.prebuilt import InjectedState
from langgraph.graph import StateGraph, START, MessagesState
from langgraph.types import Command


def add(a: float, b: float):
    """Return the sum of the two operands."""
    total = a + b
    return total


def multiply(a: float, b: float):
    """Return the product of the two operands."""
    product = a * b
    return product


def divide(a: float, b: float):
    """Return the quotient of ``a`` divided by ``b``."""
    quotient = a / b
    return quotient


def pretty_print_message(message, indent=False):
    """Print a message's pretty representation to stdout.

    When ``indent`` is true, every line of the rendered text is prefixed
    with a tab so subgraph output is visually nested.
    """
    rendered = message.pretty_repr(html=True)
    if indent:
        rendered = "\n".join(f"\t{line}" for line in rendered.split("\n"))
    print(rendered)


def pretty_print_messages(update, last_message=False):
    """Render one streamed graph update to stdout.

    ``update`` is either a ``{node_name: node_update}`` mapping, or a
    ``(namespace, mapping)`` tuple produced when streaming with subgraphs.
    When ``last_message`` is true, only the newest message of each node
    update is printed.
    """
    is_subgraph = False
    if isinstance(update, tuple):
        ns, update = update
        # An empty namespace means the update came from the parent graph;
        # those are skipped in the printout.
        if not ns:
            return
        graph_id = ns[-1].split(":")[0]
        print(f"Update from subgraph {graph_id}:")
        print("\n")
        is_subgraph = True

    for node_name, node_update in update.items():
        label = f"Update from node {node_name}:"
        print(("\t" + label) if is_subgraph else label)
        print("\n")

        node_messages = convert_to_messages(node_update["messages"])
        if last_message:
            node_messages = node_messages[-1:]

        for msg in node_messages:
            pretty_print_message(msg, indent=is_subgraph)
        print("\n")


def create_handoff_tool(*, agent_name: str, description: str | None = None):
    """Build a tool that transfers control to ``agent_name``.

    The returned tool appends a tool-result message acknowledging the
    handoff and issues a ``Command`` routing the parent graph to the
    target agent.
    """
    name = f"transfer_to_{agent_name}"
    description = description or f"将任务交由 {agent_name} 处理。"

    @tool(name, description=description)
    def handoff_tool(
            state: Annotated[MessagesState, InjectedState],
            tool_call_id: Annotated[str, InjectedToolCallId],
    ) -> Command:
        # Acknowledge the transfer so the tool call is resolved in history.
        ack_message = {
            "role": "tool",
            "content": f"任务已成功转交给 {agent_name}",
            "name": name,
            "tool_call_id": tool_call_id,
        }
        next_state = dict(state)
        next_state["messages"] = state["messages"] + [ack_message]
        # graph=Command.PARENT routes within the supervisor's parent graph.
        return Command(goto=agent_name, update=next_state, graph=Command.PARENT)

    return handoff_tool


if __name__ == '__main__':
    load_dotenv(override=True)

    # Chat model served by a local Ollama instance; shared by every agent below.
    model = ChatOllama(model="qwen3:30b", base_url="http://192.168.97.217:11434")

    # Smoke-test the Tavily web-search tool before wiring it into an agent.
    web_search = TavilySearch(max_results=3)
    web_search_results = web_search.invoke("谁是纽约市市长?")
    # Fix: the hit list can be empty — indexing [0] unconditionally raised
    # IndexError; only print the top hit when one exists.
    search_hits = web_search_results.get("results", [])
    if search_hits:
        print(search_hits[0]["content"])

    # Research agent: web search only, explicitly forbidden from doing math.
    research_agent = create_react_agent(
        model=model,
        tools=[web_search],
        prompt=(
            "你是一个研究助理。\n\n"
            "指令：\n"
            "- 只处理与研究相关的任务，不要执行任何数学运算\n"
            "- 完成任务后，直接将结果发送给监督员（supervisor）\n"
            "- 回复时只包含研究结果，不要添加其他任何额外文本。"
        ),
        name="research_agent",
    )

    for chunk in research_agent.stream(
            {"messages": [{"role": "user", "content": "谁是纽约市市长?"}]}
    ):
        pretty_print_messages(chunk)
    print("=============================\n")

    # Math agent: restricted to the three arithmetic tools defined above.
    math_agent = create_react_agent(
        model=model,
        tools=[add, multiply, divide],
        prompt=(
            "你是一个数学助理。\n\n"
            "指令：\n"
            "- 只处理与数学计算相关的任务\n"
            "- 完成任务后，直接将结果发送给监督员（supervisor）\n"
            "- 回复时只包含计算结果，不要添加其他任何额外文本。"
        ),
        name="math_agent",
    )

    for chunk in math_agent.stream(
            {"messages": [{"role": "user", "content": "what's (3 + 5) x 7"}]}
    ):
        pretty_print_messages(chunk)

    # Variant 1: prebuilt supervisor from langgraph_supervisor.
    supervisor = create_supervisor(
        model=model,
        agents=[research_agent, math_agent],
        prompt=(
            "你是一个监督员，负责协调以下两个代理执行任务：\n"
            "- 研究代理：用于处理研究类问题。\n"
            "- 数学代理：用于处理数学计算任务。\n"
            "每次只能指派一个代理工作，禁止并行调用多个代理。\n"
            "你不能自己执行任何任务，只能安排代理来完成。"
        ),
        add_handoff_back_messages=True,
        output_mode="full_history",
    ).compile()

    # Fix: a draw_mermaid_png() call whose result was discarded stood here;
    # it did dead (and network-bound) rendering work, so it was removed.

    for chunk in supervisor.stream(
            {
                "messages": [
                    {
                        "role": "user",
                        "content": "请查找 2024 年美国和纽约州的 GDP，纽约州的 GDP 占美国 GDP 的百分比是多少？",
                    }
                ]
            },
    ):
        pretty_print_messages(chunk, last_message=True)

    # `chunk` holds the last streamed update once the loop finishes.
    final_message_history = chunk["supervisor"]["messages"]
    print(f"Final message history:{final_message_history}")

    # Variant 2: hand-rolled supervisor built from explicit handoff tools.
    assign_to_research_agent = create_handoff_tool(
        agent_name="research_agent",
        description="指派任务给研究代理处理",
    )

    assign_to_math_agent = create_handoff_tool(
        agent_name="math_agent",
        description="指派任务给数学代理处理",
    )

    supervisor_agent = create_react_agent(
        model=model,
        tools=[assign_to_research_agent, assign_to_math_agent],
        prompt=(
            "你是一个监督员，负责协调以下两个代理执行任务：\n"
            "- 研究代理：用于处理研究类问题。\n"
            "- 数学代理：用于处理数学计算任务。\n"
            "每次只能指派一个代理工作，禁止并行调用多个代理。\n"
            "你不能自己执行任何任务，只能安排代理来完成。"
        ),
        name="supervisor",
    )

    supervisor = (
        StateGraph(MessagesState)
        # NOTE: `destinations` is only needed for visualization and doesn't affect runtime behavior
        .add_node(supervisor_agent, destinations=("research_agent", "math_agent", END))
        .add_node(research_agent)
        .add_node(math_agent)
        .add_edge(START, "supervisor")
        # Worker agents always hand control back to the supervisor.
        .add_edge("research_agent", "supervisor")
        .add_edge("math_agent", "supervisor")
        .compile()
    )

    # Render the state graph to a PNG for inspection.
    graph_image = supervisor.get_graph().draw_mermaid_png()
    with open("supervisor.png", "wb") as f:
        f.write(graph_image)

    for chunk in supervisor.stream(
            {
                "messages": [
                    {
                        "role": "user",
                        "content": "请查找 2024 年美国和纽约州的 GDP，纽约州的 GDP 占美国 GDP 的百分比是多少?",
                    }
                ]
            },
    ):
        pretty_print_messages(chunk, last_message=True)

    final_message_history = chunk["supervisor"]["messages"]
    print(f"Final message history:{final_message_history}")
