import asyncio
import os

import dotenv
from operator import add
from typing import TypedDict, Annotated

from langchain_core.messages import AnyMessage, HumanMessage, SystemMessage, AIMessage
from langchain_mcp_adapters.client import MultiServerMCPClient
from langchain_openai import ChatOpenAI
from langgraph.checkpoint.memory import InMemorySaver
from langgraph.config import get_stream_writer
from langgraph.constants import START, END
from langgraph.graph import StateGraph
from langgraph.prebuilt import create_react_agent

# Load DS_BASE / DS_API_KEY from a local .env file into the environment.
dotenv.load_dotenv()

# Valid classification labels the supervisor may emit; all but "supervisor"
# correspond to a "<label>_node" worker in the graph.
nodes = ["supervisor", "travel", "joke", "couplet", "other"]

# DeepSeek chat model behind an OpenAI-compatible endpoint.
# temperature=0 keeps the supervisor's classification deterministic.
llm = ChatOpenAI(
    model="deepseek-chat",
    base_url=os.getenv("DS_BASE"),
    api_key=os.getenv("DS_API_KEY"),
    temperature=0
)


class State(TypedDict):
    """Shared graph state passed between the supervisor and worker nodes."""

    # Conversation history; the `add` reducer makes node returns append to
    # this list instead of replacing it.
    messages: Annotated[list[AnyMessage], add]
    # Routing signal: classification label ("travel", "joke", ...) set by the
    # supervisor / workers, or END once a worker result has been collected.
    type: str


def other_node(state: State):
    """Fallback worker: reply that the question cannot be answered."""
    print(">>> other_node")
    stream_writer = get_stream_writer()
    stream_writer({"node": ">>>> other_node"})
    reply = AIMessage(content="我暂时无法回答这个问题")
    return {"messages": [reply], "type": "other"}


def supervisor_node(state: State):
    """Classify the user's question and route it to a worker agent.

    First visit (no "type" key yet): ask the LLM to classify the question
    into one of `nodes` and store the label in state["type"].
    Second visit (a worker has written "type"): signal END so the
    conditional edge finishes the run.

    Raises:
        ValueError: if the model returns a label outside of `nodes`.
    """
    print(">>> supervisor_node")
    writer = get_stream_writer()
    writer({"node": ">>>> supervisor_node"})

    # A "type" key means a worker node already handled the question; finish.
    # Checked first so we don't build prompts (or touch messages) needlessly.
    if "type" in state:
        writer({"supervisor_step": f"已获得: {state['type']} 智能体处理结果"})
        return {"type": END}

    # Classification prompt: the model must answer with exactly one label.
    prompt = """你是一个专业的客服助手,负责对用户的问题进行分类,并将任务分给其他Agent执行.
    如果用户的问题是和旅游线路规划相关的,那就返回 travel .
    如果用户的问题是希望讲一个笑话,那就返回 joke .
    如果用户的问题是希望对一个对联,那就返回 couplet .
    如果是其他的问题,返回 other .
    除了这几个选项外,不要返回任何其他的内容.
    """
    prompts = [
        SystemMessage(content=prompt),
        HumanMessage(content=state["messages"][0]),
    ]
    response = llm.invoke(prompts)
    # Strip whitespace/newlines the model may append around the bare label;
    # without this a trailing "\n" would fail the membership check below.
    type_res = response.content.strip()
    writer({"supervisor_step": f"问题分类结果: {type_res}"})
    if type_res in nodes:
        return {"type": type_res}
    raise ValueError(
        f"type {type_res!r} is not in (travel, joke, couplet, other)"
    )


def travel_node(state: State):
    """Worker agent: produce a travel-route plan via a ReAct agent with MCP tools."""
    print(">>> travel_node")
    stream_writer = get_stream_writer()
    stream_writer({"node": ">>>> travel_node"})

    messages = [
        SystemMessage(content="你是一个专业的旅行规划助手,根据用户的问题,生成一个旅游路线规划.请使用中文回答,并返回一个不超过100个字规划结果"),
        HumanMessage(content=state["messages"][0]),
    ]

    # MCP client — configure your MCP server connections here.
    mcp_client = MultiServerMCPClient(
        # Your mcp
    )

    # This LangGraph node runs synchronously, so bridge the async tool
    # discovery call with asyncio.run.
    mcp_tools = asyncio.run(mcp_client.get_tools())
    react_agent = create_react_agent(
        model=llm,
        tools=mcp_tools,
    )
    result = react_agent.invoke({"messages": messages})
    final_answer = result["messages"][-1].content
    stream_writer({"travel_result": final_answer})
    return {"messages": [AIMessage(content=final_answer)], "type": "travel"}


def joke_node(state: State):
    """Worker agent: ask the LLM for a short joke answering the user's question."""
    print(">>> joke_node")
    stream_writer = get_stream_writer()
    stream_writer({"node": ">>>> joke_node"})

    messages = [
        SystemMessage(content="你是一个笑话大师,请根据用户的问题,写一个不超过100个字的笑话"),
        HumanMessage(content=state["messages"][0]),
    ]
    answer = llm.invoke(messages)
    stream_writer({"joke_result": answer.content})
    return {"messages": [AIMessage(content=answer.content)], "type": "joke"}


def couplet_node(state: State):
    """Placeholder worker for couplet requests; returns a stub answer."""
    print(">>> couplet_node")
    stream_writer = get_stream_writer()
    stream_writer({"node": ">>>> couplet_node"})
    stub = AIMessage(content="couplet_node")
    return {"messages": [stub], "type": "couplet"}


# 条件路由
def routing_func(state: State):
    if state["type"] == "travel":
        return "travel_node"
    elif state["type"] == "joke":
        return "joke_node"
    elif state["type"] == "couplet":
        return "couplet_node"
    elif state["type"] == END:
        return END
    else:
        return "other_node"


# Build the graph: a supervisor that classifies, then routes to worker nodes.
builder = StateGraph(State)
# Register all nodes.
builder.add_node("supervisor_node", supervisor_node)
builder.add_node("travel_node", travel_node)
builder.add_node("joke_node", joke_node)
builder.add_node("couplet_node", couplet_node)
builder.add_node("other_node", other_node)
# Wire the edges: every run starts at the supervisor, which routes via
# routing_func to a worker (or END once a result has been collected).
builder.add_edge(START, "supervisor_node")
builder.add_conditional_edges("supervisor_node", routing_func,
                              ["travel_node", "joke_node", "couplet_node", "other_node", END])
# Each worker reports back to the supervisor, which then signals END.
builder.add_edge("travel_node", "supervisor_node")
builder.add_edge("joke_node", "supervisor_node")
builder.add_edge("couplet_node", "supervisor_node")
builder.add_edge("other_node", "supervisor_node")
# Compile with an in-memory checkpointer so state persists per thread_id.
checkpointer = InMemorySaver()
graph = builder.compile(checkpointer=checkpointer)

# Run the graph as a script.
if __name__ == '__main__':
    # thread_id keys the checkpointer so repeated runs share conversation state.
    run_config = {
        "configurable": {
            "thread_id": "1"
        }
    }

    # stream_mode="custom" surfaces the get_stream_writer() payloads emitted by
    # each node; use stream_mode="updates" instead to see raw state updates,
    # or graph.invoke(..., stream_mode="values") for just the final state.
    stream = graph.stream(
        {"messages": ["给我讲一个郭德纲的笑话"]},
        config=run_config,
        stream_mode="custom"
    )
    for chunk in stream:
        print(chunk)
