from typing import Literal

from langchain_core.messages import HumanMessage
from langchain_core.tools import tool
from langgraph.checkpoint.memory import MemorySaver
from langgraph.constants import END
from langgraph.graph import MessagesState, StateGraph
from langgraph.prebuilt import ToolNode

from src.ai.langchain.init_llm import get_llm


@tool
def get_weather(query: str) -> str:
    """Return a canned weather report for the location mentioned in *query*."""
    normalized = query.lower()
    # Shanghai (in Chinese or English) gets the detailed report; anything else
    # falls through to the generic rainy answer.
    if any(keyword in normalized for keyword in ("上海", "shanghai")):
        return "上海天气晴转多云，最低气温 28.5℃"
    return "天降大雨"


# All tools the agent may call; also used to build the tool-execution node.
tools = [get_weather]

# Graph node that executes whichever tool calls the model emits.
tool_node = ToolNode(tools)

# Initialize the LLM and bind the tool schemas so it can emit tool calls.
llm = get_llm().bind_tools(tools)


# 定义函数是否执行
# Router: decide where the graph goes after each model turn.
# NOTE(review): ``END`` inside ``Literal`` is a variable, not a literal, which
# strict type checkers reject; kept as-is to match LangGraph's example style.
def should_continue(state: MessagesState) -> Literal["tools", END]:
    """Route to the tool node when the last message requests tool calls.

    Returns "tools" if the latest message carries tool calls, otherwise END
    to terminate the graph run.
    """
    last_message = state["messages"][-1]
    # Only AI messages carry ``tool_calls``; getattr avoids an AttributeError
    # if routing ever runs on a message type without that attribute.
    if getattr(last_message, "tool_calls", None):
        return "tools"
    return END


# 定义调用模型的函数
def call_model(state: MessagesState):
    messages = state['messages']
    response = llm.invoke(messages)
    return {"messages": [response]}


# 初始化状态图
# Assemble the agent graph: the model node loops through the tool node
# until the model stops emitting tool calls.
builder = StateGraph(MessagesState)

builder.add_node("agent", call_model)
builder.add_node("tools", tool_node)

# Every run starts with a model turn.
builder.set_entry_point("agent")

# After each model turn, route to the tools or finish.
builder.add_conditional_edges("agent", should_continue)

# Tool results always flow back to the model for the next turn.
builder.add_edge("tools", "agent")

# In-memory checkpointer: persists per-thread state across invocations.
memory = MemorySaver()

# Compile into a runnable app (name referenced by the calls below).
app = builder.compile(checkpointer=memory)

# 执行
# Two turns on the same thread id: the checkpointer carries the first
# question's context into the second, so the follow-up can be answered.
config = {"configurable": {"thread_id": "42"}}
for question in ("上海的天气怎么样", "我问的是哪个城市"):
    final_state = app.invoke(
        {"messages": [HumanMessage(content=question)]},
        config=config,
    )
    result = final_state["messages"][-1].content
    print(result)

# Render the compiled graph topology to a PNG for inspection.
graph_png = app.get_graph().draw_mermaid_png()

with open("langgraph_base.png", "wb") as f:
    f.write(graph_png)
