from pprint import pprint
from typing import Literal, TypedDict
from langchain_openai import ChatOpenAI
from langchain_core.tools import tool
from langgraph.graph import StateGraph, START, END,MessagesState
from langgraph.graph.message import add_messages
from langchain_core.messages import AIMessage, HumanMessage, ToolMessage
from langgraph.prebuilt import ToolNode
from langgraph.checkpoint.memory import MemorySaver

from config.model_config import get_chat_openai_zhipu


@tool
def search(city: str) -> str:
    """获取指定城市的天气信息"""
    # NOTE: the docstring above doubles as the tool description the LLM sees,
    # so it is kept verbatim.
    # Normalize once, then match either the Chinese or English name of Shanghai.
    normalized = city.lower()
    if "上海" in normalized or "shanghai" in normalized:
        return "现在30度, 有雾"
    # Any other city gets the default (fake) weather report.
    return "现在35度, 阳光明媚"

# Tools available to the agent (currently only the weather lookup).
tools = [search]

# Prebuilt graph node that executes any tool calls found in the last message.
tool_nodes = ToolNode(tools)


# Chat model with the tool schemas bound, so it can emit tool calls.
# (Exact binding behavior is provider-specific — check the official docs.)
model = get_chat_openai_zhipu().bind_tools(tools)

def should_continue(state: MessagesState) -> Literal["tools", END]:
    """Route after the agent node.

    Returns "tools" when the last message carries tool calls (so the
    ToolNode executes them), otherwise END to finish this graph run.
    """
    last_message = state["messages"][-1]
    # getattr guards against message types without a `tool_calls`
    # attribute (e.g. HumanMessage), which would otherwise raise
    # AttributeError here.
    if getattr(last_message, "tool_calls", None):
        return "tools"
    return END

# 1. Node function that invokes the LLM on the current conversation.
def call_model(state: MessagesState) -> MessagesState:
    """Invoke the bound model and return its response as a state update.

    MessagesState merges message lists additively, so returning a
    single-element list appends the response to the existing history.
    """
    response = model.invoke(state["messages"])
    return {"messages": [response]}

# 2. Create a new state graph, initialized with the shared message state.
workflow = StateGraph(MessagesState)

# 3. Define the graph nodes: the two nodes we will loop between.
workflow.add_node("agent", call_model)
workflow.add_node("tools", tool_nodes)

# 4. Define the entry point and the graph edges.
workflow.set_entry_point("agent")

# Conditional edge: after "agent", should_continue routes to "tools" or END.
workflow.add_conditional_edges(
    "agent",
    should_continue
)

# Plain edge: after the tools run, control returns to the agent.
workflow.add_edge("tools", "agent")

# In-memory checkpointer that persists state between graph runs.
checkpointer = MemorySaver()  # could be swapped for e.g. a Redis-backed store

# 5. Compile the graph into a runnable app.
app = workflow.compile(checkpointer=checkpointer)

# # 6.执行图, 使用可运行对象
# final_response = app.invoke({"messages": [HumanMessage(content="上海的天气怎么样")]},
#                             config={"configurable": {"thread_id": "42"}})
#
# result = final_response['messages'][-1].content
# print(result)
# final_response = app.invoke({"messages": [HumanMessage(content="我问的哪个城市")]},
#                             config={"configurable": {"thread_id": "42"}})
# result = final_response['messages'][-1].content
# print(result)



# Simple REPL. Every turn reuses thread_id "42", so the MemorySaver
# checkpointer carries the conversation history across invocations.
while True:
    user_input = input("User: ")
    if user_input.lower() in {"quit", "exit", "q"}:
        print("Goodbye!")
        break
    final_response = app.invoke(
        {"messages": [HumanMessage(content=user_input)]},
        config={"configurable": {"thread_id": "42"}},
    )
    # Show only the content of the last (assistant) message.
    print(final_response["messages"][-1].content)

