from pprint import pprint
from typing import Literal, TypedDict

from langchain_community.vectorstores import Chroma
from langchain_openai import ChatOpenAI
from langchain_core.tools import tool, create_retriever_tool
from langgraph.graph import StateGraph, START, END,MessagesState
from langgraph.graph.message import add_messages
from langchain_core.messages import AIMessage, HumanMessage, ToolMessage
from langgraph.prebuilt import ToolNode
from langgraph.checkpoint.memory import MemorySaver

from config.embedding_config import get_openai_embeddings_xin, get_openai_embeddings_local
from config.model_config import get_chat_openai_zhipu, get_chat_openai_zhipu_flash_250414



# NOTE(review): model factory comes from local project config; refer to the
# provider's official documentation for the model's exact capabilities.
model = get_chat_openai_zhipu_flash_250414()



# 1. Define the node function that calls the chat model
def call_model(state: MessagesState) -> MessagesState:
    """Invoke the chat model on the accumulated conversation history.

    Args:
        state: Graph state whose "messages" key holds the full message
            history (HumanMessage / AIMessage objects accumulated by the
            MessagesState reducer).

    Returns:
        A partial state update containing only the model's response; the
        add_messages reducer APPENDS it to the existing history rather
        than replacing it, so we must not mutate state["messages"] here.
    """
    # Pass the entire history so the model sees prior conversation turns.
    response = model.invoke(state["messages"])
    return {"messages": [response]}

# 2. Build a new state graph keyed on the shared message state
workflow = StateGraph(MessagesState)

# 3. Register the single model-calling node
workflow.add_node("agent", call_model)

# 4. Wire the entry edge: every run starts at the "agent" node
# (add_edge(START, ...) is the documented equivalent of set_entry_point)
workflow.add_edge(START, "agent")

# In-memory checkpointer so conversation state persists between runs of
# the graph; a Redis-backed saver could be swapped in for production.
checkpointer = MemorySaver()

# 5. Compile the graph into a runnable app with persistence enabled
app = workflow.compile(checkpointer=checkpointer)


# Interactive REPL: one graph run per user turn, streamed token events.
while True:
    user_input = input("User: ")
    if user_input.lower() in ["quit", "exit", "q"]:
        print("Goodbye!")
        break
    # BUG FIX: the original iterated app.invoke(...), but invoke() returns
    # the FINAL state (a dict), so the for-loop only walked its keys and
    # the stream_mode argument was effectively ignored. app.stream() is
    # the API that yields per-message events for stream_mode="messages".
    for event in app.stream(
            # Wrap raw text explicitly instead of relying on implicit
            # string-to-message coercion by the add_messages reducer.
            {"messages": [HumanMessage(content=user_input)]},
            # Fixed thread_id keeps one persistent conversation in the
            # MemorySaver checkpointer across loop iterations.
            config={"configurable": {"thread_id": 42}},
            stream_mode="messages",
    ):
        print(event)

