from typing import Annotated, TypedDict

from langgraph.checkpoint.memory import MemorySaver
from langgraph.graph import add_messages, StateGraph, START, END

from src.agent.my_llm import llm


class ChatState(TypedDict):
    """Graph state carrying the running conversation.

    The `add_messages` annotation is a reducer: partial updates returned
    by nodes are appended to the existing message list instead of
    replacing it, which is what gives the bot its chat history.
    """

    # Accumulated chat messages (user + assistant turns).
    messages: Annotated[list, add_messages]


def chat_bot(state: ChatState):
    """Run one chat turn: feed the accumulated history to the LLM.

    Returns a partial state update; the `add_messages` reducer on
    `ChatState.messages` appends the model's reply to the history.
    """
    history = state["messages"]
    reply = llm.invoke(history)
    return {"messages": [reply]}


# Wire a minimal one-node graph: START -> chat_bot -> END.
builder = StateGraph(ChatState)

# Fix: add_node's documented order is (name, action). The original call
# add_node(chat_bot, "chat_bot") passed them swapped and only worked by
# coincidence because the function's __name__ matched the intended name
# (the string argument was ignored, or raises in some langgraph versions).
builder.add_node("chat_bot", chat_bot)
builder.add_edge(START, "chat_bot")
builder.add_edge("chat_bot", END)

# In-memory checkpointer: saves the state after each step, keyed by
# thread_id, so repeated invocations on the same thread share history.
memory = MemorySaver()
graph = builder.compile(checkpointer=memory)

# All invocations using this thread_id continue one conversation.
config = {
    "configurable": {
        "thread_id": "session_10"
    }
}

state1 = graph.invoke({"messages": [{"role": "user", "content": "你好啊！我是ALM"}]}, config=config)
# The second turn can refer back to the first because the checkpointer
# restored the accumulated messages for thread "session_10".
state2 = graph.invoke({"messages": [{"role": "user", "content": "你好啊！我是谁？"}]}, config=config)
print(f"state2: {state2}")
