from langchain_openai import ChatOpenAI
from langgraph.checkpoint.memory import MemorySaver
from langchain_core.messages import HumanMessage
from langgraph.graph import START, MessagesState, StateGraph
import os
# Define the chat model (DeepSeek, reached through the OpenAI-compatible API).
# NOTE(review): os.environ[...] raises KeyError if DEEPSEEK_API_KEY or
# DEEPSEEK_BASE_URL is not exported — confirm the environment before running.
llm = ChatOpenAI(model_name="deepseek-chat", api_key=os.environ["DEEPSEEK_API_KEY"],base_url=os.environ["DEEPSEEK_BASE_URL"])


# Build a state graph whose state is the running list of chat messages
# (MessagesState appends new messages to the existing history).
workflow = StateGraph(state_schema=MessagesState)

# Graph node: forward the accumulated conversation to the LLM.
def call_model(state: MessagesState):
    """Invoke the model with the current message history and return its reply."""
    history = state["messages"]
    print("Messages before invoke:", history)
    return {"messages": llm.invoke(history)}

# Wire the graph: the entry point routes straight to the "model" node, which
# simply calls the LLM. (Nothing is written to a database here — persistence
# is handled by the checkpointer attached at compile time below.)
workflow.add_edge(START, "model")
workflow.add_node("model", call_model)

# Attach an in-memory checkpointer so the conversation history is kept
# between successive app.invoke() calls within this process.
memory = MemorySaver()
app = workflow.compile(checkpointer=memory)

# Every invocation uses the same thread_id, so all turns share one history.
config = {"configurable": {"thread_id": "abc123"}}

# Interactive chat loop: each turn sends the user's message through the graph;
# MemorySaver plus the fixed thread_id carry the full history between turns.
while True:
    print("-" * 50)
    user_input = input("我的输入：")
    input_messages = [HumanMessage(user_input)]
    response = app.invoke({"messages": input_messages}, config)
    # Print only the reply text: printing the AIMessage object itself would
    # dump its repr (content plus metadata) instead of just the answer.
    print("Ai机器人的回答：", response["messages"][-1].content)
    print("*" * 10)
    # Ask whether to keep chatting; accept only "0" (stop) or "1" (continue).
    chat_state = input("是否还继续聊天(0-否/1-是)?")
    while chat_state not in ("0", "1"):
        chat_state = input("请只输入0或者1")
    if chat_state == "0":
        break