# Message types used to build the conversation history.
# NOTE(review): AIMessage is currently unused here — possibly needed by code outside this view.
from langchain_core.messages import AIMessage, HumanMessage
import os
from langchain.chat_models import init_chat_model

# Enable LangSmith tracing; traces are sent to the "default" project.
os.environ["LANGSMITH_TRACING"] = "true"
os.environ["LANGSMITH_PROJECT"] = "default"

# LangGraph building blocks: in-memory checkpointing and the graph API.
from langgraph.checkpoint.memory import MemorySaver
from langgraph.graph import START, MessagesState, StateGraph

# Initialize the chat model first.
# NOTE(review): base_url points at a third-party proxy — confirm this is intended.
model = init_chat_model(
    model="gpt-3.5-turbo",
    model_provider="openai",
    api_key=os.getenv("OPENAI_API_KEY"),
    base_url="https://api.openai-proxy.org/v1",
)

# Build a new state graph whose state is the running message list.
workflow = StateGraph(state_schema=MessagesState)

def call_model(state: MessagesState):
    """Invoke the chat model on the conversation so far.

    Returns a partial state update; LangGraph merges the AI reply
    into the existing message list.
    """
    ai_reply = model.invoke(state["messages"])
    return {"messages": ai_reply}

# Wire up the graph: a single "model" node reached directly from START.
workflow.add_node("model", call_model)
workflow.add_edge(START, "model")

# Compile with an in-memory checkpointer so conversation state
# persists across invocations that share the same thread_id.
memory = MemorySaver()
app = workflow.compile(checkpointer=memory)

# Each thread_id identifies an independent, persisted conversation.
config = {"configurable": {"thread_id": "abc123"}}


def _chat(text, cfg):
    """Send one human message through the app and print the reply."""
    result = app.invoke({"messages": [HumanMessage(text)]}, cfg)
    result["messages"][-1].pretty_print()  # print only the latest message


_chat("Hi! I'm Bob.", config)

# Same thread: the checkpointer recalls the earlier turn, so the
# model can answer from memory.
_chat("What's my name?", config)

# Fresh thread: no shared history, so the same question starts cold.
config = {"configurable": {"thread_id": "abc234"}}
_chat("What's my name?", config)