import os
from typing import Annotated, TypedDict

from langchain_core.messages import HumanMessage, SystemMessage
from langchain_openai import ChatOpenAI
from langgraph.checkpoint.memory import MemorySaver
from langgraph.graph import END, START, StateGraph, add_messages




# SECURITY: a previous revision hard-coded a live API key here. That key is
# compromised the moment it is committed — rotate it, and load credentials
# from the environment instead of source code.
api_key = os.environ.get("OPENAI_API_KEY")
# Base URL of the OpenAI-compatible endpoint; overridable via environment.
api_base = os.environ.get("OPENAI_API_BASE", "https://chatapi.littlewheat.com/v1")

if not api_key:
    raise RuntimeError("Set the OPENAI_API_KEY environment variable before running.")

# Shared chat-model client used by every node in the graph below.
llm = ChatOpenAI(model="gpt-4o", api_key=api_key, base_url=api_base)

# Graph state: a single `messages` channel whose updates are merged by
# langgraph's `add_messages` reducer (appends rather than overwrites).
State = TypedDict("State", {"messages": Annotated[list, add_messages]})

def call_model(state: State):
    """Send the accumulated conversation to the LLM and append its reply."""
    ai_reply = llm.invoke(state["messages"])
    return {"messages": ai_reply}

def translate_message(state: State):
    """Translate the most recent message in the state into English."""
    instruction = """
        Please translate the received text in any language into English as output
        """
    # Only the newest message is translated, not the whole history.
    latest = state["messages"][-1]
    prompt = [
        SystemMessage(content=instruction),
        HumanMessage(content=latest.content),
    ]
    return {"messages": llm.invoke(prompt)}

# Wire up the two-step pipeline: answer the user, then translate the answer.
builder = StateGraph(State)

for node_name, node_fn in (
    ("call_model", call_model),
    ("translate_message", translate_message),
):
    builder.add_node(node_name, node_fn)

for src, dst in (
    (START, "call_model"),
    ("call_model", "translate_message"),
    ("translate_message", END),
):
    builder.add_edge(src, dst)

# Attach a checkpointer at compile time so conversation state persists
# across invocations that share the same thread_id.
memory = MemorySaver()
graph_with_memory = builder.compile(checkpointer=memory)

# thread_id may be any value; it keys the persisted conversation.
config = {"configurable": {"thread_id": "1"}}

for chunk in graph_with_memory.stream({"messages": ["你好，我叫西瓜老师"]}, config, stream_mode="values"):
    chunk["messages"][-1].pretty_print()

# Second turn on the same thread: the checkpointer supplies the earlier
# messages, so the model can recall the user's name.
for chunk in graph_with_memory.stream({"messages": ["请问我叫什么？"]}, config, stream_mode="values"):
    chunk["messages"][-1].pretty_print()