import os

from langchain_openai import ChatOpenAI
from langgraph.constants import START, END
from langgraph.graph import StateGraph
from typing_extensions import TypedDict, Optional


# 数据结构创建
# 定义输⼊的模式
class InputState(TypedDict):
    """Input schema for the graph: the user's question plus the intermediate answer."""
    question: str  # the user's question, supplied by the caller
    llm_answer: Optional[str]  # str once llm_node has produced an answer, otherwise None


# 定义输出的模式
class OutputState(TypedDict):
    """Output schema for the graph: only the final answer is returned to callers."""
    answer: str  # final answer produced by action_node


# 将 InputState 和 OutputState 这两个 TypedDict 类型合并成⼀个更全⾯的字典类型。
class OverallState(InputState, OutputState):
    """Combined internal graph state: the union of the input and output schemas."""
    pass


# 创建大模型节点
def llm_node(state: InputState) -> dict:
    """Node 1: answer the user's question with the DeepSeek chat model.

    Reads ``state["question"]`` and returns a partial state update that
    stores the raw model reply under ``llm_answer``.

    Raises:
        KeyError: if the ``DEEPSEEK_API_KEY`` environment variable is unset.
    """
    messages = [
        ("system", "你是⼀位乐于助⼈的智能⼩助理"),
        ("human", state["question"]),
    ]
    llm = ChatOpenAI(
        model="deepseek-chat",
        # Security fix: never commit API secrets in source — read the key
        # from the environment instead of hard-coding it.
        api_key=os.environ["DEEPSEEK_API_KEY"],
        base_url="https://api.deepseek.com/v1",
        temperature=0,  # deterministic output for a Q&A assistant
    )
    response = llm.invoke(messages)
    return {"llm_answer": response.content}


# 节点2
def action_node(state: InputState) -> dict:
    """Node 2: rewrite the intermediate answer as bilingual (Chinese/English) output.

    Reads ``state["llm_answer"]`` (produced by ``llm_node``) and returns a
    partial state update with the final text under ``answer``.

    Raises:
        KeyError: if the ``DEEPSEEK_API_KEY`` environment variable is unset.
    """
    messages = [
        ("system", "⽆论你接收到什么语⾔的⽂本，使用中英双语输出"),
        ("human", state["llm_answer"]),
    ]
    llm = ChatOpenAI(
        model="deepseek-chat",
        # Security fix: never commit API secrets in source — read the key
        # from the environment instead of hard-coding it.
        api_key=os.environ["DEEPSEEK_API_KEY"],
        base_url="https://api.deepseek.com/v1",
        temperature=0,  # deterministic output
    )
    response = llm.invoke(messages)
    return {"answer": response.content}


# 明确指定它的输⼊和输出数据的结构或模式
# Build the graph with separate input/output schemas, so callers only supply
# `question` and only receive `answer`.
builder = StateGraph(
    OverallState,
    input_schema=InputState,
    output_schema=OutputState,
)

# Nodes
builder.add_node("llm_node", llm_node)
builder.add_node("action_node", action_node)

# Edges: a linear pipeline START -> llm_node -> action_node -> END
builder.add_edge(START, "llm_node")
builder.add_edge("llm_node", "action_node")
builder.add_edge("action_node", END)

# Compile once at module level so `graph` can be imported elsewhere.
graph = builder.compile()

# Fix: guard the demo invocation so merely importing this module does not
# trigger a network call and print to stdout.
if __name__ == "__main__":
    final_answer = graph.invoke({"question": "你好，请你详细的介绍⼀下你⾃⼰"})
    print(final_answer["answer"])
