'''
LangGraph chat workflow.

Can also be seen as a TAO loop (presumably Thought-Action-Observation --
the original note only says "TAO"; confirm with the author).
'''
# pip install "langgraph>0.2.27"   (quote the spec so the shell does not treat > as a redirect)

# LangSmith tracing: enabled via env var; API key left for the user to supply.
import os

os.environ["LANGSMITH_TRACING"] = "true"
# os.environ["LANGSMITH_API_KEY"] = smith_key
# LLM client credentials (supply before running):
# os.environ["OPENAI_API_KEY"] = keyx
from langchain.chat_models import init_chat_model
# Chat model used by the single graph node below.
model = init_chat_model("gpt-4o-mini", model_provider="openai")


from langgraph.checkpoint.memory import MemorySaver
from langgraph.graph import START, MessagesState, StateGraph

# Define a new graph
from typing import Sequence
from langchain_core.messages import BaseMessage
from langgraph.graph.message import add_messages
from typing_extensions import Annotated, TypedDict
from typing_extensions import Annotated, TypedDict
class MyState(TypedDict): # state schema consumed by prompt_template
    """Graph state: the running conversation plus a persona string.

    `Personx` fills the "{Personx}" slot of the system prompt; `messages`
    uses the `add_messages` reducer, so node return values are appended to
    (not replacing) the existing history.
    """
    # Conversation history; add_messages merges new messages into this channel.
    messages: Annotated[Sequence[BaseMessage], add_messages]
    # Persona name interpolated into the system prompt (e.g. "海盗" = pirate).
    Personx: str
# Earlier alternatives, kept for reference:
# workflow = StateGraph(state_schema=MessagesState)
# workflow = StateGraph(state_schema=MyState)
workflow = StateGraph(state_schema=MyState) # custom schema so prompt_template can read Personx


# Prompt-rendering logic: a system message with a {Personx} persona slot,
# followed by the accumulated conversation messages.
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
prompt_template = ChatPromptTemplate.from_messages(
    [
        # ("system","你说话像个海盗。尽你所能回答所有问题。",),
    ("system","你说话像个 {Personx}。尽你所能回答所有问题。",),
        MessagesPlaceholder(variable_name="messages"),
    ]
) # rendered inside the graph's single node (call_model)
# from langchain_core.messages import HumanMessage
# prompt = prompt_template.invoke({"messages":  [HumanMessage("你好,我是bob.")]})

# The single node of the graph: state in, model reply out.
def call_model(state: MyState):
    """Render the chat prompt from `state` and invoke the LLM on it.

    Returns a partial state update; the reply lands in the `messages`
    channel of MyState.
    """
    # Render first, then call the model -- same two-step order as before,
    # just expressed as one chained expression.
    return {"messages": model.invoke(prompt_template.invoke(state))}


# Define the (single) node in the graph.
# NOTE(review): the edge is declared before the "model" node is registered;
# LangGraph appears to tolerate this since validation happens at compile() -- confirm.
workflow.add_edge(START, "model")
workflow.add_node("model", call_model) # register the node function

# Add memory: the checkpointer persists graph state keyed by thread_id (see
# the config used at invoke time below).
memory = MemorySaver()
app = workflow.compile(checkpointer=memory) # new inputs are appended to the saved state


# Run the workflow.
# NOTE(review): SystemMessage is imported but unused here.
from langchain_core.messages import HumanMessage, SystemMessage
query = "你好,我是bob."
input_messages = [HumanMessage(query)]

# thread_id keys the checkpointer; reusing it continues the same conversation.
config = {"configurable": {"thread_id": "abc123"}}
# Turn 1: supply both the message and the persona value.
output = app.invoke({"messages": input_messages, "Personx": "海盗"}, config) # run
output["messages"][-1].pretty_print()  # output contains all messages in state # AddableValuesDict

# Turn 2: same thread_id, so prior state (including Personx) comes back from
# the checkpointer; only the new human message is supplied.
input_messages = [HumanMessage("我的名字是什么?")]
output = app.invoke({"messages": input_messages}, config)
output["messages"][-1].pretty_print()