from dotenv import load_dotenv
from langchain.chat_models import init_chat_model
from langchain_core.messages import HumanMessage, AIMessage
from langgraph.checkpoint.memory import MemorySaver
from langgraph.graph import START, MessagesState, StateGraph
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.messages import SystemMessage, trim_messages

# Load API credentials (e.g. OPENAI_API_KEY) from the project's env file.
load_dotenv(".venv/.env")

model = init_chat_model("gpt-4o-mini", model_provider="openai")

# Trim the conversation history before it is handed to the model so the
# prompt stays within the token budget: keep the most recent messages
# ("last"), never split a message, retain the system message, and make
# sure the kept window starts on a human turn.
trimmer = trim_messages(
    max_tokens=500,
    strategy="last",
    token_counter=model,
    include_system=True,
    allow_partial=False,
    start_on="human",
)

# Prompt: a fixed pirate-persona system message followed by the running
# conversation history injected under the "messages" key.
_SYSTEM_TEXT = (
    "You talk like a pirate. Answer all questions to the best of your ability."
)
prompt_template = ChatPromptTemplate.from_messages(
    [
        ("system", _SYSTEM_TEXT),
        MessagesPlaceholder(variable_name="messages"),
    ]
)

# Use LangGraph to keep track of the multi-turn conversation.
# Build a graph whose state is the standard message list.
workflow = StateGraph(MessagesState)

# 定义一个函数，用于调用模型
def call_model(state: MessagesState):
    """Graph node: trim the stored history, build the prompt, query the LLM.

    Returns a partial state update; LangGraph's message reducer appends
    the new AI response to the persisted conversation.
    """
    history = trimmer.invoke(state["messages"])
    prompt = prompt_template.invoke({"messages": history})
    return {"messages": model.invoke(prompt)}

# Wire the graph: a single "model" node reached directly from START.
workflow.add_node("model", call_model)
workflow.add_edge(START, "model")

# Attach an in-memory checkpointer so message history persists across
# invocations of the compiled app.
memory = MemorySaver()
app = workflow.compile(checkpointer=memory)

# The thread_id keys the checkpointer state, so distinct ids give
# independent conversations.
config = {"configurable": {"thread_id": "abc123"}}

def conversation_call(query):
    """Send *query* as a human turn and return the resulting graph state."""
    return app.invoke({"messages": [HumanMessage(query)]}, config)

def _demo() -> None:
    """Run a short two-turn demo conversation on a single thread."""
    output = conversation_call("Hi! I'm Bob.")
    # The returned state contains the full history (HumanMessage and
    # AIMessage alike), so print only the newest message — the reply.
    output["messages"][-1].pretty_print()
    output = conversation_call("What's my name?")
    output["messages"][-1].pretty_print()


# Guard the entry point so importing this module does not trigger
# network calls to the OpenAI API.
if __name__ == "__main__":
    _demo()



