import os
from typing import Any

from langchain_core.messages.utils import count_tokens_approximately
from langchain_openai import ChatOpenAI
from langgraph.checkpoint.memory import InMemorySaver
from langgraph.prebuilt.chat_agent_executor import AgentState, create_react_agent
from langmem.short_term import SummarizationNode

# --- Environment / model configuration --------------------------------------
# DashScope exposes an OpenAI-compatible endpoint, so ChatOpenAI is pointed at
# it via base_url, reusing the DashScope credentials as OPENAI_API_KEY.
base_url = os.environ.get("DASHSCOPE_BASE_URL")
model_name = os.environ.get("DASHSCOPE_MODEL_NAME")

# Fail fast with a clear message: assigning None into os.environ (the old
# behavior when DASHSCOPE_API_KEY was unset) raises a cryptic TypeError.
_api_key = os.getenv("DASHSCOPE_API_KEY")
if not _api_key:
    raise RuntimeError("DASHSCOPE_API_KEY environment variable is not set")
os.environ["OPENAI_API_KEY"] = _api_key

# "model" is the current ChatOpenAI parameter; "model_name" is a deprecated
# alias kept only for backward compatibility.
llm = ChatOpenAI(base_url=base_url, model=model_name)

# Pre-model hook: condenses older history into a summary once the token
# budget is exceeded, and writes the condensed message list under
# "llm_input_messages" instead of overwriting the raw "messages" key.
_summarization_settings = {
    "max_tokens": 1000,
    "max_summary_tokens": 500,
    "output_messages_key": "llm_input_messages",
}

summarization_node = SummarizationNode(
    model=llm,
    token_counter=count_tokens_approximately,
    **_summarization_settings,
)


class State(AgentState):
    # NOTE: "context" persists the SummarizationNode's running summary between
    # turns, so previously-summarized history is not re-summarized on every
    # model call. This is a common optimization, since each summarization is
    # itself a (slow) LLM call.
    context: dict[str, Any]


# In-process checkpointer: keeps per-thread conversation state (messages plus
# the summary "context") alive across invoke() calls.
check_pointer = InMemorySaver()

# ReAct-style agent with no tools; the summarization node runs before every
# model call and supplies the (possibly condensed) history to the LLM.
_agent_config = dict(
    model=llm,
    tools=[],
    prompt="你是一个智能AI助手",
    pre_model_hook=summarization_node,
    state_schema=State,
    checkpointer=check_pointer,
)

agent = create_react_agent(**_agent_config)

# Both invocations share thread_id "1", so the checkpointer threads the
# conversation state (including the running summary) through both turns.
config = {"configurable": {"thread_id": "1"}}

# Turn 1: tell the agent a fact it should remember.
cs_res = agent.invoke(
    {"messages": [{"role": "user", "content": "我是Alm，记住哦！"}]},
    config,
)
print(cs_res)

# Turn 2: ask the agent to recall that fact from the checkpointed state.
bj_res = agent.invoke(
    {"messages": [{"role": "user", "content": "我是谁？"}]},
    config,
)
print(bj_res)
