import os
from typing import Any

import dotenv
from langchain_core.messages.utils import count_tokens_approximately
from langchain_openai import ChatOpenAI
from langgraph.checkpoint.memory import InMemorySaver
from langgraph.prebuilt import create_react_agent
from langgraph.prebuilt.chat_agent_executor import AgentState
from langmem.short_term import SummarizationNode

# Load environment variables from a local .env file so the DeepSeek
# credentials (DS_BASE / DS_API_KEY, read below) are available via os.getenv.
dotenv.load_dotenv()

# DeepSeek chat model reached through the OpenAI-compatible endpoint.
# Endpoint URL and key come from the environment (loaded from .env above);
# temperature 0 keeps responses deterministic.
_deepseek_config = {
    "model": "deepseek-chat",
    "base_url": os.getenv("DS_BASE"),
    "api_key": os.getenv("DS_API_KEY"),
    "temperature": 0,
}
llm = ChatOpenAI(**_deepseek_config)

# 使用大模型对历史信息进行总结
# Pre-model hook: when the conversation history exceeds max_tokens, have the
# LLM compress it into a summary (itself capped at max_summary_tokens).
# The condensed messages are written to "llm_input_messages" so the original
# history in the state is left untouched.
summarization_node = SummarizationNode(
    model=llm,
    token_counter=count_tokens_approximately,
    max_tokens=384,
    max_summary_tokens=128,
    output_messages_key="llm_input_messages",
)


class State(AgentState):
    # NOTE: this extra state field exists so the result of the previous
    # summarization can be persisted (via the checkpointer) across turns.
    # That way the model does not have to re-summarize the full history on
    # every invocation — a common optimization, since LLM calls are slow.
    # Presumably SummarizationNode reads/writes its running summary here —
    # TODO confirm against langmem docs.
    context: dict[str, Any]


# In-memory checkpointer: persists graph state (including State.context)
# between invocations of the same thread, so the running summary survives.
checkpointer = InMemorySaver()

# ReAct-style agent with no tools; the summarization node runs before each
# model call to keep the prompt within the token budget.
agent = create_react_agent(
    model=llm,
    tools=[],
    state_schema=State,
    pre_model_hook=summarization_node,
    checkpointer=checkpointer,
)
