
from langchain_core.messages import RemoveMessage
from langmem.short_term import SummarizationNode
from langchain_core.messages.utils import count_tokens_approximately
from loguru import logger

from core.llm import llm
from core.state import AgentState

# Shared summarization node: once the conversation grows past the token
# budget, older turns are replaced by a running summary.
summarization_node = SummarizationNode(
    model=llm.bind(max_tokens=256),  # keep summary generations concise
    token_counter=count_tokens_approximately,
    input_messages_key="messages",
    output_messages_key="messages",  # write the compressed history back in place
    max_tokens=1000,                 # token budget for the final returned messages
    max_tokens_before_summary=1000,  # threshold that triggers summarization
    max_summary_tokens=200,          # cap on the summary message itself
)

def _countable_tokens(messages) -> int:
    """Approximate token count of *messages*, ignoring RemoveMessage markers.

    RemoveMessage entries are deletion placeholders, not real content, so
    counting them would inflate the total.
    """
    return count_tokens_approximately(
        [msg for msg in messages if not isinstance(msg, RemoveMessage)]
    )


def compress_node(state: AgentState) -> dict:
    """Compression node: summarize the message history when it grows too large.

    Delegates the actual decision and summarization to ``summarization_node``
    (which triggers once the history exceeds its configured token budget) and
    logs before/after statistics.

    Args:
        state: Agent state; only ``state["messages"]`` is read here.

    Returns:
        The dict produced by ``summarization_node.invoke`` — possibly
        containing a compressed ``messages`` list and a ``context`` entry
        with the running summary.
    """
    messages = state["messages"]
    current_tokens = _countable_tokens(messages)

    # NOTE: the 1000 here mirrors max_tokens_before_summary configured on
    # summarization_node — keep the two in sync.
    logger.info(f"🔍 消息压缩检查：{current_tokens} tokens (限制：1000)")

    # The node itself decides whether compression is needed; invoking it is
    # a no-op on the messages when under the threshold.
    result = summarization_node.invoke(state)

    new_messages = result.get("messages", messages)
    new_tokens = _countable_tokens(new_messages)

    if current_tokens > new_tokens:
        logger.info("🔄 正在压缩 ...")
        logger.info(f"✅ Tokens 从 {current_tokens} 减少到 {new_tokens} (节约 {current_tokens - new_tokens})")
        logger.info(f"📝 消息从 {len(messages)} 压缩至 {len(new_messages)}")

        # Surface the generated running summary, if the node produced one.
        if result.get("context", {}).get("running_summary"):
            summary = result["context"]["running_summary"]
            if hasattr(summary, 'content') and summary.content:
                # len(content) is a character count, not bytes.
                logger.info(f"📋 摘要生成： {len(summary.content)} 字符")
    else:
        logger.info(f"✅ 无需压缩 (当前：{current_tokens} tokens)")

    return result