from langchain_core.messages import SystemMessage, HumanMessage, RemoveMessage, trim_messages
from langchain_qwq import ChatQwQ
from langgraph.graph import MessagesState

from app.config import config

# Shared chat-model client for this module, configured from the
# [llm_aliyuncs] section of the project config (Aliyun-hosted QwQ endpoint).
chat_model = ChatQwQ(
    model=config.get("llm_aliyuncs", "model"),
    api_base=config.get("llm_aliyuncs", "base_url"),
    api_key=config.get("llm_aliyuncs", "api_key"),
    temperature=config.getfloat("llm_aliyuncs", "temperature"),
    max_tokens=3_000,  # cap on tokens generated per response
    timeout=None,  # no per-request timeout; failures are handled via retries
    max_retries=2  # retry transient API errors up to twice
)


def retrieve(state: MessagesState):
    """Answer the latest user turn using the full, untrimmed history.

    Prepends a generic assistant system prompt to the accumulated messages
    and returns the model's reply as a LangGraph state update.
    """
    prompt = SystemMessage(
        content=(
            "You are a helpful assistant. Answer all questions to the best of your ability."
        )
    )
    reply = chat_model.invoke([prompt, *state["messages"]])
    return {"messages": reply}


def retrieve_with_trim_message(state: MessagesState):
    """Answer the latest user turn using only a trimmed tail of the history.

    With ``token_counter=len`` every message counts as one "token", so the
    trimmer keeps at most the last 10 messages before the system prompt is
    prepended and the model is invoked.
    """
    prompt = SystemMessage(
        content=(
            "You are a helpful assistant. Answer all questions to the best of your ability."
        )
    )

    # Count each message as a single "token" and keep only the newest ten.
    keep_tail = trim_messages(strategy="last", max_tokens=10, token_counter=len)
    tail = keep_tail.invoke(state["messages"])

    messages = [prompt] + tail
    print(f"Trimmed messages: {messages}")

    return {"messages": chat_model.invoke(messages)}


def generate_summary(messages):
    """Ask the chat model to condense *messages* into one summary message.

    Appends a summarization instruction after the given messages and
    returns the model's response unchanged.
    """
    instruction = HumanMessage(
        content=(
            "Distill the above chat messages into a single summary message. "
            "Include as many specific details as you can."
        )
    )
    return chat_model.invoke(list(messages) + [instruction])


def retrieve_with_summary(state: MessagesState):
    """Answer the latest user turn, compacting older history into a summary.

    For short conversations (fewer than 5 messages) the full history is sent
    to the model as-is. Otherwise everything before the newest message is
    summarized via generate_summary, all existing messages are scheduled for
    removal, and the state is rebuilt as
    [summary, latest human message, model response].
    """
    # BUG FIX: the two implicitly-concatenated literals previously produced
    # "...best of your ability.The provided..." with no space between the
    # sentences; a trailing space restores a well-formed prompt.
    system_prompt = (
        "You are a helpful assistant. Answer all questions to the best of your ability. "
        "The provided chat history includes a summary of the earlier conversation."
    )
    system_message = SystemMessage(content=system_prompt)

    # Short conversation: no summarization needed, send the history as-is.
    if len(state["messages"]) < 5:
        message_updates = chat_model.invoke([system_message] + state["messages"])
        return {"messages": message_updates}

    # History has grown large enough: summarize everything before the
    # newest (human) message, then replace the whole history.
    last_human_message = state["messages"][-1]
    summary_message = generate_summary(state["messages"][:-1])  # exclude the newest message
    # Schedule every existing message for deletion from the state.
    delete_messages = [RemoveMessage(id=m.id) for m in state["messages"]]
    # Re-add the latest user turn as a fresh message (new id) after the summary.
    human_message = HumanMessage(content=last_human_message.content)
    response = chat_model.invoke([system_message, summary_message, human_message])
    message_updates = [summary_message, human_message, response] + delete_messages
    return {"messages": message_updates}
