from typing import Dict

from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI
from langchain_community.chat_message_histories import ChatMessageHistory
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.runnables import RunnableWithMessageHistory
from langchain_core.messages import HumanMessage, BaseMessage
from langchain.chains.conversation.base import ConversationChain

from langchain_core.output_parsers import BaseOutputParser
# chatglm3 appends <|end|> to the end of each answer in multi-turn chat,
# so a custom output parser is needed to strip it.
# Output parser
class ChatGLMOutputParser(BaseOutputParser[str]):
    """Strip the trailing ``<|end|>`` marker that chatglm3 appends to replies."""

    def parse(self, text: str) -> str:
        """Return *text* truncated at the first ``<|end|>``, whitespace-trimmed."""
        head, _sep, _rest = text.partition("<|end|>")
        return head.strip()

# 初始化带模板支持的LLM
# Chat model served through an OpenAI-compatible endpoint (local chatglm3-6b).
llm = ChatOpenAI(
    model="/data/model/models/ZhipuAI/chatglm3-6b",
    base_url="http://127.0.0.1:8102/v1",
    api_key="EMPTY",  # local server ignores the key but the client requires one
    max_tokens=512,
    temperature=0.7,
)

# Build the conversation prompt
# Prompt: a fixed system instruction followed by the accumulated conversation.
prompt = ChatPromptTemplate.from_messages(
    [
        ("system", "你是一个严谨的AI助手，回答需准确且不超过50字"),
        MessagesPlaceholder(variable_name="history"),
    ]
)

# Per-invocation config selecting which session's history is used.
config = {"configurable": {"session_id": "wangwu"}}
# Backing dict for the plain (unbounded) per-session histories.
store = {}


class LimitedMemoryChatMessageHistory(ChatMessageHistory):
    """In-memory chat history that keeps only the most recent messages.

    Bug fix: ``ChatMessageHistory`` is a pydantic model, so assigning an
    undeclared attribute such as ``self._max`` inside ``__init__`` raises
    ``ValueError`` ("object has no field") at construction time.  The limit
    is therefore declared as a proper model field instead, which also keeps
    the ``LimitedMemoryChatMessageHistory(max_messages=20)`` call site working.
    """

    # Maximum number of messages retained; default 4 (= 2 dialogue turns).
    max_messages: int = 4

    def add_message(self, message: BaseMessage) -> None:
        """Append *message*, then drop the oldest entries beyond the limit."""
        super().add_message(message)
        # Auto-trim: keep only the newest ``max_messages`` entries.
        if len(self.messages) > self.max_messages:
            self.messages = self.messages[-self.max_messages:]

# One capped history object per session id.
memory_storage_limited: Dict[str, LimitedMemoryChatMessageHistory] = {}


def get_limited_message_history(session_id: str) -> LimitedMemoryChatMessageHistory:
    """Return the capped history for *session_id*, creating it on first use."""
    history = memory_storage_limited.get(session_id)
    if history is None:
        history = LimitedMemoryChatMessageHistory(max_messages=20)
        memory_storage_limited[session_id] = history
    return history
def get_session_history(session_id: str):
    """Return (creating on first use) the unbounded history for *session_id*.

    NOTE(review): this helper and ``store`` are not referenced below —
    ``do_message`` is wired to ``get_limited_message_history`` instead.
    Kept for compatibility; confirm whether it is still needed.
    """
    try:
        return store[session_id]
    except KeyError:
        history = ChatMessageHistory()
        store[session_id] = history
        return history
# Pipeline: prompt -> local LLM -> parser that strips the trailing <|end|>.
chain = prompt | llm | ChatGLMOutputParser()

# Wrap the chain so each session's history is loaded/saved automatically.
# NOTE(review): no history_messages_key is supplied, so stored history is
# merged with the incoming messages under the 'history' input key — confirm
# this matches the MessagesPlaceholder wiring in ``prompt``.
do_message = RunnableWithMessageHistory(
    chain,
    get_limited_message_history,
    input_messages_key='history',  # key carrying each turn's new messages
)

# Minimal REPL; type "exit" to quit.
while (input_text := input("请输入问题：")) != "exit":
    response = do_message.invoke(
        {"history": [HumanMessage(content=input_text)]},
        config=config,
    )
    print(response)