from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI
from langchain_community.chat_message_histories import ChatMessageHistory
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.runnables import RunnableWithMessageHistory
from langchain_core.messages import HumanMessage
from langchain_core.output_parsers import BaseOutputParser

from langchain_core.output_parsers import BaseOutputParser
# ChatGLM appends "<|end|>" at the end of each reply in multi-turn chat,
# so a custom output parser is needed to strip it.
# Output parser
class ChatGLMOutputParser(BaseOutputParser[str]):
    """Strip ChatGLM's trailing ``<|end|>`` marker from a model reply.

    Keeps only the text before the first ``<|end|>`` occurrence and
    trims surrounding whitespace.
    """

    def parse(self, text: str) -> str:
        head, _, _ = text.partition("<|end|>")
        return head.strip()

# Initialize the chat LLM against a local OpenAI-compatible endpoint
# (the server at 127.0.0.1:8102 serves the ChatGLM3-6B model).
_llm_settings = {
    "api_key": "EMPTY",  # local server does not check the key
    "base_url": "http://127.0.0.1:8102/v1",
    "model": "/data/model/models/ZhipuAI/chatglm3-6b",
    "temperature": 0.7,
    "max_tokens": 512,
}
llm = ChatOpenAI(**_llm_settings)

# Build the prompt: a fixed system instruction followed by the running
# conversation (injected under the "my_msg" placeholder).
prompt = ChatPromptTemplate.from_messages(
    [
        ("system", "你是一个严谨的AI助手，回答需准确且不超过50字"),
        MessagesPlaceholder(variable_name="my_msg"),
    ]
)

# Session routing config and the in-memory per-session history store.
config = {"configurable": {"session_id": "lisi"}}
store = {}

def get_session_history(session_id: str):
    """Return the chat history for *session_id*, creating one on first use.

    Histories live in the module-level ``store`` dict, so they persist
    for the lifetime of the process only.
    """
    history = store.get(session_id)
    if history is None:
        history = ChatMessageHistory()
        store[session_id] = history
    return history
# Compose the pipeline: prompt -> LLM -> ChatGLM output parser.
chain = prompt | llm | ChatGLMOutputParser()

# Wrap the chain so each session_id gets its own message history.
do_message = RunnableWithMessageHistory(
    chain,
    get_session_history,
    input_messages_key="my_msg",  # key carrying each turn's incoming messages
)
# Interactive REPL: forward each question through the history-aware chain
# and print the parsed reply. Exits on the literal input "exit", and also
# on Ctrl-D (EOFError) or Ctrl-C (KeyboardInterrupt) instead of crashing
# with a traceback — the original loop did not handle either.
while True:
    try:
        input_text = input("请输入问题：")
    except (EOFError, KeyboardInterrupt):
        break  # treat closed stdin / Ctrl-C as a clean quit
    if input_text == "exit":
        break
    response = do_message.invoke(
        {"my_msg": [HumanMessage(content=input_text)]},
        config=config,
    )
    print(response)