'''
* This is the project for the Brtc LlmOps Platform
* @Author Leon-liao <liaosiliang@alltman.com>
* @Description //TODO 
* @File: 1_study_runnable_history_message.py
* @Time: 2025/7/27
* @All Rights Reserved By Brtc
'''


import dotenv
from langchain_community.chat_message_histories import FileChatMessageHistory
from langchain_core.chat_history import BaseChatMessageHistory
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.runnables import RunnableWithMessageHistory
from langchain_openai import ChatOpenAI

"""11、RunnableWithMessageHistory简化代码与使用"""

# 1. Load environment variables (e.g. OPENAI_API_KEY) from a .env file.
dotenv.load_dotenv()
# 2. In-process registry of chat histories, keyed by session_id.
store = {}

# 3. Factory that returns the chat history for a given session.
def get_session_history(session_id: str) -> BaseChatMessageHistory:
    """Return the history for *session_id*, creating a file-backed one on first use.

    Histories are cached in the module-level ``store`` dict so each session
    maps to exactly one FileChatMessageHistory instance.
    """
    history = store.get(session_id)
    if history is None:
        # Lazily create the backing file only for sessions we haven't seen yet.
        history = FileChatMessageHistory(f"./chat_history_{session_id}.txt", encoding="utf-8")
        store[session_id] = history
    return history

# 4. Build the conversation prompt: system instruction, prior turns, new user message.
prompt = ChatPromptTemplate.from_messages([
    ("system", "你是一个聊天机器人，  请根据用户的需求回复问题"),
    MessagesPlaceholder("history"),
    ("human", "{query}"),
])

# Chat model that powers the conversation.
llm = ChatOpenAI(model="gpt-3.5-turbo-16k")

# 5. Compose the chain: prompt -> model -> plain string output.
chain = prompt | llm | StrOutputParser()

# 6. Wrap the chain so each invocation automatically loads and saves
#    per-session history: "query" names the incoming user-message field,
#    "history" names the prompt placeholder filled from get_session_history.
with_message_chain = RunnableWithMessageHistory(
    chain,
    get_session_history,
    input_messages_key="query",
    history_messages_key="history",
)


# 7. Simple REPL: stream answers until the user types "exit".
# Built once — the session id does not change between turns.
SESSION_CONFIG = {"configurable": {"session_id": "borui"}}

while True:
    query = input("请输入您的问题:")
    # Tolerate surrounding whitespace and any casing of the exit command;
    # `break` leaves the loop cleanly instead of killing the interpreter
    # with exit(0) from inside the loop body.
    if query.strip().lower() == "exit":
        break
    response = with_message_chain.stream({"query": query}, config=SESSION_CONFIG)
    print("AI:", end="", flush=True)
    # Stream tokens as they arrive for a responsive terminal experience.
    for chunk in response:
        print(chunk, end="", flush=True)
    print("")
