# -*- coding: utf-8 -*-
"""
@Time    : 2024/7/8 14:20 
@Author  : ZhangShenao 
@File    : 4.使用RunnableWithMessageHistory简化调用.py 
@Desc    : 使用RunnableWithMessageHistory简化调用
"""
import uuid

import dotenv
from langchain_community.chat_message_histories import FileChatMessageHistory
from langchain_community.chat_models import ChatZhipuAI
from langchain_core.chat_history import BaseChatMessageHistory
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.runnables import RunnableWithMessageHistory

# System instruction sent to the model on every turn.
# NOTE: kept in Chinese on purpose — it is a runtime string passed verbatim to the LLM.
SYSTEM_PROMPT = '你是一个具有记忆功能的AI助手，请根据结合用户的当前提问，和之前的历史聊天记录，生成准确的回答。你们的对话全部采用中文'

# Name of the ZhipuAI model to invoke
MODEL_NAME = 'glm-4-air'

# Prompt key under which the chat history messages are injected
HISTORY_KEY = 'history'

# In-process cache mapping session_id -> its file-backed message history
message_history_store: dict[str, FileChatMessageHistory] = {}


# Factory used by RunnableWithMessageHistory to resolve a session's history.
def get_session_history(session_id: str) -> BaseChatMessageHistory:
    """Return the chat history for *session_id*.

    On first access a file-backed history is created (one JSON file per
    session) and cached in ``message_history_store`` for later calls.
    """
    history = message_history_store.get(session_id)
    if history is None:
        history = FileChatMessageHistory(file_path=f'./message_history_{session_id}.json')
        message_history_store[session_id] = history
    return history


# Load environment variables (e.g. the ZhipuAI API key) from a .env file
dotenv.load_dotenv()

# Assemble the prompt: system instruction, injected history, then the user's turn
prompt = ChatPromptTemplate.from_messages([
    ('system', SYSTEM_PROMPT),
    MessagesPlaceholder(HISTORY_KEY),  # past messages are substituted here
    ('human', '{input}')
])

# Construct the LLM client
llm = ChatZhipuAI(model=MODEL_NAME)

# Output parser: reduce the model message to its plain string content
parser = StrOutputParser()

# Compose the chain: prompt -> model -> string
chain = prompt | llm | parser

# Wrap the chain with automatic message-history handling.
# FIX: `output_messages_key` removed — it must only be set when the wrapped
# runnable returns a dict of outputs. This chain ends with StrOutputParser
# and yields a plain string, so specifying output_messages_key='output'
# makes RunnableWithMessageHistory try to read output['output'] when saving
# history, which fails for a string result.
chain_with_message_history = RunnableWithMessageHistory(
    runnable=chain,
    input_messages_key='input',
    history_messages_key=HISTORY_KEY,
    get_session_history=get_session_history  # factory resolving per-session history
)

# Generate a fresh session_id for this conversation
session_id = str(uuid.uuid4())

# Read-eval-print loop: keep chatting until the user types 'bye'.
while True:
    input_str = input('Human: ')
    if input_str == 'bye':
        print('bye bye~')
        break

    # Stream the chain; the session_id in the config tells
    # RunnableWithMessageHistory which message history to load and update.
    response = chain_with_message_history.stream(
        input={'input': input_str},
        config={'configurable': {
            'session_id': session_id,
        }}
    )
    print('AI：', flush=True, end='')
    # FIX: dropped the unused `output` accumulator — chunks were concatenated
    # into it but the result was never read (history persistence is handled
    # by the wrapper, not by this loop).
    for chunk in response:
        print(chunk, flush=True, end='')

    print()
