# -*- coding: UTF-8 -*-
# @IDE     : VScode
# @File   : format_chain.py
# @Time   : 2024/05/26 17:28:22
# @Author : zhonggc

import os
from langchain_core.runnables import RunnablePassthrough
from langchain_core.runnables.history import RunnableWithMessageHistory
from langchain_core.chat_history import BaseChatMessageHistory
from langchain_community.chat_message_histories import ChatMessageHistory
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.output_parsers import StrOutputParser
import sys
from pathlib import Path

sys.path.append(str(Path(__file__).resolve().parent.parent))

from LLM import zhipu_llm
from zhipuai_embeddings import ZhipuAIEmbeddings
from langchain_community.vectorstores import Chroma
from langchain.memory import ConversationBufferMemory
from langchain.chains import ConversationalRetrievalChain
import asyncio


# Module-level message history. NOTE(review): this single object is returned
# for every session (see the lambda in get_final_chain), so all sessions
# share one history regardless of session_id — confirm this is intended.
chat_history = ChatMessageHistory()


def get_prompt_template():
    """Build the chat prompt template for the conversation chain.

    The template has three parts: a fixed system instruction, a
    placeholder that is filled with the running chat history, and the
    human turn, which embeds both the user input and the retrieved
    context.

    Returns:
        ChatPromptTemplate: The assembled prompt template.
    """
    system_instruction = "你是一个智能知识助手聊天机器人，你的任务是尽可能详细并准确地回答用户的问题，如果你不知道问题的答案就回答`抱歉，我暂时还不知道这个问题的答案`，不要试图编造一个虚假或者错误的答案。"
    human_turn = "{input}, 你回答问题的依据是根据```{context}```上下文结合大模型处理后的最终答案。"
    return ChatPromptTemplate.from_messages(
        [
            ("system", system_instruction),
            MessagesPlaceholder(variable_name="chat_history"),
            ("human", human_turn),
        ],
    )


def get_model():
    """Create the ZhiPu chat model.

    Reads the API key from the ``ZHIPUAI_API_KEY`` environment variable;
    if it is unset, ``None`` is passed through to the constructor.

    Returns:
        The ZhiPu chat model instance.
    """
    return zhipu_llm.ZhiPu(api_key=os.environ.get("ZHIPUAI_API_KEY"))


def get_output_parser():
    """Return a parser that converts the model output into a plain string."""
    return StrOutputParser()


def trim_messages(chain_input):
    """Trim the global chat history down to the 20 most recent messages.

    Args:
        chain_input: The chain's input dict (unused; the parameter exists
            so this function can be plugged into
            ``RunnablePassthrough.assign``).

    Returns:
        bool: True if messages were actually discarded, False otherwise.
    """
    max_messages = 20
    stored_messages = chat_history.messages
    # Bug fix: the original used `<`, so at exactly max_messages it cleared
    # and re-added all messages (a wasted rebuild) and returned True even
    # though nothing was discarded. `<=` trims only when over the cap.
    if len(stored_messages) <= max_messages:
        return False
    chat_history.clear()
    for message in stored_messages[-max_messages:]:
        chat_history.add_message(message)
    return True


def get_vectordb():
    """Open the persisted Chroma vector store with ZhipuAI embeddings.

    Returns:
        Chroma: The vector store backed by the on-disk persist directory.
    """
    # NOTE(review): path is relative to the process working directory —
    # confirm the app is always launched from the repository root.
    return Chroma(
        persist_directory="./src/ChatBot/DataBase/chroma",
        embedding_function=ZhipuAIEmbeddings(),
    )


def get_final_chain():
    """Assemble the full conversational chain.

    Pipeline: first trim the shared history to its cap, then run
    prompt -> model -> string parser with the chat history injected and
    recorded via ``RunnableWithMessageHistory``.

    Returns:
        The runnable chain ready for ``invoke``/``astream``.
    """
    core_chain = get_prompt_template() | get_model() | get_output_parser()

    history_chain = RunnableWithMessageHistory(
        core_chain,
        # One shared history object is returned for every session_id.
        lambda session_id: chat_history,
        input_messages_key="input",
        history_messages_key="chat_history",
    )

    # Trim stored messages before each invocation so the history stays bounded.
    return RunnablePassthrough.assign(messages_trimmed=trim_messages) | history_chain


async def get_answer(input_text: str, session_id: str):
    """Stream the assistant's answer for one user query.

    Context is first retrieved from the local vector store with a maximal
    marginal relevance search; the conversation chain then streams its
    answer grounded on that context.

    Args:
        input_text (str): The user's question.
        session_id (str): Conversation/session identifier.

    Yields:
        Incremental chunks of the generated answer.
    """
    # Retrieve supporting context from the local knowledge base first.
    context_docs = get_vectordb().max_marginal_relevance_search(query=input_text, k=3)
    chain = get_final_chain()
    # Then stream the model's answer built from input + retrieved context.
    async for chunk in chain.astream(
        {"input": input_text, "context": context_docs},
        {"configurable": {"session_id": session_id}},
    ):
        yield chunk
