from src.module.Model import Model
from src.module.Embedding import Embedding

from typing import List, Any
from pathlib import Path
from werkzeug.utils import secure_filename

from langchain_text_splitters import RecursiveCharacterTextSplitter
from langchain_community.document_loaders import TextLoader, PyPDFLoader, Docx2txtLoader
from langchain_community.vectorstores import FAISS
from langchain.chains import ConversationalRetrievalChain
from langchain_community.chat_message_histories import RedisChatMessageHistory
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.output_parsers import StrOutputParser
from langchain_core.messages import SystemMessage


class Rag:
    """Retrieval-augmented generation (RAG) chat service.

    Wires together an LLM (``Model``), an embedding model (``Embedding``),
    an in-memory FAISS vector store, and a Redis-backed chat history to
    answer questions either against uploaded documents (RAG path) or from
    the model alone (plain chat path).
    """

    def __init__(self):
        self.model = Model().model
        self.embedding = Embedding().embedding
        # System prompt prepended to every conversation.
        # (Runtime string — kept verbatim; roughly: "You are a chatbot; answer
        # from the provided context if possible, otherwise from your own
        # knowledge; if you truly don't know, apologize.")
        self.base_message = [SystemMessage(content="""
        你现在是一名聊天机器人，用户在向你提问时，会给定你上下文内容。\n
        如果你能从给你的上下文中找到用户提问的问题，那你就总结后回复，如果没找到，你就结合你自己的知识库来回复用户的提问。 \n
        如果你确实不知道这个问题，就回复：‘抱歉，我不知道这个问题的答案’ \n""")]

    @staticmethod
    def get_message_history(session_id: str) -> "RedisChatMessageHistory":
        """Return the Redis-backed chat history for *session_id*.

        NOTE(review): the Redis URL is hard-coded — consider moving it to
        configuration.
        """
        return RedisChatMessageHistory(
            session_id=session_id,
            url="redis://localhost:6379/0"
        )

    @staticmethod
    def loader_handler(files: List[Any]) -> List[Any]:
        """Persist uploaded files under ``./resource`` and load them as documents.

        Supports PDF, UTF-8 plain text, and .docx uploads; files with any
        other MIME type are saved but silently skipped for loading.

        :param files: werkzeug-style upload objects (must expose ``mimetype``,
            ``filename`` and ``save``) — TODO confirm against the caller.
        :return: the LangChain documents loaded from all supported files.
        """
        # MIME type -> document loader factory. Built once, outside the loop
        # (the original rebuilt this dict per file).
        loader_map = {
            "application/pdf": PyPDFLoader,
            "text/plain": lambda path: TextLoader(path, encoding="utf8"),
            "application/vnd.openxmlformats-officedocument.wordprocessingml.document": Docx2txtLoader
        }
        resource_dir = Path(__file__).parent / "resource"
        docs: List[Any] = []
        for file in files:
            # secure_filename strips path separators / unsafe characters,
            # preventing directory traversal via the upload name.
            filename = secure_filename(file.filename)
            save_path = resource_dir / filename
            if not save_path.exists():
                # exist_ok makes directory creation idempotent.
                save_path.parent.mkdir(parents=True, exist_ok=True)
                file.save(save_path)
            loader = loader_map.get(file.mimetype)
            if loader is not None:
                docs.extend(loader(save_path).load())
        return docs

    @staticmethod
    def text_splitter(docs: List[Any]) -> List[Any]:
        """Split documents into 512-char chunks with 100-char overlap."""
        splitter = RecursiveCharacterTextSplitter(chunk_size=512, chunk_overlap=100)
        return splitter.split_documents(docs)

    def transform_embedding(self, doc_chunks: List[Any]) -> "FAISS":
        """Embed chunks into an in-memory FAISS vector store.

        NOTE(review): the original comment said the data could be persisted
        locally with "chrome" — presumably the Chroma vector store was meant;
        confirm before relying on that.
        """
        return FAISS.from_documents(doc_chunks, self.embedding)

    def get_chat_chain(self, retriever) -> "ConversationalRetrievalChain":
        """Build a ConversationalRetrievalChain for knowledge-base QA with chat history.

        :param retriever: a vector store (e.g. the FAISS index from
            :meth:`transform_embedding`) — despite the name, it is converted
            to a retriever here via ``as_retriever``.
        """
        return ConversationalRetrievalChain.from_llm(
            self.model,
            retriever.as_retriever(
                search_type="mmr",        # maximal marginal relevance for diversity
                search_kwargs={"k": 2}    # top-2 chunks per query
            ),
            return_source_documents=True  # needed by send_message to report sources
        )

    @staticmethod
    def format_response(response: dict) -> dict:
        """Merge *response* into the standard success envelope.

        Dict union keeps the right-hand operand on key conflict, so payload
        keys override the ``code``/``success`` defaults.
        """
        return {
            "code": 200,
            "success": True,
        } | response

    def send_message(self, chain, message: str, history_message: List[Any]) -> dict:
        """Run one retrieval-QA turn; return the answer plus de-duplicated source paths.

        :param chain: the chain produced by :meth:`get_chat_chain`.
        :param history_message: prior chat messages pulled from Redis.
        """
        # FIX: `chain({...})` (Chain.__call__) is deprecated in LangChain and
        # inconsistent with send_chat_message below — use the Runnable
        # `invoke` API, which takes and returns the same dict shape.
        response = chain.invoke({"question": message, "chat_history": self.base_message + history_message})

        answer = response.get('answer', '')
        # Set comprehension de-duplicates source file paths.
        sources = {doc.metadata.get('source', '') for doc in response.get('source_documents', [])}

        return self.format_response({
            "result": answer,
            "sources": list(sources)
        })

    def send_chat_message(self, message: str, history_message: List[Any]) -> dict:
        """Answer without retrieval: system prompt + history + user question."""
        prompt = ChatPromptTemplate.from_messages(self.base_message + [
            MessagesPlaceholder(variable_name='history'),
            ('human', '{question}')
        ])
        chain = prompt | self.model | StrOutputParser()
        response = chain.invoke({ "history": history_message, "question": message })
        return self.format_response({ "result": response })

    def start(self, message: str, file: List[Any], storage_id: str = 'ai:rag:0') -> dict:
        """Entry point: answer *message*, using RAG when *file* uploads are present.

        The user message and the model's answer are both appended to the
        Redis history afterwards, regardless of which path was taken.

        :param storage_id: Redis session key for the conversation history.
        """
        history = self.get_message_history(storage_id)
        if file:  # truthiness instead of len(...)
            docs = self.loader_handler(file)
            chunks = self.text_splitter(docs)
            store = self.transform_embedding(chunks)
            chain = self.get_chat_chain(store)
            response = self.send_message(chain, message, history.messages)
        else:
            response = self.send_chat_message(message, history.messages)

        history.add_user_message(message)
        history.add_ai_message(response.get('result', 'null'))
        return response

