# PDF loader
from langchain.retrievers.document_compressors import LLMChainExtractor
from langchain_community.document_loaders import PyPDFLoader
# Document splitting
from langchain.text_splitter import CharacterTextSplitter
from langchain_chroma import Chroma
from langchain_core.documents import Document
from langchain_huggingface import HuggingFaceEmbeddings
from langchain.retrievers import ContextualCompressionRetriever
from langchain_deepseek import ChatDeepSeek
from langchain.prompts import ChatPromptTemplate

class Config:
    """Shared configuration values for the document pipeline."""

    # Directory where the Chroma vector store is persisted on disk.
    CHROMA_SAVE_DIR = "chromadb"

class DocLoader:
    """
    Document loader: loads a file, splits it into chunks, stores the chunks
    in a Chroma vector store, and answers questions against that store.
    Currently only PDF input is supported; more formats may be added later.
    """

    # Sentence-embedding model used to vectorize the document chunks.
    model_name = "BAAI/bge-m3"

    def __init__(self):
        # Chat template: the system turn injects the retrieved {context};
        # the final human turn carries the user's {question}.
        self.template = [
            ("system", """你是一个处理文档的秘书，你会根据下面提供的上下文来回答问题，当你不知道问题怎么回答的时候你会直接说不知道，并且永远不会说你是AI或AI助手。
            上下文
            {context}
            """),
            ("human", "你好"),
            ("ai", "你好！"),
            ("human", "{question}")
        ]
        self.prompt = ChatPromptTemplate.from_messages(self.template)
        # Lazily-created Chroma handle, cached so the embedding model
        # (expensive to load) is instantiated only once per DocLoader.
        self._db = None

    def get_chromadb(self) -> Chroma:
        """
        Return the Chroma vector store backed by HuggingFace embeddings.

        The store is created on first use and cached on the instance, so
        repeated calls do not reload the embedding model.

        :return: a :class:`Chroma` instance persisted under
                 ``Config.CHROMA_SAVE_DIR``
        """
        db = getattr(self, "_db", None)
        if db is None:
            embeddings = HuggingFaceEmbeddings(
                model_name=self.model_name,
                model_kwargs={'device': 'cpu'},
                # Normalized embeddings make cosine similarity well-behaved.
                encode_kwargs={'normalize_embeddings': True}
            )
            db = Chroma(
                persist_directory=Config.CHROMA_SAVE_DIR,
                embedding_function=embeddings
            )
            self._db = db
        return db

    def save_doc(self, file_path, batch_size=5):
        """
        Load a PDF, split it into chunks and persist them in the vector store.

        :param file_path: path to the PDF file to index
        :param batch_size: number of chunks written to Chroma per call;
                           must be a positive integer (default 5)
        """
        # Split on the Chinese full stop so chunks end on sentence boundaries.
        splitter = CharacterTextSplitter(
            separator="。",
            chunk_size=200,
            chunk_overlap=40,
            length_function=len,
            add_start_index=True
        )

        # Load the PDF and split it in one pass.
        pages = PyPDFLoader(file_path).load_and_split(splitter)

        db = self.get_chromadb()
        length = len(pages)
        # Write in small batches; list slicing clamps at the end of the
        # list, so the final (short) batch needs no special-casing.
        for index in range(0, length, batch_size):
            print(f"index:{index},len:{length}")
            db.add_documents(pages[index:index + batch_size])

    def get_doc(self, query):
        """
        Answer *query* using chunks retrieved from the vector store.

        :param query: the user's question
        :return: the chat model's response message
        """
        db = self.get_chromadb()
        retriever = db.as_retriever(
            search_type="similarity_score_threshold",
            search_kwargs={
                "score_threshold": 0.3,
                "k": 1
            }
        )
        res_docs = retriever.invoke(input=query)
        # Concatenate the retrieved chunks into one context string.
        context = "".join(doc.page_content for doc in res_docs)

        print(f"context:{context} \n query:{query}")
        messages = self.prompt.format_messages(context=context, question=query)
        # Temperature 0 keeps answers deterministic and grounded in context.
        llm = ChatDeepSeek(
            model="deepseek-chat",
            temperature=0
        )
        return llm.invoke(messages)
