from langchain.retrievers.document_compressors import LLMChainExtractor
from langchain_community.document_loaders import Docx2txtLoader, PyPDFLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_ollama import OllamaEmbeddings, ChatOllama
from langchain_postgres import PGVector
from langchain.retrievers import MultiQueryRetriever
from langchain.retrievers.contextual_compression import ContextualCompressionRetriever
from langchain_core.prompts import ChatMessagePromptTemplate, ChatPromptTemplate

import logging

# Shared chat model used for generation, multi-query expansion, and
# contextual compression. NOTE(review): the Ollama host is hard-coded to an
# internal IP — consider moving to configuration/env vars.
llm = ChatOllama(
    base_url="http://10.2.4.31:11434",
    model="qwen2.5:latest",
    temperature=0.8,       # fairly creative sampling
    num_predict=256)       # cap generated tokens per response

# Embedding model for vectorizing document chunks; must match the model used
# when the PGVector collection was originally populated.
embeddings = OllamaEmbeddings(
    model="quentinz/bge-large-zh-v1.5:latest",
    base_url="http://10.2.4.31:11434",
)


class ChatDoc:
    """RAG helper: load a .docx/.pdf file, split it into chunks, embed the
    chunks into a PGVector collection, and answer questions grounded in the
    retrieved context.
    """

    def __init__(self, path):
        """Set up the vector store connection and the chat prompt.

        Args:
            path: Filesystem path to the source document (.docx or .pdf).
        """
        self.path = path
        self.data = None       # raw documents loaded from `path`
        self.documents = None  # reserved; not populated by current methods
        # NOTE(review): DB credentials are hard-coded — move to config.
        self.pgvector = PGVector(
            connection="postgresql+psycopg://langchain:langchain@10.2.4.31:6024/langchain",
            embeddings=embeddings,
            collection_name="test"
        )
        # Role-structured chat template; {context} is filled with retrieved
        # chunks and {question} with the user's query.
        self.template = [
            ("system",
             "你是一个处理文档的秘书，你从不说自己是一个大模型或者AI助手，你会根据下面提供的上下文内容继续回答问题\n. 上下文内容: {context}"),
            ("human", "你好！"),
            ("ai", "你好！"),
            ("human", "{question}")
        ]
        self.prompt = ChatPromptTemplate.from_messages(self.template)

    def load_file(self):
        """Load the document at ``self.path``.

        Returns:
            list: LangChain ``Document`` objects produced by the loader.

        Raises:
            ValueError: If the file extension is neither .docx nor .pdf.
        """
        # Lower-case the extension so "report.PDF" etc. are also accepted.
        ext = self.path.rsplit(".", 1)[-1].lower()
        if ext == "docx":
            loader = Docx2txtLoader(self.path)
            return loader.load()
        elif ext == "pdf":
            loader = PyPDFLoader(self.path)
            return loader.load()
        else:
            raise ValueError("不支持的文件格式")

    def doc_text_splitter(self, chunk_size=120, chunk_overlap=20):
        """Load the file and split it into overlapping character chunks.

        Args:
            chunk_size: Target size of each chunk in characters.
            chunk_overlap: Characters of overlap between adjacent chunks.

        Returns:
            list: Chunked ``Document`` objects.
        """
        self.data = self.load_file()
        text_splitter = RecursiveCharacterTextSplitter(chunk_size=chunk_size, chunk_overlap=chunk_overlap)
        return text_splitter.split_documents(self.data)

    def embeddingData(self):
        """(Re)build the vector collection from the current document.

        WARNING: drops and recreates the whole collection, so any previously
        embedded documents in it are lost.

        Returns:
            PGVector: The populated vector store.
        """
        data = self.doc_text_splitter()
        self.pgvector.delete_collection()
        self.pgvector.create_collection()
        self.pgvector.add_documents(data)
        return self.pgvector

    def askWithFile(self, question):
        """Retrieve relevant chunks using LLM-generated query variants.

        Args:
            question: The user's question.

        Returns:
            list: Retrieved ``Document`` objects.
        """
        retriever = MultiQueryRetriever.from_llm(
            llm=llm,
            retriever=self.pgvector.as_retriever()
        )
        return retriever.invoke(question)

    def askWithCompression(self, question):
        """Retrieve chunks and compress them to the question-relevant parts.

        Args:
            question: The user's question.

        Returns:
            list: Compressed ``Document`` objects.
        """
        return ContextualCompressionRetriever(
            base_compressor=LLMChainExtractor.from_llm(llm),
            base_retriever=self.pgvector.as_retriever()
        ).invoke(question)

    def chatWithFile(self, question):
        """Answer a question grounded in the embedded document.

        Args:
            question: The user's question.

        Returns:
            The chat model's response message.
        """
        docs = self.askWithCompression(question)
        # Join instead of += in a loop (avoids quadratic concatenation).
        content = "\n".join(doc.page_content for doc in docs)
        # Lazy %-formatting: the string is only built if DEBUG is enabled.
        logging.debug("文档内容：%s", content)
        # format_messages keeps the system/human/ai role structure instead of
        # flattening the template to a single string.
        messages = self.prompt.format_messages(question=question, context=content)
        return llm.invoke(messages)


if __name__ == "__main__":

    # DEBUG level so the retrieved-context log line in chatWithFile is shown.
    logging.basicConfig(level=logging.DEBUG)
    # NOTE(review): chatWithFile queries the existing "test" collection;
    # embeddingData() must have been run at least once beforehand to populate it.
    chatDoc = ChatDoc("./data/pdfs.pdf")
    print(chatDoc.chatWithFile("你好").content)
