# Import required packages
from langchain_community.document_loaders import UnstructuredExcelLoader, Docx2txtLoader, PyPDFLoader
from langchain_community.vectorstores import Chroma
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import CharacterTextSplitter
import __init__


# Define the ChatDoc class
class ChatDoc():
    """Load a document, chunk it, embed the chunks into an in-memory
    Chroma vector store, and retrieve chunks relevant to a question.

    Attributes:
        doc: Path to the source document (.docx, .pdf, or .xlsx).
        splitText: List of chunked documents produced by splitSentences().
    """

    # Maps a lowercase file extension to the loader class that handles it.
    # Class-level constant so the table is built once, not on every call.
    LOADERS = {
        "docx": Docx2txtLoader,
        "pdf": PyPDFLoader,
        "xlsx": UnstructuredExcelLoader,
    }

    def __init__(self):
        self.doc = None      # document path; caller assigns before use
        self.splitText = []  # chunks produced by splitSentences()

    def getFile(self):
        """Load ``self.doc`` with the loader matching its file extension.

        Returns:
            The list of loaded documents, or None when no path has been
            set, the extension is unsupported, or loading fails.
        """
        doc = self.doc
        # Guard: the constructor default is None; without this check
        # doc.split(...) would raise AttributeError.
        if doc is None:
            print("Unsupported file extension: None")
            return None
        # Lowercase so 'report.PDF' and 'report.pdf' are treated the same.
        file_extension = doc.split(".")[-1].lower()
        loader_class = self.LOADERS.get(file_extension)
        if loader_class is None:
            print(f"Unsupported file extension: {file_extension}")
            return None
        try:
            loader = loader_class(doc)
            return loader.load()
        except Exception as e:
            # Best-effort: report the failure and return None so
            # splitSentences() can skip processing.
            print(f"Error loading {file_extension} files:{e}")
            return None

    def splitSentences(self):
        """Split the loaded document into overlapping chunks.

        Populates ``self.splitText``; leaves it untouched when the
        document could not be loaded.
        """
        full_text = self.getFile()
        if full_text is None:
            return
        text_split = CharacterTextSplitter(
            chunk_size=150,   # max characters per chunk
            chunk_overlap=20,  # overlap preserves context across chunks
        )
        self.splitText = text_split.split_documents(full_text)

    def embeddingAndVectorDB(self):
        """Embed ``self.splitText`` and return an in-memory Chroma store."""
        embeddings = OpenAIEmbeddings()
        db = Chroma.from_documents(
            documents=self.splitText,
            embedding=embeddings,
        )
        return db

    def askAndFindFiles(self, question):
        """Return the chunks most relevant to ``question``.

        Builds the vector store on every call (no caching), then runs a
        similarity search through the default retriever.
        """
        db = self.embeddingAndVectorDB()
        retriever = db.as_retriever()
        return retriever.invoke(question)


# Demo driver: guard so importing this module does not trigger file I/O
# and OpenAI network calls as a side effect.
if __name__ == "__main__":
    chat_doc = ChatDoc()
    chat_doc.doc = "../kecheng源码/example/fake.pdf"
    chat_doc.splitSentences()
    print(chat_doc.askAndFindFiles("这家公司叫什么名字？"))
