from langchain_text_splitters import RecursiveCharacterTextSplitter
from langchain_zhipu import ZhipuAIEmbeddings
from langchain_community.vectorstores import FAISS
from langchain_community.document_loaders import PyPDFLoader
from langchain_community.document_loaders import Docx2txtLoader
from langchain_community.document_loaders import TextLoader
import os

from logger import setup_logger



class FAISSService:
    """FAISS vector-store service built from documents in a directory.

    Files are loaded with a format-specific loader (PDF, Word, or plain
    text), split into 200-character chunks, embedded with ZhipuAI
    embeddings, and indexed in FAISS. Additional single files can be
    merged into the live index later via ``load_file``.
    """

    logger = setup_logger()
    vectorstore = None
    # Shared across instances: splitter and embedding client hold no per-index state.
    text_splitter = RecursiveCharacterTextSplitter(chunk_size=200, chunk_overlap=0)
    # SECURITY: read the API key from the environment; never commit secrets
    # to source control. The literal fallback preserves the previous
    # behavior, but this leaked key should be rotated and the fallback removed.
    embeddings = ZhipuAIEmbeddings(
        api_key=os.environ.get(
            "ZHIPUAI_API_KEY",
            "d3708ee404327e207b2f003775e06908.X3dgRCxbkyDfEIbh",
        )
    )

    def __init__(self, dir):
        """Build the initial FAISS index from every file directly under *dir*.

        Args:
            dir: Path of the directory whose files are loaded and indexed.
        """
        # Split the loaded documents into chunks before embedding.
        all_splits = self.text_splitter.split_documents(self.load_os_file(dir))
        self.vectorstore = FAISS.from_documents(all_splits, self.embeddings)

    @staticmethod
    def _make_loader(file_path):
        """Return the document loader matching *file_path*'s extension.

        Extension matching is case-insensitive. Unknown extensions fall back
        to ``TextLoader``, which assumes the file is UTF-8 text.
        """
        lowered = file_path.lower()
        if lowered.endswith('.pdf'):
            return PyPDFLoader(file_path)
        if lowered.endswith(('.docx', '.doc')):
            # NOTE(review): Docx2txtLoader handles .docx; legacy binary .doc
            # files will likely fail to parse — confirm whether .doc is needed.
            return Docx2txtLoader(file_path)
        return TextLoader(file_path, encoding='utf-8')

    @staticmethod
    def load_os_file(file_path) -> list:
        """Load every regular file in directory *file_path* (non-recursive).

        Args:
            file_path: Directory to scan.

        Returns:
            A flat list of documents loaded from all files in the directory.
        """
        documents = []
        for file_name in os.listdir(file_path):
            full_file_path = os.path.join(file_path, file_name)
            # Skip sub-directories and other non-regular entries.
            if os.path.isfile(full_file_path):
                loader = FAISSService._make_loader(full_file_path)
                documents.extend(loader.load())
        return documents

    def load_file(self, file_path):
        """Load one file, embed it, and merge it into the existing index.

        Args:
            file_path: Path of the file to add to the vector store.
        """
        self.logger.info("file_path:" + file_path)
        document = self._make_loader(file_path).load()
        all_splits = self.text_splitter.split_documents(document)
        # Build a throwaway index for the new chunks, then fold it into the
        # main store so existing vectors are preserved.
        vectorstore_new = FAISS.from_documents(all_splits, self.embeddings)
        self.vectorstore.merge_from(vectorstore_new)
