import os

from langchain_community.document_loaders import PyMuPDFLoader
from langchain_core.documents import Document
from langchain_text_splitters import RecursiveCharacterTextSplitter
from tqdm import tqdm
from lc_frame.utils.log import logger
import warnings

warnings.filterwarnings('ignore')


def load_pdf_content(file_path):
    """Parse a single PDF file with PyMuPDF and return its pages as Document objects."""
    loader = PyMuPDFLoader(file_path=file_path)
    return loader.load()


def split_documents(docs):
    """Split documents into 500-char chunks (100-char overlap).

    Separators are ordered so splits prefer newlines, then CJK/ASCII sentence
    punctuation, and fall back to whitespace; separators are kept in the chunks.
    """
    separators = ["\n", ".", "。", "!", "！", "?", "？", "；", ";", "……", "…", "、", "，", ",", " "]
    splitter = RecursiveCharacterTextSplitter(
        chunk_size=500,
        chunk_overlap=100,
        length_function=len,
        separators=separators,
        keep_separator=True,
        is_separator_regex=False,
    )
    return splitter.split_documents(docs)


class PDF_Agent:
    """Loads every PDF in a directory, splits the pages into chunks and
    writes the chunks to the given vector store."""

    def __init__(self, dict_path=None, vectorstore=None):
        """
        Args:
            dict_path: directory to scan for ``*.pdf`` files (required).
            vectorstore: object exposing ``add_documents(documents=...)`` (required).

        Raises:
            ValueError: if either argument is missing.
        """
        if dict_path is None:
            raise ValueError("dict_path is required.")
        if vectorstore is None:
            raise ValueError("vectorstore is required.")
        self.dict_path = dict_path
        self.vectorstore = vectorstore

    # Collect all PDF files (non-recursive)
    def load_pdfs(self):
        """Return the paths of all PDF files directly inside ``dict_path``."""
        pdfs = [os.path.join(self.dict_path, file)
                for file in os.listdir(self.dict_path)
                if file.lower().endswith(".pdf")]
        logger.info(f"总共有{len(pdfs)}份pdf文件")
        return pdfs

    def save_pdf2vector(self, split_pdfs, batch_size=1000):
        """Store chunks in the vector store in batches.

        Args:
            split_pdfs: sequence of already-split document chunks.
            batch_size: chunks per ``add_documents`` call; batching bounds the
                size of each request to the store (was hard-coded to 1000).
        """
        for i in range(0, len(split_pdfs), batch_size):
            batch = split_pdfs[i:i + batch_size]
            self.vectorstore.add_documents(documents=batch)

    def run(self):
        """Load, split and store every PDF found under ``dict_path``."""
        pdfs_path = self.load_pdfs()
        pdfs = []
        for pdf_path in tqdm(pdfs_path):
            # each PDF yields one Document per page
            pdfs.extend(load_pdf_content(pdf_path))
        split_pdfs = split_documents(pdfs)
        self.save_pdf2vector(split_pdfs)


class Text_Agent:
    """Loads plain-text files from a directory, splits them into chunks and
    writes the chunks to the given vector store."""

    def __init__(self, dict_path=None, vectorstore=None):
        """
        Args:
            dict_path: directory containing the text files (required).
            vectorstore: object exposing ``add_documents(...)`` (required).

        Raises:
            ValueError: if either argument is missing.
        """
        if dict_path is None:
            raise ValueError("dict_path is required.")
        if vectorstore is None:
            raise ValueError("vectorstore is required.")
        self.dict_path = dict_path
        self.vectorstore = vectorstore

    def load_texts(self):
        """Read every regular file in ``dict_path`` as UTF-8 and wrap it in a Document.

        Newlines are stripped so the splitter relies on punctuation separators.
        Sub-directories and other non-file entries are skipped (the previous
        version would crash trying to ``open`` them).
        """
        files = []
        for file_name in tqdm(os.listdir(self.dict_path)):
            file_path = os.path.join(self.dict_path, file_name)
            if not os.path.isfile(file_path):
                continue  # skip directories / special entries
            with open(file=file_path, encoding="utf-8", mode="r") as f:
                content = f.read().replace("\n", "")
            # keep the source path so retrieved chunks can be traced to a file
            files.append(Document(page_content=content, metadata={"source": file_path}))
        return files

    def save_text2vector(self, split_texts, batch_size=1000):
        """Store chunks in batches; mirrors PDF_Agent.save_pdf2vector so the
        two agents share one batching convention."""
        for i in range(0, len(split_texts), batch_size):
            self.vectorstore.add_documents(split_texts[i:i + batch_size])

    def run(self):
        """Load, split and store every text file found under ``dict_path``."""
        texts = self.load_texts()
        texts = split_documents(texts)
        logger.info("开始存储")
        self.save_text2vector(texts)
        logger.info("存储结束")
