import os
import concurrent.futures
from langchain.document_loaders import PyMuPDFLoader, Docx2txtLoader
from langchain.text_splitter import CharacterTextSplitter
from langchain.schema import Document  # ✅ 关键点：使用 Document 封装文本
from pptx import Presentation


class LangChain_Chunck:
    """Load PDF / Word / PowerPoint files from a folder and split them into chunks.

    NOTE(review): the (misspelled) class name "Chunck" is kept as-is so existing
    callers are not broken.
    """

    def __init__(self, folder_path):
        """Scan *folder_path* once and collect file paths by extension.

        Args:
            folder_path: directory containing ``.pdf`` / ``.docx`` / ``.pptx`` files.
        """
        # Single directory scan instead of three separate os.listdir() calls.
        entries = [os.path.join(folder_path, f) for f in os.listdir(folder_path)]
        # File lists, bucketed by extension.
        self.pdf_files = [p for p in entries if p.endswith('.pdf')]
        self.word_files = [p for p in entries if p.endswith('.docx')]
        self.ppt_files = [p for p in entries if p.endswith('.pptx')]

    def load_pptx(self, file_path):
        """Extract all text from a .pptx file into a single Document.

        Args:
            file_path: path to a PowerPoint file.

        Returns:
            A ``Document`` whose ``page_content`` joins every non-empty shape
            text with newlines, or ``None`` when parsing fails.
        """
        try:
            prs = Presentation(file_path)
            texts = []
            for slide in prs.slides:
                for shape in slide.shapes:
                    # Not every shape carries text (pictures, charts, ...).
                    if hasattr(shape, "text") and shape.text.strip():
                        texts.append(shape.text.strip())
            # Wrap in a Document so the splitter can consume it like loader output.
            return Document(page_content="\n".join(texts), metadata={"source": file_path})
        except Exception as e:
            print(f"解析 PPT 失败: {file_path}, 错误: {e}")
            return None  # Return None so bad files don't pollute the results.

    def load_pdf(self, pdf):
        """Load one PDF via PyMuPDF.

        Returns:
            A list of ``Document`` pages, or ``None`` on failure.
        """
        try:
            return PyMuPDFLoader(pdf).load()
        # Was a bare `except:` that also swallowed KeyboardInterrupt/SystemExit
        # and printed only the path; now matches load_pptx's error reporting.
        except Exception as e:
            print(f"解析 PDF 失败: {pdf}, 错误: {e}")
            return None

    def load_word(self, doc):
        """Load one .docx file via docx2txt.

        Returns:
            A list of ``Document`` objects, or ``None`` on failure.
        """
        try:
            return Docx2txtLoader(doc).load()
        # Narrowed from a bare `except:`; see load_pdf.
        except Exception as e:
            print(f"解析 Word 失败: {doc}, 错误: {e}")
            return None

    def __call__(self, chunk_size=200, chunk_overlap=20, separator="\n"):
        """Load every collected file in parallel and split the texts into chunks.

        Args:
            chunk_size: maximum characters per chunk.
            chunk_overlap: characters shared between consecutive chunks.
            separator: boundary passed to ``CharacterTextSplitter``.

        Returns:
            The list of chunked ``Document`` objects.
        """
        # Loading is CPU/IO heavy, so fan out across processes.
        with concurrent.futures.ProcessPoolExecutor() as executor:
            pdf_docs = list(executor.map(self.load_pdf, self.pdf_files))
            word_docs = list(executor.map(self.load_word, self.word_files))
            ppt_docs = list(executor.map(self.load_pptx, self.ppt_files))

        # Flatten into one list, skipping None entries from failed loads.
        # BUG FIX: the original only filtered None for ppt_docs; a single failed
        # PDF/Word load made `for doc in sublist` raise TypeError on None.
        documents = [doc for sublist in pdf_docs if sublist for doc in sublist] + \
                    [doc for sublist in word_docs if sublist for doc in sublist] + \
                    [doc for doc in ppt_docs if doc is not None]

        print(f"✅ 共加载 {len(documents)} 份文档")

        # Split the documents into overlapping character chunks.
        text_splitter = CharacterTextSplitter(
            separator=separator,
            chunk_size=chunk_size,
            chunk_overlap=chunk_overlap
        )
        chunks = text_splitter.split_documents(documents)
        print(f"✅ 共生成 {len(chunks)} 个文本块")
        return chunks


if __name__ == "__main__":
    # Chunk every supported document under ./data and dump the result to stdout.
    chunker = LangChain_Chunck("./data")
    chunks = chunker(200, 20)
    print(chunks)