import glob
import os
import re
import shutil
import tempfile
from PIL import Image, ImageEnhance
from langchain_text_splitters import RecursiveCharacterTextSplitter
from pdf2image import convert_from_path
from paddleocr import PaddleOCR
from langchain_community.document_loaders import DirectoryLoader, JSONLoader, Docx2txtLoader
from langchain_community.document_loaders import UnstructuredMarkdownLoader,TextLoader, Docx2txtLoader  # 尝试使用UnstructuredMarkdownLoader
from langchain.text_splitter import CharacterTextSplitter
from langchain.docstore.document import Document
from docx import Document as DocxDocument
class DataloaderModule:
    """Load markdown / docx documents, split them into overlapping chunks,
    deduplicate them, and optionally export the result back to markdown.

    Loaded chunks accumulate in ``self.documents`` across load calls.
    """

    def __init__(self, directory_path, chunk_size=5000, chunk_overlap=1000):
        """
        Args:
            directory_path: Directory where exported .md files are written.
            chunk_size: Target chunk size in characters.
            chunk_overlap: Overlap in characters between consecutive chunks.
        """
        self.all_content = 0  # kept for backward compatibility; not read internally
        self.directory_path = directory_path
        self.chunk_size = chunk_size
        self.chunk_overlap = chunk_overlap
        self.documents = []
        # Recursive splitter: tries each separator in order so chunks break on
        # paragraph / line boundaries before falling back to spaces/characters.
        self.text_splitter = RecursiveCharacterTextSplitter(
            chunk_size=chunk_size,
            chunk_overlap=chunk_overlap,
            length_function=len,
            separators=["\n\n", "\n", " ", ""],
        )

    def load_markdown_document(self, markdown_path):
        """Load a single .md file, or every .md file in a directory, split the
        content into chunks and append them to ``self.documents``.

        Missing paths and per-file loader errors are reported and skipped.
        """
        if not os.path.exists(markdown_path):
            print(f"Markdown路径不存在: {markdown_path}，跳过加载")
            return

        # Normalize to a list of files so a single loop handles both the
        # directory and the single-file case (mirrors load_docx).
        if os.path.isdir(markdown_path):
            md_files = glob.glob(os.path.join(markdown_path, "*.md"))
            if not md_files:
                print(f"在 {markdown_path} 中未找到任何md文件，跳过加载")
                return
        else:
            md_files = [markdown_path]

        total_md = 0
        total_chunks = 0
        for file in md_files:
            try:
                loader = UnstructuredMarkdownLoader(str(file))
                md_documents = loader.load()
                split_md = self.text_splitter.split_documents(md_documents)
                self.documents.extend(split_md)
                total_md += len(md_documents)
                total_chunks += len(split_md)
            except Exception as e:
                # Best-effort: report the failing file and keep going.
                print(f"加载文件 {file} 时出错: {str(e)}")

        print(f"成功加载并分割 {total_md} 个md文件，获得 {total_chunks} 个文档块")

    def load_docx(self, docx_path):
        """Load a single .docx file, or every .docx file in a directory, split
        the content into chunks and append them to ``self.documents``.
        """
        if not os.path.exists(docx_path):
            print(f"路径不存在: {docx_path}，跳过加载")
            return
        # Normalize to a list of candidate files.
        if os.path.isdir(docx_path):
            doc_files = glob.glob(f"{docx_path}/*.docx")
            if not doc_files:
                print(f"在 {docx_path} 中未找到任何 docx 文件，跳过加载")
                return
        else:
            doc_files = [docx_path]

        total_docs = 0
        total_chunks = 0
        for file in doc_files:
            try:
                if file.endswith(".docx"):
                    loader = Docx2txtLoader(file)
                else:
                    # Only .docx is supported; skip anything else.
                    print(f"暂不支持的文件格式: {file}")
                    continue

                docs = loader.load()
                split_docs = self.text_splitter.split_documents(docs)
                self.documents.extend(split_docs)
                total_docs += len(docs)
                total_chunks += len(split_docs)
            except Exception as e:
                # Best-effort: report the failing file and keep going.
                print(f"加载文件 {file} 时出错: {str(e)}")

        print(f"成功加载并分割 {total_docs} 个文档，获得 {total_chunks} 个文档块")

    def validate_document_and_clean_data(self):
        """Drop empty chunks and deduplicate by exact page_content.

        NOTE: the dict comprehension keeps the LAST occurrence of duplicated
        content, matching the original behavior.
        """
        # Remove documents whose content is empty/whitespace-only.
        self.documents = [doc for doc in self.documents if doc.page_content.strip()]
        # Deduplicate on exact content.
        unique_docs = {doc.page_content: doc for doc in self.documents}
        self.documents = list(unique_docs.values())
        all_content = len(self.documents)
        print(f"去重后的剩余文档{all_content}")

    def load_all_documents(self, md_path, docx_path):
        """Load markdown and docx sources, then clean/deduplicate the result."""
        self.load_markdown_document(md_path)
        self.load_docx(docx_path)
        # Data cleanup (empty-chunk removal + dedup).
        self.validate_document_and_clean_data()

    def save_document_as_md(self, document):
        """Write a single document's ``page_content`` to ``last.md`` inside
        ``self.directory_path``.

        Args:
            document: Object exposing a ``page_content`` string attribute.
        """
        base_name = "last"
        md_filename = f"{base_name}.md"
        file_path = os.path.join(self.directory_path, md_filename)

        try:
            with open(file_path, "w", encoding="utf-8") as f:
                f.write(document.page_content)
            print(f"文档已成功保存为: {file_path}")
        except Exception as e:
            print(f"保存文档时出错: {e}")

    def display_summary(self):
        """Print the document count and a 200-char preview of the first 30 chunks."""
        print(f"文件总数{len(self.documents)}")
        for i, doc in enumerate(self.documents[:30]):
            print(f"\n文档{i+1}内容预览:\n{doc.page_content[:200]}...")

    def save_all_documents_as_single_md(self, formatted_documents_list):
        """Write all pre-formatted document strings into one merged .md file
        (``all_documents.md`` under ``self.directory_path``).

        Args:
            formatted_documents_list (list of str): Already-formatted document
                sections (header + content).
        """
        merged_md_filename = "all_documents.md"
        file_path = os.path.join(self.directory_path, merged_md_filename)

        try:
            # Blank line between sections — reads as paragraph breaks in Markdown.
            merged_content = "\n\n".join(formatted_documents_list)

            with open(file_path, "w", encoding="utf-8") as f:
                f.write(merged_content)

            print(f"所有文档已成功合并保存为: {file_path}")
        except Exception as e:
            print(f"保存合并文档时出错: {e}")

    def display_save_summary(self):
        """Preview every document on the console and export all of them into a
        single merged markdown file via ``save_all_documents_as_single_md``.
        """
        print(f"文件总数: {len(self.documents)}")

        # Collect one formatted markdown section per document.
        all_formatted_docs = []

        # NOTE: iterates ALL documents (not just the first 30, despite the
        # 30-item preview used by display_summary).
        for i, doc in enumerate(self.documents):
            if hasattr(doc, 'page_content'):
                # Section header (e.g. "## 文档 1"), full content, separator.
                doc_header = f"## 文档 {i + 1}\n\n"
                full_content = f"**完整内容:**\n\n{doc.page_content}\n\n"
                separator = "---\n"
                formatted_doc = f"{doc_header}{full_content}{separator}"
                all_formatted_docs.append(formatted_doc)

                # Console preview of the first 200 characters.
                print(f"\n文档{i + 1}内容预览:\n{doc.page_content[:200]}...")
            else:
                print(f"\n文档{i + 1}: 对象没有 'page_content' 属性，跳过。")

        # Persist the merged file only when there is real content.
        if all_formatted_docs:
            self.save_all_documents_as_single_md(all_formatted_docs)
        else:
            print("没有有效的文档内容需要保存。")

if __name__ == "__main__":
    # Source directories for the raw knowledge-base files.
    md_path = r"E:\政策\langchain_huitong\data\md"
    docx_path = r"E:\政策\langchain_huitong\data\docx"
    # Output directory for exported markdown.
    directory_path = r"E:\政策\langchain_huitong\data"

    # Build the loader, ingest every source, then preview and export.
    data_loader = DataloaderModule(directory_path)
    data_loader.load_all_documents(md_path, docx_path)
    data_loader.display_save_summary()