import glob
import os
import re
import shutil
import tempfile
from PIL import Image, ImageEnhance
from langchain_text_splitters import RecursiveCharacterTextSplitter
from pdf2image import convert_from_path
from paddleocr import PaddleOCR
from langchain_community.document_loaders import DirectoryLoader, JSONLoader, Docx2txtLoader, \
    UnstructuredWordDocumentLoader
from langchain_community.document_loaders import UnstructuredMarkdownLoader  # 尝试使用UnstructuredMarkdownLoader
from langchain.text_splitter import CharacterTextSplitter
from langchain.docstore.document import Document
from docx import Document as DocxDocument
from langchain_community.document_loaders import TextLoader, Docx2txtLoader
from langchain_community.document_loaders import UnstructuredWordDocumentLoader
class DataloaderModule:
    """Load txt / markdown / docx documents from disk, split them into
    overlapping chunks, and de-duplicate the resulting document list.

    All loaded chunks accumulate in ``self.documents`` (a list of langchain
    ``Document`` objects).
    """

    def __init__(self, directory_path, chunk_size=500, chunk_overlap=50):
        """
        Args:
            directory_path: root directory scanned for ``*.txt`` files.
            chunk_size: maximum number of characters per chunk.
            chunk_overlap: number of characters shared by adjacent chunks.
        """
        self.directory_path = directory_path
        self.chunk_size = chunk_size
        self.chunk_overlap = chunk_overlap
        self.documents = []
        # Recursive splitter with CJK-aware separators so Chinese text is
        # preferentially split at paragraph breaks, then sentence punctuation.
        self.text_splitter = RecursiveCharacterTextSplitter(
            chunk_size=chunk_size,
            chunk_overlap=chunk_overlap,
            length_function=len,
            separators=["\n\n", "\n", "。", "！", "？", "；", "，", " "]
        )

    def load_text_document(self):
        """Load and split every ``*.txt`` file under ``self.directory_path``.

        Creates the directory if it does not exist; silently returns when no
        txt files are present. Errors are reported but not raised.
        """
        if not os.path.exists(self.directory_path):
            os.makedirs(self.directory_path)
            print(f"创建了根目录{self.directory_path}")
        txt_files = glob.glob(f"{self.directory_path}/*.txt")
        if not txt_files:
            print(f"在 {self.directory_path} 中未找到任何txt文件，跳过加载")
            return
        try:
            loader = DirectoryLoader(path=self.directory_path, glob="*.txt", show_progress=True)
            raw_documents = loader.load()
            split_docs = self.text_splitter.split_documents(raw_documents)
            self.documents.extend(split_docs)
            print(f"成功加载并分割 {len(raw_documents)} 个txt文件，获得 {len(split_docs)} 个文档块")
        except Exception as e:
            print(f"加载txt文件时出错{e}")

    def load_markdown_document(self, markdown_path):
        """Load and split markdown content.

        Args:
            markdown_path: either a directory (all ``*.md`` files inside are
                loaded, non-recursively) or a path to a single ``.md`` file.

        Per-file failures are reported and skipped so one bad file does not
        abort the whole batch.
        """
        if not os.path.exists(markdown_path):
            print(f"Markdown路径不存在: {markdown_path}，跳过加载")
            return

        if os.path.isdir(markdown_path):
            md_files = glob.glob(f"{markdown_path}/*.md")
            if not md_files:
                print(f"在 {markdown_path} 中未找到任何md文件，跳过加载")
                return

            total_docs = 0
            total_chunks = 0
            for file in md_files:
                try:
                    loader = UnstructuredMarkdownLoader(str(file))
                    docs = loader.load()
                    split_docs = self.text_splitter.split_documents(docs)
                    self.documents.extend(split_docs)
                    total_docs += len(docs)
                    total_chunks += len(split_docs)
                except Exception as e:
                    print(f"加载文件 {file} 时出错: {str(e)}")

            print(f"成功加载并分割 {total_docs} 个md文件，获得 {total_chunks} 个文档块")

        else:  # single markdown file
            try:
                loader = UnstructuredMarkdownLoader(file_path=markdown_path)
                markdown_documents = loader.load()
                split_md = self.text_splitter.split_documents(markdown_documents)
                self.documents.extend(split_md)
                print(f"成功加载并分割 1 个md文件，获得 {len(split_md)} 个文档块")
            except Exception as e:
                print(f"加载md文件时出错: {str(e)}")

    def preprocess_image(self, image_path):
        """Enhance an image for OCR and save the result next to the input.

        Pipeline: grayscale -> hard threshold at 200 -> contrast x2 ->
        sharpness x2.

        Returns:
            Path of the saved ``*_processed`` image.
        """
        img = Image.open(image_path)

        # 1. Grayscale.
        img = img.convert('L')

        # 2. Binarize: pixels below 200 become black, the rest white.
        img = img.point(lambda x: 0 if x < 200 else 255, 'L')

        # 3. Boost contrast.
        enhancer = ImageEnhance.Contrast(img)
        img = enhancer.enhance(2.0)

        # 4. Sharpen.
        enhancer = ImageEnhance.Sharpness(img)
        img = enhancer.enhance(2.0)

        # BUGFIX: the old `image_path.replace('.jpg', ...)` silently
        # overwrote the ORIGINAL file whenever the path did not end in
        # '.jpg'. splitext handles any extension safely.
        root, ext = os.path.splitext(image_path)
        processed_path = f"{root}_processed{ext or '.jpg'}"
        img.save(processed_path)
        return processed_path

    def load_docx(self, doc_path):
        """Load and split docx/txt content.

        Args:
            doc_path: a directory (all ``*.docx`` and ``*.txt`` inside are
                loaded, non-recursively) or a path to a single file.

        txt files are read as UTF-8 with a GBK fallback; unsupported
        extensions are skipped with a message.
        """
        if not os.path.exists(doc_path):
            print(f"路径不存在: {doc_path}，跳过加载")
            return

        if os.path.isdir(doc_path):
            doc_files = glob.glob(f"{doc_path}/*.docx") + glob.glob(f"{doc_path}/*.txt")
            if not doc_files:
                print(f"在 {doc_path} 中未找到任何 docx/txt 文件，跳过加载")
                return
        else:
            doc_files = [doc_path]

        total_docs = 0
        total_chunks = 0

        for file in doc_files:
            try:
                if file.endswith(".docx"):
                    docs = UnstructuredWordDocumentLoader(file).load()
                elif file.endswith(".txt"):
                    # BUGFIX: UnicodeDecodeError is raised by .load(), not by
                    # the TextLoader constructor, so the GBK fallback must
                    # wrap the load() call (the old code's fallback was dead).
                    try:
                        docs = TextLoader(file, encoding="utf-8").load()
                    except UnicodeDecodeError:
                        docs = TextLoader(file, encoding="gbk").load()
                else:
                    print(f"暂不支持的文件格式: {file}")
                    continue

                split_docs = self.text_splitter.split_documents(docs)
                self.documents.extend(split_docs)
                total_docs += len(docs)
                total_chunks += len(split_docs)
            except Exception as e:
                print(f"加载文件 {file} 时出错: {str(e)}")

        print(f"成功加载并分割 {total_docs} 个文档，获得 {total_chunks} 个文档块")

    def save_pdf_as_docx(self, pdf_file, text_content, output_dir="./output_docs"):
        """Persist extracted PDF text as a DOCX file, one paragraph per
        double-newline-separated segment.

        Args:
            pdf_file: source PDF path; its basename names the output file.
            text_content: full extracted text.
            output_dir: directory for the output (created if missing).
        """
        if not os.path.exists(output_dir):
            os.makedirs(output_dir)

        base_name = os.path.basename(pdf_file)
        docx_name = os.path.splitext(base_name)[0] + ".docx"
        docx_path = os.path.join(output_dir, docx_name)

        docx_doc = DocxDocument()

        # Preserve paragraph structure: blank-line-separated segments become
        # separate DOCX paragraphs; empty segments are dropped.
        paragraphs = text_content.split('\n\n')
        for para in paragraphs:
            if para.strip():
                docx_doc.add_paragraph(para.strip())

        docx_doc.save(docx_path)
        print(f"已保存DOCX文档: {docx_path}")

    def validate_document_and_clean_data(self):
        """Drop whitespace-only chunks and de-duplicate by page content.

        Dict insertion order preserves the first occurrence of each unique
        content string.
        """
        self.documents = [doc for doc in self.documents if doc.page_content.strip()]
        unique_docs = {doc.page_content: doc for doc in self.documents}
        self.documents = list(unique_docs.values())

        print(f"去重后的剩余文档{len(self.documents)}")

    def load_all_documents(self, file_path):
        """Run every loader, then clean the accumulated documents.

        NOTE(review): txt files under ``self.directory_path`` are loaded by
        ``load_text_document`` AND, when ``file_path`` is the same directory,
        again by ``load_docx``; the duplicates are removed by the final
        de-duplication pass.
        """
        self.load_text_document()
        self.load_markdown_document(file_path)
        self.load_docx(file_path)
        self.validate_document_and_clean_data()

    def display_summary(self):
        """Print the chunk count and a 200-char preview of up to the first
        30 chunks."""
        print(f"文件总数{len(self.documents)}")
        for i, doc in enumerate(self.documents[:30]):
            print(f"\n文档{i+1}内容预览:\n{doc.page_content[:200]}...")

if __name__ == "__main__":
    # Build the loader over ./data, ingest every supported file type from
    # that directory, then print a preview of the resulting chunks.
    loader = DataloaderModule(directory_path="./data")
    loader.load_all_documents(file_path="./data")
    loader.display_summary()