# -*- coding: utf-8 -*-
# @Time: 2025/6/29 11:37
# @Author: wzd
# @Email: 2146333089@qq.com
# @File: parse.py

import logging
import os
import re
import warnings
from concurrent.futures import ThreadPoolExecutor
from typing import List, Dict, Tuple, Optional

import ebooklib
import fitz  # PyMuPDF
import pdfplumber
import pytesseract
from docx import Document
from ebooklib import epub
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.document_loaders import (
    PyPDFLoader,
    UnstructuredEPubLoader,
    Docx2txtLoader,
    TextLoader
)
from langchain_community.embeddings import OpenAIEmbeddings
from langchain_community.vectorstores import Chroma
from pdf2image import convert_from_path

# 忽略弃用警告
warnings.filterwarnings("ignore", category=DeprecationWarning)

# 配置日志
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)


class DocumentParser:
    def __init__(self, ocr_language: str = 'chi_sim+eng', chunk_size: int = 1000, chunk_overlap: int = 200):
        """
        初始化文档解析器（移除布局分析依赖）

        Args:
            ocr_language: OCR识别语言，默认'chi_sim+eng'（简体中文+英文）
            chunk_size: 文本分块大小
            chunk_overlap: 分块重叠大小
        """
        self.ocr_language = ocr_language
        self.text_splitter = RecursiveCharacterTextSplitter(
            chunk_size=chunk_size,
            chunk_overlap=chunk_overlap,
            separators=["\n\n", "\n", " ", ""]
        )
        logger.info("布局分析功能已移除，仅保留基础OCR和文本提取")

    def parse(self, file_path: str, enable_ocr: bool = False, extract_tables: bool = False) -> Dict:
        """
        解析文档主函数

        Args:
            file_path: 文档路径
            enable_ocr: 是否启用OCR（针对扫描文档）
            extract_tables: 是否提取表格（仅支持PDF/Word的基础表格提取）

        Returns:
            包含解析结果的字典
        """
        if not os.path.exists(file_path):
            raise FileNotFoundError(f"文件不存在: {file_path}")

        file_ext = os.path.splitext(file_path)[1].lower()

        if file_ext == '.pdf':
            return self._parse_pdf(file_path, enable_ocr, extract_tables)
        elif file_ext == '.epub':
            return self._parse_epub(file_path)
        elif file_ext == '.docx':
            return self._parse_docx(file_path, extract_tables)
        elif file_ext == '.txt':
            return self._parse_txt(file_path)
        else:
            raise ValueError(f"不支持的文件格式: {file_ext}")

    def _parse_pdf(self, pdf_path: str, enable_ocr: bool = False, extract_tables: bool = False) -> Dict:
        """解析PDF文档（简化版，移除布局分析）"""
        logger.info(f"开始解析PDF: {pdf_path}")
        result = {
            'metadata': {},
            'text': '',
            'formatted_text': [],
            'tables': [],
            'pages': [],
            'chunks': []
        }

        try:
            with fitz.open(pdf_path) as doc:
                result['metadata'] = doc.metadata

                if enable_ocr:
                    # 扫描版PDF：直接OCR（移除布局分析）
                    result['text'] = self._ocr_pdf(pdf_path)
                else:
                    # 文本型PDF：直接提取
                    for page_num in range(len(doc)):
                        page = doc.load_page(page_num)
                        page_text = page.get_text("text")
                        result['text'] += page_text

                        # 提取带格式的文本（段落级）
                        blocks = page.get_text("blocks")
                        formatted_blocks = []
                        for block in blocks:
                            block_text = block[4].strip()
                            if block_text:
                                formatted_blocks.append({
                                    'text': block_text,
                                    'bbox': block[:4],  # 位置信息
                                    'type': block[6]  # 0=文本，1=图像，2=其他
                                })
                        result['pages'].append({
                            'page_num': page_num + 1,
                            'text': page_text,
                            'formatted_blocks': formatted_blocks
                        })

                # 提取表格（使用pdfplumber的基础表格提取）
                if extract_tables:
                    result['tables'] = self._extract_tables_from_pdf(pdf_path)

        except Exception as e:
            logger.error(f"解析PDF时出错: {e}", exc_info=True)
            raise

        # 分块处理
        result['chunks'] = self._split_text(result['text'])
        logger.info(f"PDF解析完成，文本长度: {len(result['text'])}")
        return result

    def _ocr_pdf(self, pdf_path: str) -> str:
        """对PDF进行OCR识别（简化版，直接提取不做布局分析）"""
        logger.info(f"对PDF执行OCR: {pdf_path}")
        text = ""
        images = convert_from_path(pdf_path)

        # 配置Tesseract路径（Windows用户需要取消注释并修改路径）
        # if os.name == 'nt':  # Windows系统
        #     pytesseract.pytesseract.tesseract_cmd = r'C:\Program Files\Tesseract-OCR\tesseract.exe'

        try:
            with ThreadPoolExecutor(max_workers=os.cpu_count()) as executor:
                futures = [executor.submit(self._process_image_with_ocr, image) for image in images]
                for future in futures:
                    text += future.result()
        except Exception as e:
            logger.error(f"OCR处理时出错: {e}")

        return text

    def _process_image_with_ocr(self, image) -> str:
        """对单张图片进行OCR识别（移除布局分析，直接提取文本）"""
        try:
            return pytesseract.image_to_string(image, lang=self.ocr_language)
        except Exception as e:
            logger.error(f"OCR识别时出错: {e}")
            return ""

    def _extract_tables_from_pdf(self, pdf_path: str) -> List[Dict]:
        """从PDF中提取表格（使用pdfplumber的基础功能）"""
        logger.info(f"从PDF提取表格: {pdf_path}")
        tables = []
        try:
            with pdfplumber.open(pdf_path) as pdf:
                for page_num, page in enumerate(pdf.pages):
                    page_tables = page.extract_tables()
                    if page_tables:
                        for table_idx, table_data in enumerate(page_tables):
                            tables.append({
                                'page': page_num + 1,
                                'table_index': table_idx,
                                'data': table_data
                            })
        except Exception as e:
            logger.error(f"提取表格时出错: {e}")

        logger.info(f"成功提取{len(tables)}个表格")
        return tables

    def _parse_epub(self, epub_path: str) -> Dict:
        """解析EPUB电子书（功能不变）"""
        logger.info(f"开始解析EPUB: {epub_path}")
        result = {
            'metadata': {},
            'text': '',
            'chapters': {},
            'chunks': []
        }

        try:
            book = epub.read_epub(epub_path)
            result['metadata'] = {
                'title': book.get_metadata('DC', 'title')[0][0] if book.get_metadata('DC', 'title') else "未知标题",
                'author': book.get_metadata('DC', 'creator')[0][0] if book.get_metadata('DC',
                                                                                        'creator') else "未知作者",
                'language': book.get_metadata('DC', 'language')[0][0] if book.get_metadata('DC',
                                                                                           'language') else "未知语言",
                'publisher': book.get_metadata('DC', 'publisher')[0][0] if book.get_metadata('DC',
                                                                                             'publisher') else "未知出版商"
            }

            # 提取章节内容
            text_content = []
            for item in book.get_items_of_type(epub.ITEM_DOCUMENT):
                chapter_title = item.get_name()
                chapter_content = item.get_content().decode('utf-8')
                clean_text = re.sub(r'<[^>]+>', '', chapter_content)  # 清理HTML标签
                result['chapters'][chapter_title] = clean_text
                text_content.append(clean_text)

            result['text'] = "\n\n".join(text_content)
            result['chunks'] = self._split_text(result['text'])

        except epub.exceptions.PackageNotFoundError:
            logger.error(f"EPUB格式错误: {epub_path}")
            raise
        except Exception as e:
            logger.error(f"解析EPUB时出错: {e}", exc_info=True)
            raise

        logger.info(f"EPUB解析完成，文本长度: {len(result['text'])}")
        return result

    def _parse_docx(self, docx_path: str, extract_tables: bool = False) -> Dict:
        """解析Word文档（表格提取使用原生功能）"""
        logger.info(f"开始解析DOCX: {docx_path}")
        result = {
            'metadata': {},
            'text': '',
            'formatted_text': [],
            'tables': [],
            'chunks': []
        }

        try:
            doc = Document(docx_path)

            # 提取文本
            paragraphs = []
            for para in doc.paragraphs:
                paragraphs.append(para.text)
                result['formatted_text'].append({
                    'text': para.text,
                    'style': para.style.name if para.style else "Normal",
                    'is_title': self._is_title_paragraph(para)
                })

            result['text'] = "\n\n".join(paragraphs)

            # 提取表格（使用python-docx原生功能）
            if extract_tables:
                for table_idx, table in enumerate(doc.tables):
                    table_data = []
                    for row in table.rows:
                        row_data = [cell.text for cell in row.cells]
                        table_data.append(row_data)
                    result['tables'].append({
                        'table_index': table_idx,
                        'data': table_data
                    })

            result['chunks'] = self._split_text(result['text'])

        except Exception as e:
            logger.error(f"解析DOCX时出错: {e}", exc_info=True)
            raise

        logger.info(f"DOCX解析完成，文本长度: {len(result['text'])}")
        return result

    def _is_title_paragraph(self, paragraph) -> bool:
        """判断段落是否为标题（功能不变）"""
        if not paragraph.style:
            return False
        style_name = paragraph.style.name.lower()
        return any(keyword in style_name for keyword in ['title', 'heading', 'header', 'caption'])

    def _parse_txt(self, txt_path: str) -> Dict:
        """解析纯文本文件（功能不变）"""
        logger.info(f"开始解析TXT: {txt_path}")
        result = {
            'metadata': {'path': txt_path},
            'text': '',
            'chunks': []
        }

        try:
            with open(txt_path, 'r', encoding='utf-8') as f:
                result['text'] = f.read()
            result['chunks'] = self._split_text(result['text'])

        except UnicodeDecodeError:
            # 尝试其他编码
            try:
                with open(txt_path, 'r', encoding='gbk') as f:
                    result['text'] = f.read()
                result['chunks'] = self._split_text(result['text'])
                logger.warning(f"TXT文件使用GBK编码: {txt_path}")
            except Exception as e:
                logger.error(f"解析TXT时出错: {e}")
                raise
        except Exception as e:
            logger.error(f"解析TXT时出错: {e}", exc_info=True)
            raise

        logger.info(f"TXT解析完成，文本长度: {len(result['text'])}")
        return result

    def _split_text(self, text: str) -> List[Dict]:
        """将文本分割成块（功能不变）"""
        if not text:
            return []

        try:
            chunks = self.text_splitter.create_documents([text])
            return [
                {
                    'content': chunk.page_content,
                    'metadata': chunk.metadata,
                    'length': len(chunk.page_content)
                }
                for chunk in chunks
            ]
        except Exception as e:
            logger.error(f"文本分块时出错: {e}")
            return []

    def create_vector_db(self, file_path: str, embeddings_model: str = "text-embedding-ada-002") -> Chroma:
        """
        创建向量数据库（功能不变）

        Args:
            file_path: 文档路径
            embeddings_model: 嵌入模型名称

        Returns:
            Chroma向量数据库实例
        """
        if not os.getenv("OPENAI_API_KEY"):
            raise EnvironmentError("请设置OPENAI_API_KEY环境变量")

        logger.info(f"开始为文件创建向量数据库: {file_path}")

        try:
            # 解析文档
            doc_data = self.parse(file_path)

            # 创建嵌入
            embeddings = OpenAIEmbeddings(model=embeddings_model)

            # 准备文档数据
            documents = [chunk['content'] for chunk in doc_data['chunks'] if chunk['content']]
            metadatas = [chunk['metadata'] for chunk in doc_data['chunks'] if chunk['content']]

            if not documents:
                raise ValueError("没有可用于创建向量数据库的文本内容")

            # 创建向量数据库
            vector_db = Chroma.from_texts(
                texts=documents,
                embedding=embeddings,
                metadatas=metadatas,
                persist_directory=f"vector_db/{os.path.basename(file_path)}"
            )

            # 持久化保存
            vector_db.persist()
            logger.info(f"向量数据库创建成功，保存至: vector_db/{os.path.basename(file_path)}")
            return vector_db

        except Exception as e:
            logger.error(f"创建向量数据库时出错: {e}", exc_info=True)
            raise


# 使用示例
if __name__ == "__main__":
    # 示例文件路径
    pdf_file = r"C:\cnsoft\电子书籍资料\Python程序设计（第3版）.pdf"
    docx_file = r"C:\cnsoft\电子书籍资料\《嵌入式Linux开发实践教程》示例资源-word版课件\cp07-样章示例-TensorFlow.js应用开发.docx"  # 替换为实际的文件路径

    txt_file = r"C:\file_data\pycharm_project\django\IntelligentTeachingBackend\courses\tools\output\docx_outline.txt"

    try:
        # 初始化解析器（中文优先）
        parser = DocumentParser(ocr_language='chi_sim+eng')

        # 解析PDF
        if os.path.exists(pdf_file):
            pdf_result = parser.parse(pdf_file, enable_ocr=True, extract_tables=True)
            print(f"PDF解析结果 - 文本长度: {len(pdf_result['text'])}")
            print(f"PDF解析结果 - 表格数量: {len(pdf_result['tables'])}")

            # 创建向量数据库
            try:
                print("准备创建向量数据库")
                vector_db = parser.create_vector_db(pdf_file)
                print("向量数据库创建成功")
            except Exception as e:
                print(f"向量数据库创建失败: {e}")

        # 解析DOCX
        if os.path.exists(docx_file):
            docx_result = parser.parse(docx_file, extract_tables=True)
            print(f"DOCX解析结果 - 文本长度: {len(docx_result['text'])}")
            print(f"DOCX解析结果 - 表格数量: {len(docx_result['tables'])}")

        # 解析TXT
        if os.path.exists(txt_file):
            txt_result = parser.parse(txt_file)
            print(f"TXT解析结果 - 文本长度: {len(txt_result['text'])}")

    except Exception as e:
        print(f"示例运行出错: {e}")