#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
增强版PDF解析器
解决原有解析器的问题，提供更好的文档结构化和内容清理
"""

import re
import json
from pathlib import Path
from typing import Dict, List, Optional, Tuple
from loguru import logger
import fitz  # PyMuPDF
import pdfplumber


class EnhancedPDFParser:
    """Enhanced PDF parser.

    Combines PyMuPDF (text, images, metadata) with pdfplumber (tables)
    and post-processes the raw extraction into a cleaned, structured
    document dictionary.
    """

    def __init__(self):
        """Pre-declare the regexes used for structure detection."""
        # Heading-detection patterns (matched with re.match, i.e.
        # anchored at line start): chapter markers, Chinese-numeral
        # enumerations, numbered items, parenthesised items, letters.
        self.header_patterns = [
            r"^第[一二三四五六七八九十\d]+章",
            r"^[一二三四五六七八九十]+、",
            r"^\d+\.",
            r"^\([一二三四五六七八九十\d]+\)",
            r"^[A-Z]\.",
        ]

        # Section-detection patterns: like the heading shapes above but
        # requiring trailing title text on the same line.
        self.section_patterns = [
            r"第[一二三四五六七八九十\d]+章\s*[^\n]+",
            r"[一二三四五六七八九十]+、\s*[^\n]+",
            r"\d+\.\s*[^\n]+",
            r"\([一二三四五六七八九十\d]+\)\s*[^\n]+",
        ]
    
    def parse_pdf(self, file_path: str) -> Dict:
        """Parse a PDF file end-to-end and return a structured dict.

        Pipeline: PyMuPDF extraction -> pdfplumber table extraction ->
        page cleanup/enrichment -> document structure -> table cleanup
        -> summary generation.

        Raises:
            FileNotFoundError: if *file_path* does not exist.
        """
        try:
            pdf_path = Path(file_path)
            if not pdf_path.exists():
                raise FileNotFoundError(f"文件不存在: {pdf_path}")

            logger.info(f"开始解析PDF文件: {pdf_path}")

            # Raw extraction passes.
            doc_info = self._extract_with_pymupdf(pdf_path)
            table_info = self._extract_tables_with_pdfplumber(pdf_path)

            # Post-processing passes over the raw extraction.
            enhanced_pages = self._enhance_pages_content(doc_info["pages"])
            document_structure = self._extract_document_structure(enhanced_pages)
            cleaned_tables = self._clean_table_data(table_info["tables"])
            summary = self._generate_enhanced_summary(
                enhanced_pages, cleaned_tables, document_structure
            )

            result = {
                "file_path": str(pdf_path),
                "file_name": pdf_path.name,
                "total_pages": doc_info["total_pages"],
                "metadata": doc_info["metadata"],
                "pages": enhanced_pages,
                "tables": cleaned_tables,
                "images": doc_info["images"],
                "document_structure": document_structure,
                "summary": summary,
            }

            logger.info(f"PDF解析完成: {pdf_path.name}, 共{result['total_pages']}页")
            return result

        except Exception as e:
            logger.error(f"PDF解析失败: {e}")
            raise
    
    def _extract_with_pymupdf(self, file_path: Path) -> Dict:
        """Extract text, positioned text blocks, image metadata and
        document metadata with PyMuPDF.

        Returns a dict with keys ``total_pages``, ``metadata``,
        ``pages`` (one entry per page) and ``images`` (flat list over
        all pages).
        """
        doc = fitz.open(file_path)
        try:
            result = {
                "total_pages": len(doc),
                "metadata": dict(doc.metadata),
                "pages": [],
                "images": []
            }

            for page_num in range(len(doc)):
                page = doc[page_num]

                # Plain text plus the "dict" layout output for blocks.
                text = page.get_text()
                text_blocks = page.get_text("dict")

                page_images = self._extract_images_from_page(page, page_num)
                result["images"].extend(page_images)

                page_info = {
                    "page_number": page_num + 1,  # 1-based for display
                    "text": text.strip(),
                    "text_blocks": self._process_text_blocks(text_blocks),
                    "image_count": len(page_images),
                    "bbox": list(page.rect.irect)
                }

                result["pages"].append(page_info)

            return result
        finally:
            # Fix: the original only closed the document on the success
            # path, leaking the handle if any page raised.
            doc.close()
    
    def _extract_tables_with_pdfplumber(self, file_path: Path) -> Dict:
        """Extract tables with pdfplumber; returns ``{"tables": [...]}``.

        Any extraction error is logged and swallowed so a document
        without parseable tables still yields an (empty) result.
        """
        collected = []

        try:
            with pdfplumber.open(file_path) as pdf:
                for page_idx, page in enumerate(pdf.pages):
                    for table_idx, raw in enumerate(page.extract_tables()):
                        # Skip empty or mostly-empty extractions.
                        if not raw or not self._is_valid_table(raw):
                            continue
                        collected.append({
                            "page_number": page_idx + 1,
                            "table_index": table_idx,
                            "rows": len(raw),
                            "columns": len(raw[0]) if raw else 0,
                            "data": raw,
                            "text_representation": self._table_to_text(raw),
                        })

        except Exception as e:
            logger.warning(f"表格提取失败: {e}")

        return {"tables": collected}
    
    def _enhance_pages_content(self, pages: List[Dict]) -> List[Dict]:
        """Augment each raw page dict with cleaned text, page structure,
        a page-type label and extracted key information."""
        result = []

        for raw_page in pages:
            page = raw_page.copy()

            cleaned = self._clean_text_content(raw_page["text"])
            page["cleaned_text"] = cleaned

            # All derived fields are computed from the cleaned text so
            # headers/footers do not pollute the analysis.
            page["structure"] = self._extract_page_structure(cleaned)
            page["page_type"] = self._identify_page_type(cleaned)
            page["key_info"] = self._extract_page_key_info(cleaned)

            result.append(page)

        return result
    
    def _clean_text_content(self, text: str) -> str:
        """Normalise extracted page text.

        Collapses runs of newlines/spaces, then drops header/footer
        lines, bare page-number lines and empty lines.
        """
        if not text:
            return ""

        # Collapse whitespace first so the line filters below see
        # normalised input.
        text = re.sub(r'\n+', '\n', text)
        text = re.sub(r' +', ' ', text)

        kept = []
        for raw_line in text.split('\n'):
            line = raw_line.strip()
            if not line:
                continue
            if self._is_header_footer(line) or self._is_page_number_line(line):
                continue
            kept.append(line)

        return '\n'.join(kept)
    
    def _is_header_footer(self, line: str) -> bool:
        """Return True for lines that look like running headers/footers
        (document-title lines, bare page numbers). Empty lines count
        as header/footer so they get dropped too."""
        if not line:
            return True

        # Anchored patterns for the header/footer shapes seen in this
        # class of manuals.
        patterns = (
            r"^零售贷款系统操作手册\s*$",
            r"^XX银行.*操作手册\s*$",
            r"^\d+\s*$",  # bare page number
            r"^第\s*\d+\s*页\s*$",
        )
        return any(re.match(p, line) for p in patterns)
    
    def _is_page_number_line(self, line: str) -> bool:
        """Return True when the line consists of nothing but digits."""
        return re.fullmatch(r'\d+\s*', line.strip()) is not None
    
    def _extract_page_structure(self, text: str) -> Dict:
        """Classify every non-empty line of *text* as a header, section,
        list item or paragraph and bucket it accordingly."""
        structure = {
            "headers": [],
            "sections": [],
            "lists": [],
            "paragraphs": [],
        }

        for raw in text.split('\n'):
            line = raw.strip()
            if not line:
                continue

            # First matching category wins; order mirrors specificity.
            if self._is_header_line(line):
                bucket = "headers"
            elif self._is_section_line(line):
                bucket = "sections"
            elif self._is_list_item(line):
                bucket = "lists"
            else:
                bucket = "paragraphs"
            structure[bucket].append(line)

        return structure
    
    def _is_header_line(self, line: str) -> bool:
        """True when *line* matches any configured header pattern."""
        return any(re.match(p, line) for p in self.header_patterns)
    
    def _is_section_line(self, line: str) -> bool:
        """True when *line* matches any configured section pattern."""
        return any(re.match(p, line) for p in self.section_patterns)
    
    def _is_list_item(self, line: str) -> bool:
        """True for bullet (•, -, *, +) and enumerated (1., a)) items."""
        bullet_or_number = (
            r"^[•·▪▫◦‣⁃]\s+",
            r"^[-*+]\s+",
            r"^\d+[.)]\s+",
            r"^[a-zA-Z][.)]\s+",
        )
        return any(re.match(p, line) for p in bullet_or_number)
    
    def _identify_page_type(self, text: str) -> str:
        """Heuristically classify a page by its cleaned text.

        Checks run in priority order: table of contents, operation
        instructions, notes, near-empty pages, generic content.
        """
        # Fix: dropped the unused `text_lower` local the original
        # computed but never read; also split the text only once.
        line_count = len(text.split('\n'))

        if "目录" in text and line_count > 10:
            return "目录页"
        if "菜单路径" in text and "功能描述" in text and "操作步骤" in text:
            return "操作说明页"
        if "注意事项" in text:
            return "注意事项页"
        if line_count < 5:
            return "简单页面"
        return "内容页"
    
    def _extract_page_key_info(self, text: str) -> Dict:
        """Pull menu paths, operation steps, warnings and notes out of
        the page text; each line lands in at most one category (first
        match wins)."""
        key_info = {
            "menu_paths": [],
            "operation_steps": [],
            "warnings": [],
            "important_notes": [],
        }

        # Hoisted out of the loop: compiled step prefix and keyword sets.
        step_prefix = re.compile(r'^\d+[.)]\s*')
        action_words = ("点击", "选择", "输入", "确认")

        for raw in text.split('\n'):
            line = raw.strip()

            # Fix: the original tested `"菜单路径" in line or "路径" in line`;
            # the first clause is redundant because "路径" is a substring
            # of "菜单路径".
            if "路径" in line:
                key_info["menu_paths"].append(line)

            # Numbered line containing an action verb -> operation step.
            elif step_prefix.match(line) and any(w in line for w in action_words):
                key_info["operation_steps"].append(line)

            elif any(w in line for w in ("注意", "警告", "重要", "禁止")):
                key_info["warnings"].append(line)

            elif any(w in line for w in ("说明", "备注", "提示")):
                key_info["important_notes"].append(line)

        return key_info

    def _extract_document_structure(self, pages: List[Dict]) -> Dict:
        """Aggregate per-page structures into a document-level outline
        (chapters, sections, and a nested navigation tree)."""
        chapters = []
        sections = []

        for page in pages:
            page_no = page["page_number"]
            page_structure = page.get("structure", {})

            # A header counts as a chapter when it carries a chapter
            # marker ("章") or starts with a Chinese-numeral enumeration.
            for header in page_structure.get("headers", []):
                if "章" in header or re.match(r'^[一二三四五六七八九十]+、', header):
                    chapters.append({"title": header, "page": page_no})

            for section in page_structure.get("sections", []):
                sections.append({"title": section, "page": page_no})

        return {
            "chapters": chapters,
            "sections": sections,
            "total_sections": len(sections),
            "navigation": self._build_navigation(chapters, sections),
        }

    def _build_navigation(self, chapters: List[Dict], sections: List[Dict]) -> List[Dict]:
        """Build a nested navigation tree: chapters in page order, each
        holding the sections that follow it. Sections seen before the
        first chapter are dropped (unchanged behavior).

        Fix: the original decided "is this a chapter?" with
        ``item in chapters`` — an O(n) equality scan per item (quadratic
        overall) that also misclassifies a section whose dict happens to
        equal a chapter's. Items are now tagged once during the merge.
        """
        # Tag each entry with its kind, then sort by page. list.sort()
        # is stable, so a chapter still precedes same-page sections.
        tagged = [("chapter", c) for c in chapters] + [("section", s) for s in sections]
        tagged.sort(key=lambda pair: pair[1]["page"])

        navigation = []
        current_chapter = None

        for kind, item in tagged:
            if kind == "chapter":
                current_chapter = {
                    "type": "chapter",
                    "title": item["title"],
                    "page": item["page"],
                    "sections": [],
                }
                navigation.append(current_chapter)
            elif current_chapter is not None:
                current_chapter["sections"].append({
                    "type": "section",
                    "title": item["title"],
                    "page": item["page"],
                })

        return navigation

    def _clean_table_data(self, tables: List[Dict]) -> List[Dict]:
        """Drop all-empty rows from every table, normalise cells to
        stripped strings, and discard tables left with no rows."""
        result = []

        for table in tables:
            rows = []
            for row in table["data"]:
                # Keep a row only if at least one cell has content.
                if not any(cell and str(cell).strip() for cell in row):
                    continue
                rows.append([str(cell).strip() if cell else "" for cell in row])

            if not rows:
                continue

            cleaned = dict(table)
            cleaned.update(
                data=rows,
                rows=len(rows),
                text_representation=self._table_to_text(rows),
            )
            result.append(cleaned)

        return result

    def _generate_enhanced_summary(self, pages: List[Dict], tables: List[Dict], structure: Dict) -> Dict:
        """Compute document-level statistics from the processed pages,
        cleaned tables and extracted structure."""
        page_count = len(pages)
        text_length = sum(len(p.get("cleaned_text", "")) for p in pages)

        # Frequency of each detected page type.
        type_counts = {}
        for p in pages:
            label = p.get("page_type", "未知")
            type_counts[label] = type_counts.get(label, 0) + 1

        return {
            "total_pages": page_count,
            "total_text_length": text_length,
            "total_images": sum(p.get("image_count", 0) for p in pages),
            "total_tables": len(tables),
            "avg_text_per_page": text_length // page_count if pages else 0,
            "has_images": any(p.get("image_count", 0) > 0 for p in pages),
            "has_tables": len(tables) > 0,
            "document_type": "银行零售贷款系统操作手册",
            "page_types": type_counts,
            "chapters_count": len(structure.get("chapters", [])),
            "sections_count": structure.get("total_sections", 0),
            "key_functions": self._extract_key_functions(pages),
            "structure_quality": self._assess_structure_quality(structure),
        }

    def _extract_key_functions(self, pages: List[Dict]) -> List[str]:
        """Derive function names from menu paths: the segment after the
        last "-" in each path, de-duplicated in first-seen order and
        capped at 20 entries."""
        seen = []

        for page in pages:
            menu_paths = page.get("key_info", {}).get("menu_paths", [])
            for path in menu_paths:
                if "-" not in path:
                    continue
                name = path.rsplit("-", 1)[-1].strip()
                if name and name not in seen:
                    seen.append(name)

        return seen[:20]

    def _assess_structure_quality(self, structure: Dict) -> str:
        """Grade the extracted outline by chapter/section counts."""
        chapters = len(structure.get("chapters", []))
        sections = structure.get("total_sections", 0)

        # (min chapters, min sections) -> grade, best grade first.
        thresholds = [
            (5, 20, "优秀"),
            (3, 10, "良好"),
            (1, 5, "一般"),
        ]
        for min_ch, min_sec, grade in thresholds:
            if chapters >= min_ch and sections >= min_sec:
                return grade
        return "较差"

    def _is_valid_table(self, table: List[List]) -> bool:
        """A table is valid when it has at least two rows and at least
        30% of its cells contain non-blank content."""
        if not table or len(table) < 2:
            return False

        cells = [cell for row in table for cell in row]
        if not cells:
            return False

        filled = sum(1 for cell in cells if cell and str(cell).strip())
        return filled / len(cells) >= 0.3

    def _table_to_text(self, table: List[List]) -> str:
        """Render a table as pipe-separated lines, one line per row;
        empty/None cells render as empty strings."""
        if not table:
            return ""

        try:
            rendered = [
                " | ".join(str(cell).strip() if cell else "" for cell in row)
                for row in table
            ]
            return "\n".join(rendered)

        except Exception as e:
            # Best-effort: a malformed row must not abort parsing.
            logger.warning(f"表格转文本失败: {e}")
            return ""

    def _process_text_blocks(self, text_dict: Dict) -> List[Dict]:
        """Reduce PyMuPDF's "dict" text output to a flat list of
        {type, text, bbox} entries, keeping text blocks only."""
        blocks = []

        try:
            for block in text_dict.get("blocks", []):
                if block.get("type") != 0:  # 0 marks a text block
                    continue

                # Concatenate every span of every line in the block.
                pieces = [
                    span.get("text", "")
                    for line in block.get("lines", [])
                    for span in line.get("spans", [])
                ]
                block_text = "".join(pieces).strip()

                if block_text:
                    blocks.append({
                        "type": "text",
                        "text": block_text,
                        "bbox": block.get("bbox", []),
                    })

        except Exception as e:
            logger.warning(f"文本块处理失败: {e}")

        return blocks

    def _extract_images_from_page(self, page, page_num: int) -> List[Dict]:
        """List image metadata for a page (no pixel data is kept).

        Each entry of ``page.get_images()`` is a tuple starting with
        (xref, smask, width, height, bpc, colorspace, ...).
        """
        images = []

        try:
            for img_index, img in enumerate(page.get_images()):
                images.append({
                    "page_number": page_num + 1,
                    "image_index": img_index,
                    "xref": img[0],
                    "width": img[2],
                    "height": img[3],
                    # Fix: the original stored img[4] as "colorspace",
                    # but per PyMuPDF img[4] is bits-per-component; the
                    # colorspace name is img[5]. Expose both.
                    "bpc": img[4],
                    "colorspace": img[5],
                    "has_image_data": True
                })

        except Exception as e:
            logger.warning(f"图片提取失败: {e}")

        return images

    def save_parsed_data(self, parsed_data: Dict, output_path: str) -> str:
        """Serialise *parsed_data* to UTF-8 JSON at *output_path*,
        creating parent directories as needed.

        Returns:
            The output path actually written, as a string.
        """
        try:
            target = Path(output_path)
            target.parent.mkdir(parents=True, exist_ok=True)

            with open(target, 'w', encoding='utf-8') as fh:
                json.dump(parsed_data, fh, ensure_ascii=False, indent=2)

            logger.info(f"解析结果已保存到: {target}")
            return str(target)

        except Exception as e:
            logger.error(f"保存解析结果失败: {e}")
            raise
