# -*- coding: utf-8 -*-
"""
PDF 加载器（依赖 BasePdfLoader 通用流程）
仅支持 应急预案 文件的处理方式
todo: 应该把图片下载放在类中，这样结构才完整
"""
import json
import os
import re
from pathlib import Path
from typing import Dict, List, Optional

import requests
from loguru import logger

try:
    from document_loader.pdf_utils import PDFMetadata, BasePdfLoader, PdfCommonUtils
except ImportError:
    import sys

    CURRENT_DIR = os.path.dirname(__file__)
    PROJECT_ROOT = os.path.abspath(os.path.join(CURRENT_DIR, os.pardir))
    if PROJECT_ROOT not in sys.path:
        sys.path.append(PROJECT_ROOT)
    from document_loader.pdf_utils import PDFMetadata, BasePdfLoader, PdfCommonUtils


class EpPdfLoader(BasePdfLoader):
    """
    Loader for emergency-response-plan ("应急预案") PDF documents.

    Pipeline: PDF -> markdown via a remote conversion API -> strip the table
    of contents -> split the body into numeric-heading chunks, with attachment
    ("附件") sections emitted as their own chunks.
    """

    def __init__(self, file_path: str, delete_old_file: bool = False):
        """
        Args:
            file_path: path to the source PDF.
            delete_old_file: when True, regenerate intermediate files even if
                they already exist on disk.
        """
        super().__init__(file_path, delete_old_file)
        self.delete_old_file = delete_old_file
        # SECURITY NOTE(review): the endpoint and API credentials are
        # hard-coded in source; they should come from configuration or
        # environment variables, not version control.
        self.API_URL = "http://60.205.246.174:8999/service/v1/pdf_to_markdown"
        self.headers = {
            "x-ti-app-id": "8e5ac65e5c09273415e4d6275493e3c3",
            "x-ti-secret-code": "f240bfbfcebbb42f9be0dafce5c0f44d",
        }

        # Primary (level-1) numeric headings. Supported forms:
        #   1. "1. xxx" / "2.xxx"        (digit + dot)
        #   2. "## 1 xxx" / "### 2 xxx"  (markdown, digit + whitespace)
        #   3. "2应急工作职责"            (digit immediately followed by text)
        # Fix: the original pattern required a dot or whitespace after the
        # number, so the documented form 3 never matched.  The added
        # alternative (?=[^.\s\d]) accepts a digit followed directly by
        # non-dot/non-space text, while "2.1"-style multi-level numbers are
        # still rejected (by excluding '.' in the lookahead and by (?!\d)).
        self.PRIMARY_HEADING_PATTERN = re.compile(
            r"^\s*(?:#{1,6}\s*)?(\d+)(?:\.\s*|\s+|(?=[^.\s\d]))(?!\d)([^\n\r]*)",
            re.MULTILINE,
        )

        # Secondary (level-2) numeric headings, e.g. "2.1 xxx" or "### 2.1 xxx".
        # The trailing (?!\.\d) rejects third-level numbers such as "2.1.1".
        # (The original "(?:\s*|\s+)" alternation is equivalent to "\s*".)
        self.SECONDARY_HEADING_PATTERN = re.compile(
            r"^\s*(?:#{1,6}\s*)?(\d+\.\d+)\s*(?!\.\d)([^\n\r]*)",
            re.MULTILINE,
        )

        # Attachment markers: plain "附件：xxx" and numbered "附件1：xxx".
        self.ATTACHMENT_PATTERN = re.compile(r"^附件[：:]\s*(.*)$", re.MULTILINE)
        self.ATTACHMENT_NUM_PATTERN = re.compile(r"^附件\s*(\d+)[：:]?\s*(.*)$", re.MULTILINE)

    def _pdf_to_markdown(self, json_path: Path, md_path: Path) -> None:
        """
        Send the PDF to the remote conversion API and persist the results.

        Args:
            json_path: destination for the full API result (pretty JSON).
            md_path: destination for the extracted markdown text.

        Raises:
            RuntimeError: the API answered with a non-200 business code.
            requests.RequestException: transport-level failure or HTTP error.
        """
        try:
            file_data = self.file_path.read_bytes()
            resp = requests.post(
                self.API_URL,
                headers=self.headers,
                files=[
                    ('file', (self.file_path.name, file_data, 'application/pdf'))
                ],
                # Fix: without a timeout an unreachable server hangs forever.
                timeout=300,
            )
            resp.raise_for_status()

            j = resp.json()
            if j.get("code") != 200:
                raise RuntimeError(f"API 错误: {j.get('message')}")

            result = j["data"]["result"]
            json_path.write_text(json.dumps(result, ensure_ascii=False, indent=2), encoding="utf-8")
            md_path.write_text(result.get("markdown", ""), encoding="utf-8")

        except Exception as e:
            logger.error(f"PDF转换失败: {e}")
            raise

    def clean_table_of_contents(self, text):
        """
        Remove the table-of-contents section from the document text.

        Args:
            text: full document text.

        Returns:
            Text with the TOC removed; returned unchanged if no TOC is found.
        """
        # A TOC starts at a line that is exactly "目录", optionally prefixed
        # with markdown '#' marks.
        toc_pattern = re.compile(r"^#{0,6}\s*目录\s*$", re.MULTILINE)

        toc_match = toc_pattern.search(text)
        if not toc_match:
            logger.debug("未发现目录部分")
            return text

        toc_start = toc_match.start()
        logger.debug("发现目录部分，起始位置：" + str(toc_start))

        # The TOC ends where the first real chapter heading begins, e.g.
        # "### 1. 总则" or "## 1 总则".  A markdown '#' prefix is required so
        # that plain TOC entries do not terminate the scan prematurely.
        next_chapter_pattern = re.compile(r"^#{1,6}\s*\d+[\.\s]\s*[^\d\.\s][^\.]*$", re.MULTILINE)

        # Search only from the TOC header onward.
        remaining_text = text[toc_start:]
        next_chapter_match = next_chapter_pattern.search(remaining_text)

        if next_chapter_match:
            # Chapter found: cut everything between the TOC header and it.
            toc_end = toc_start + next_chapter_match.start()
            logger.debug("目录结束位置：" + str(toc_end))
            logger.debug("找到的下一章节：" + next_chapter_match.group().strip())

            cleaned_text = text[:toc_start] + text[toc_end:]
            logger.debug("已移除目录部分，原文长度：" + str(len(text)) + " 清理后长度：" + str(len(cleaned_text)))

            return cleaned_text
        else:
            # No chapter after the TOC (TOC at end of document): truncate.
            logger.debug("未找到目录后的章节，截断目录部分")
            cleaned_text = text[:toc_start]
            logger.debug("已移除目录部分，原文长度：" + str(len(text)) + " 清理后长度：" + str(len(cleaned_text)))

            return cleaned_text

    def extract_attachments(self, text):
        """
        Split the document into body text and a list of attachments.

        Args:
            text: full document text (TOC already removed).

        Returns:
            (main_content, attachments): the body text and a list of dicts
            with keys ``attachment_number``, ``attachment_title``, ``content``.
        """
        attachments = []

        # Fix: the attachment section may start with either "附件：xxx" or a
        # numbered "附件1 xxx" line.  The original only searched the colon
        # form, so documents whose first attachment line was numbered were
        # never split.  Take the earliest match of either pattern.
        candidates = [
            m
            for m in (
                self.ATTACHMENT_PATTERN.search(text),
                self.ATTACHMENT_NUM_PATTERN.search(text),
            )
            if m is not None
        ]
        attachment_match = min(candidates, key=lambda m: m.start()) if candidates else None

        if attachment_match:
            # Split body and attachment section at the first marker.
            attachment_start = attachment_match.start()
            main_content = text[:attachment_start].strip()
            attachment_content = text[attachment_start:].strip()

            logger.debug("发现附件内容，正文长度：" + str(len(main_content)) + " 附件长度：" + str(len(attachment_content)))

            # Cut the section into individual numbered attachments
            # (附件1, 附件2, ...).
            attachment_matches = list(self.ATTACHMENT_NUM_PATTERN.finditer(attachment_content))

            if attachment_matches:
                logger.debug("找到具体附件编号：" + str(len(attachment_matches)) + "个")
                for i, match in enumerate(attachment_matches):
                    att_num = match.group(1)
                    att_title = match.group(2).strip()

                    # Each attachment runs until the next numbered marker (or
                    # the end of the section for the last one).
                    start_pos = match.start()
                    if i + 1 < len(attachment_matches):
                        end_pos = attachment_matches[i + 1].start()
                    else:
                        end_pos = len(attachment_content)

                    att_content = attachment_content[start_pos:end_pos].strip()

                    attachments.append({
                        "attachment_number": att_num,
                        "attachment_title": att_title,
                        "content": att_content
                    })
                    logger.debug("  附件" + att_num + "：" + att_title)
            else:
                # No numbered markers: keep the whole section as one block.
                attachments.append({
                    "attachment_number": "1",
                    "attachment_title": "附件",
                    "content": attachment_content
                })
                logger.debug("  整体附件作为一块")
        else:
            # No attachments at all.
            main_content = text
            logger.debug("未发现附件内容")

        return main_content, attachments

    def split_by_hierarchical_headings(self, text):
        """
        Split the document body by hierarchical numeric headings.

        A level-1 section that contains level-2 headings (e.g. "2.1") is
        split at those headings; otherwise the whole level-1 section becomes
        one chunk.  Level-3 headings (e.g. "3.1.1") stay inside their level-2
        chunk.  Every chunk's content is prefixed with its level-1 heading.

        Args:
            text: body text.

        Returns:
            List of dicts with keys ``primary_section`` and ``content``.
        """
        chunks = []

        # Locate all level-1 headings.
        primary_matches = list(self.PRIMARY_HEADING_PATTERN.finditer(text))
        if not primary_matches:
            return chunks

        logger.debug("找到一级标题数量：" + str(len(primary_matches)))

        for idx in range(len(primary_matches)):
            primary_match = primary_matches[idx]
            primary_number = primary_match.group(1).strip()  # level-1 number
            primary_title = (primary_match.group(2) or "").strip()  # level-1 title text

            # The section runs up to the next level-1 heading (or EOF).
            start_idx = primary_match.start()
            if idx + 1 < len(primary_matches):
                end_idx = primary_matches[idx + 1].start()
            else:
                end_idx = len(text)

            primary_section = text[start_idx:end_idx]
            primary_full_title = primary_number + ". " + primary_title

            logger.debug("处理一级标题：" + primary_full_title)

            # Level-2 headings inside this section.
            secondary_matches = list(self.SECONDARY_HEADING_PATTERN.finditer(primary_section))

            # Keep only the ones whose number actually belongs to this
            # section (e.g. "2.1" belongs to "2").
            valid_secondary_matches = []
            for sec_match in secondary_matches:
                sec_number = sec_match.group(1)
                if sec_number.startswith(primary_number + "."):
                    valid_secondary_matches.append(sec_match)

            if valid_secondary_matches:
                # Split at each level-2 heading.
                logger.debug("  找到二级标题数量：" + str(len(valid_secondary_matches)))

                for sec_idx in range(len(valid_secondary_matches)):
                    sec_match = valid_secondary_matches[sec_idx]
                    sec_number = sec_match.group(1).strip()  # e.g. "2.1"
                    sec_title = (sec_match.group(2) or "").strip()

                    # Offsets are relative to the level-1 section text.
                    sec_start_idx = sec_match.start()
                    if sec_idx + 1 < len(valid_secondary_matches):
                        sec_end_idx = valid_secondary_matches[sec_idx + 1].start()
                    else:
                        sec_end_idx = len(primary_section)

                    section_block = primary_section[sec_start_idx:sec_end_idx].strip()

                    # Prefix the chunk with its level-1 heading for context.
                    content_with_prefix = primary_full_title + " " + section_block

                    chunk_info = {
                        "primary_section": primary_full_title,
                        "content": content_with_prefix
                    }
                    chunks.append(chunk_info)
                    logger.debug("    生成二级chunk：" + sec_number + " " + sec_title)

            else:
                # No level-2 headings: the whole section is one chunk.
                logger.debug("  无二级标题，生成完整一级chunk")

                # Drop the heading line itself; keep only the body.
                newline_pos = primary_section.find("\n")
                if newline_pos == -1:
                    content = ""
                else:
                    content = primary_section[newline_pos + 1:].strip()

                # Empty body: fall back to the raw section text (heading only).
                if not content:
                    content = primary_section.strip()
                else:
                    # Prefix with the level-1 heading for context.
                    content = primary_full_title + " " + content

                chunk_info = {
                    "primary_section": primary_full_title,
                    "content": content
                }
                chunks.append(chunk_info)

        return chunks

    def _parse_content(self, cleaned_md_path: Path, output_json_path: Path) -> List[Dict]:
        """
        Parse a markdown file into a flat list of chunk dictionaries.

        Args:
            cleaned_md_path: markdown file to parse.
            output_json_path: kept for interface compatibility; this method
                does not write to it (the caller persists the chunks).

        Returns:
            List of chunk dicts.  (Fix: the original annotation claimed
            ``Dict`` but the method always returned a list.)
        """
        with open(cleaned_md_path, 'r', encoding='utf-8') as f:
            text = f.read()

        # Drop the table of contents.
        text = self.clean_table_of_contents(text)

        # Separate the body from the attachments.
        main_content, attachments = self.extract_attachments(text)

        # Split the body by hierarchical numeric headings.
        main_chunks = self.split_by_hierarchical_headings(main_content)

        all_chunks = []

        # Body chunks.
        for chunk in main_chunks:
            enriched_chunk = {"source_file": str(cleaned_md_path), "type": "main"}
            enriched_chunk.update(chunk)
            all_chunks.append(enriched_chunk)

        # Attachment chunks.
        for attachment in attachments:
            att_chunk = {
                "source_file": str(cleaned_md_path),
                "type": "attachment",
                "primary_section": "附件" + attachment["attachment_number"] + ": " + attachment["attachment_title"],
                "content": attachment["content"]
            }
            all_chunks.append(att_chunk)
            logger.info(f"  生成附件chunk：附件{attachment['attachment_number']}")

        return all_chunks

    def load(self, scene_type: str, delete_old_file: Optional[bool] = None) -> List[Dict]:
        """
        Run the full pipeline for this PDF and return the chunk list.

        Args:
            scene_type: scene tag appended to the per-scene chunk file name.
            delete_old_file: when given, overrides the instance setting.
                (Fix: the original truthiness test silently ignored an
                explicit ``False`` override.)

        Returns:
            List of chunk dicts.  (Fix: the original annotation said
            ``PDFMetadata`` but the method returns the chunks list.)
        """
        if delete_old_file is not None:
            self.delete_old_file = delete_old_file

        file_pure_name = self.file_path.stem
        md_save_dir = self.base_dir
        md_save_dir.mkdir(parents=True, exist_ok=True)

        parsed_json_path = md_save_dir / f"{file_pure_name}.json"
        parsed_md_path = md_save_dir / f"{file_pure_name}.md"
        output_json_path = md_save_dir / f"{file_pure_name}_chunks.json"  # chunked output
        output_json_path_scene = md_save_dir / f"{file_pure_name}_chunks_{scene_type}.json"  # per-scene copy

        if self.delete_old_file or not parsed_md_path.exists():
            logger.debug(f"[Loader] Generating markdown: {parsed_md_path}")
            self._pdf_to_markdown(parsed_json_path, parsed_md_path)

        if self.delete_old_file or not output_json_path.exists():
            logger.debug(f"[Loader] Parsing content: {output_json_path}")
            chunks = self._parse_content(parsed_md_path, output_json_path)

            with open(output_json_path, 'w', encoding='utf-8') as f:
                json.dump(chunks, f, ensure_ascii=False, indent=2)

            with open(output_json_path_scene, 'w', encoding='utf-8') as f:
                json.dump(chunks, f, ensure_ascii=False, indent=2)

        else:
            # Cached result from a previous run.
            with open(output_json_path, 'r', encoding='utf-8') as f:
                chunks = json.load(f)

        return chunks


if __name__ == "__main__":
    # Smoke-test entry point: convert one local PDF and dump the chunks.
    loader = EpPdfLoader("/Users/wzq/Desktop/关于印发《库场货垛坍塌事故处置方案》的通知(1) (2).pdf")
    # Fix: load() requires the positional scene_type argument; calling it
    # with no arguments raised TypeError before any work happened.
    result = loader.load("ep")
    logger.debug(result)
