"""
PDF标准文档结构化提取工具
功能：自动提取目录（包括章节、附录和表的题录）、正文、表格数据和嵌入图片
环境要求：Python 3.8+，依赖见 requirements.txt
"""

import json
import pdfplumber
import re
import os
import csv
import logging
from pathlib import Path
from typing import List, Dict

def roman_to_int(s: str) -> int:
    """Translate a Roman numeral (case-insensitive) into its integer value.

    Scans the numeral right to left: a symbol smaller than the largest
    symbol already added to its right is subtracted (the "I" in "IX"),
    otherwise it is added. Characters outside I/V/X/L/C/D/M count as 0,
    so stray fill characters from PDF extraction do not raise.

    Returns 0 for an empty or fully unrecognized string.
    """
    symbol_values = {'I': 1, 'V': 5, 'X': 10, 'L': 50, 'C': 100, 'D': 500, 'M': 1000}
    digits = [symbol_values.get(ch, 0) for ch in s.upper().strip()]
    result = 0
    largest_added = 0  # highest value added so far (scanning right-to-left)
    for value in reversed(digits):
        if value < largest_added:
            result -= value
        else:
            result += value
            largest_added = value
    return result

class PDFStandardExtractor:
    """Structured extractor for Chinese standard-document PDFs.

    Pulls four kinds of artifacts out of a single PDF and writes them
    under ``output_dir``:

      * table of contents (chapters, appendices, table captions) -> ``toc.csv``
      * chapter body text                                        -> ``chapters/``
      * tables, one CSV per table                                -> ``tables/``
      * embedded images                                          -> ``figures/``

    Configuration (``self.config``):
      * ``file``   - input PDF path and output directory.
      * ``toc``    - the TOC is parsed as "line pairs": ``pair_pattern``
        matches lines such as "1 ... 1" or "A.1 ... 4"; ``page_pattern``
        matches lines made up only of leader characters plus a page number
        (Arabic or Roman). ``end_pattern`` merely flags the bibliography
        line and is NOT used as a stop condition, because appendix and
        table entries after it must still be exported.
      * ``tables`` - pdfplumber extraction strategy, table-title regex,
        and the minimum column count for a table to be saved.
      * ``figures`` - accepted embedded-image file extensions.
    """

    def __init__(self):
        # Static configuration; adjust paths/patterns here for other PDFs.
        self.config = {
            'file': {
                "pdf_path": "single/pdf_file.pdf",
                "output_dir": "output"
            },
            'toc': {
                'start_pattern': r'目\s*次',
                'pair_pattern': r'^(?P<num>[A-Z\d\.]+)\s+.*\s+(?P<page>\d+|[IVXLCDM]+)$',
                'page_pattern': r'^[\u2026\.\s\u2014]+(?P<page>\d+|[IVXLCDM]+)$',
                'end_pattern': r'^(参考文献)$'
            },
            'tables': {
                'strategy': "lines_strict",
                'title_pattern': r'^表\s*([A-Z]?\.?\d+)',
                'min_cols': 2
            },
            'figures': {
                'extensions': ['png', 'jpg', 'jpeg']
            }
        }
        self.pdf_path = Path(self.config['file']["pdf_path"])
        self.output_dir = Path(self.config['file']["output_dir"])
        self._setup_directories()
        self.logger = self._setup_logger()

    def _setup_directories(self) -> None:
        """Create the output directory tree (idempotent)."""
        (self.output_dir / "tables").mkdir(parents=True, exist_ok=True)
        (self.output_dir / "figures").mkdir(parents=True, exist_ok=True)
        (self.output_dir / "chapters").mkdir(parents=True, exist_ok=True)
        (self.output_dir / "metadata").mkdir(parents=True, exist_ok=True)

    def _setup_logger(self):
        """Return the extractor logger with file + console handlers attached.

        ``logging.getLogger("PDFExtractor")`` returns a process-wide
        singleton, so handlers are only attached when none exist yet;
        otherwise constructing the extractor twice would duplicate every
        log line (and leak open file handles).
        """
        logger = logging.getLogger("PDFExtractor")
        logger.setLevel(logging.DEBUG)
        if not logger.handlers:  # fix: attach handlers at most once per process
            formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
            file_handler = logging.FileHandler(self.output_dir / "extraction.log", encoding="utf-8")
            file_handler.setFormatter(formatter)
            logger.addHandler(file_handler)
            console_handler = logging.StreamHandler()
            console_handler.setFormatter(formatter)
            logger.addHandler(console_handler)
        return logger

    def extract_all(self) -> None:
        """Run the full pipeline: metadata, TOC, chapters, tables, images.

        Re-raises any exception after logging it with a traceback.
        """
        try:
            self.logger.info("开始处理文件：%s", self.pdf_path.name)
            with pdfplumber.open(self.pdf_path) as pdf:
                metadata = self._extract_metadata(pdf)
                self._save_metadata(metadata)
                toc = self._extract_toc(pdf)
                self._save_toc(toc)
                self._extract_content(pdf, toc)
                self._extract_tables(pdf)
                self._extract_images(pdf)
            self.logger.info("处理完成！结果保存在：%s", self.output_dir)
        except Exception as e:
            self.logger.error("处理过程中发生错误：%s", str(e), exc_info=True)
            raise

    def _extract_metadata(self, pdf) -> Dict:
        """Collect basic document metadata (title, author, dates, page count)."""
        metadata = {
            "title": pdf.metadata.get("Title", ""),
            "author": pdf.metadata.get("Author", ""),
            "creation_date": pdf.metadata.get("CreationDate", ""),
            "pages": len(pdf.pages)
        }
        self.logger.debug("提取的元数据: %s", metadata)
        return metadata

    def _save_metadata(self, metadata: Dict) -> None:
        """Write metadata as JSON; keep CJK text readable, not \\uXXXX-escaped."""
        with open(self.output_dir / "metadata" / "document_metadata.json", "w", encoding="utf-8") as f:
            # fix: ensure_ascii=False so Chinese titles/authors stay legible
            json.dump(metadata, f, indent=2, ensure_ascii=False)

    def _extract_toc(self, pdf) -> List[Dict]:
        """Parse the TOC pages using a line-pair approach.

        1. Collect all non-empty lines from every page containing "目次".
        2. Skip irrelevant lines (e.g. "GB2760—2014" and "目 次").
        3. Classify each entry by its title prefix: "附录" (appendix) or
           "表" (table), otherwise "章节" (chapter), stripping the prefix.
        4. If the following line matches ``pair_pattern`` or
           ``page_pattern``, take the page number from it (Arabic or
           Roman numerals; Roman values are converted via roman_to_int).

        Returns a list of dicts with keys
        ``section_num`` / ``title`` / ``start_page`` (0-based) / ``category``.
        """
        toc = []
        ignore = {"GB2760—2014", "目 次"}
        pair_pattern = re.compile(self.config['toc']['pair_pattern'])
        # NOTE(review): [IVXLCDM]+ only matches ASCII Roman numerals; some
        # PDFs emit single Unicode characters such as "Ⅲ" (U+2162), which
        # this pattern would miss — confirm against the source document.
        page_pattern = re.compile(self.config['toc']['page_pattern'])
        merged_lines = []
        toc_pages = self._find_toc_pages(pdf)
        self.logger.debug("目录页索引: %s", toc_pages)
        for page_num in toc_pages:
            page = pdf.pages[page_num]
            text = page.extract_text()
            if not text:
                continue
            self.logger.debug("页面 %d 原始文本: %s", page_num+1, text)
            lines = [line.strip() for line in text.split('\n') if line.strip()]
            merged_lines.extend(lines)
        self.logger.debug("合并后的目录行: %s", merged_lines)

        i = 0
        while i < len(merged_lines):
            line = merged_lines[i]
            if line in ignore:
                self.logger.debug("忽略行: %s", line)
                i += 1
                continue

            # Classify by title prefix ("附录"/"表" stripped), default "章节".
            if line.startswith("附录"):
                category = "附录"
                title_text = line[len("附录"):].strip()
            elif line.startswith("表"):
                category = "表"
                title_text = line[len("表"):].strip()
            else:
                category = "章节"
                title_text = line

            if i+1 < len(merged_lines):
                next_line = merged_lines[i+1]
                m_pair = pair_pattern.match(next_line)
                if m_pair:
                    entry_num = m_pair.group("num").strip()
                    page_str = m_pair.group("page").strip()
                    # Page numbers are stored 0-based for pdf.pages indexing.
                    if page_str.isdigit():
                        start_page = int(page_str) - 1
                    else:
                        start_page = roman_to_int(page_str) - 1
                    toc.append({
                        "section_num": entry_num,
                        "title": title_text,
                        "start_page": start_page,
                        "category": category
                    })
                    self.logger.debug("匹配行对: [%s] + [%s] -> (%s, %s, %s)", line, next_line, entry_num, start_page, category)
                    i += 2
                    continue
                else:
                    m_page = page_pattern.match(next_line)
                    if m_page:
                        page_str = m_page.group("page").strip()
                        if page_str.isdigit():
                            start_page = int(page_str) - 1
                        else:
                            start_page = roman_to_int(page_str) - 1
                        # No entry number on leader-only lines; reuse the title.
                        toc.append({
                            "section_num": title_text,
                            "title": title_text,
                            "start_page": start_page,
                            "category": category
                        })
                        self.logger.debug("匹配单独页码行: [%s] + [%s] -> (%s, %s, %s)", line, next_line, title_text, start_page, category)
                        i += 2
                        continue
            self.logger.debug("无法匹配题录行: %s", line)
            i += 1

        self.logger.info("提取到的题录项: %s", toc)
        return toc

    def _find_toc_pages(self, pdf) -> List[int]:
        """Return 0-based indices of every page whose text matches start_pattern."""
        toc_pages = []
        for idx, page in enumerate(pdf.pages):
            text = page.extract_text()
            if text and re.search(self.config['toc']['start_pattern'], text):
                self.logger.debug("发现包含‘目次’的页面: %d", idx+1)
                toc_pages.append(idx)
        return toc_pages

    def _extract_content(self, pdf, toc: List[Dict]) -> None:
        """Write one cleaned text file per "章节" (chapter) TOC entry.

        Appendix ("附录") and table ("表") entries are skipped here; a
        chapter's page range runs up to the page before the next chapter
        (or to the last page of the document).
        """
        chapters = [entry for entry in toc if entry.get("category", "章节") == "章节"]
        for idx, section in enumerate(chapters):
            try:
                start_page = section['start_page']
                end_page = self._find_section_end(pdf, idx, chapters)
                self.logger.info("提取章节：%s, 页码范围: %d 到 %d", section['title'], start_page+1, end_page+1)
                content = []
                for pnum in range(start_page, end_page+1):
                    page = pdf.pages[pnum]
                    # Raw text is logged before cleaning for debugging;
                    # _clean_page_text re-extracts the same page text.
                    raw_text = page.extract_text()
                    self.logger.debug("页面 %d 原始文本: %s", pnum+1, raw_text)
                    content.append(self._clean_page_text(page))
                # NOTE(review): titles are used verbatim in filenames; a title
                # containing a path separator would break this — confirm that
                # source TOC titles never contain "/".
                filename = f"{section['section_num']}_{section['title']}.txt"
                with open(self.output_dir / "chapters" / filename, "w", encoding="utf-8") as f:
                    f.write("\n".join(content))
                self.logger.info("已提取章节：%s %s", section['section_num'], section['title'])
            except Exception as e:
                self.logger.warning("提取章节失败：%s - %s", section['title'], str(e))

    def _find_section_end(self, pdf, current_idx: int, chapters: List[Dict]) -> int:
        """Return the last page (0-based) of chapter ``current_idx``.

        That is the page before the next chapter's start, or the final
        page of the document for the last chapter.
        """
        if current_idx + 1 < len(chapters):
            return chapters[current_idx + 1]['start_page'] - 1
        return len(pdf.pages) - 1

    def _clean_page_text(self, page) -> str:
        """Extract a page's text with page numbers and the standard's
        running header ("GB 2760—2014") stripped; "" if the page has no text.
        """
        text = page.extract_text()
        if text is None:
            self.logger.debug("页面 %d 无文本", page.page_number)
            return ""
        self.logger.debug("页面 %d 清洗前文本: %s", page.page_number, text)
        # Drop lines that are only a page number between two newlines.
        text = re.sub(r'\n\s*\d+\s*\n', '\n', text)
        # Remove the running document-number header (space optional).
        text = re.sub(r'GB ?2760—2014\s*', '', text)
        cleaned_text = text.strip()
        self.logger.debug("页面 %d 清洗后文本: %s", page.page_number, cleaned_text)
        return cleaned_text

    def _extract_tables(self, pdf) -> None:
        """Extract tables from every page and save each as CSV.

        Table titles matched on the page text (``title_pattern``) are paired
        with extracted tables by position; tables narrower than ``min_cols``
        are skipped. Failures are logged per page, not raised.
        """
        for idx, page in enumerate(pdf.pages):
            try:
                text = page.extract_text()
                if text:
                    table_titles = re.findall(self.config['tables']['title_pattern'], text, re.M)
                else:
                    table_titles = []
                tables = page.extract_tables({
                    "vertical_strategy": self.config['tables']['strategy'],
                    "horizontal_strategy": self.config['tables']['strategy'],
                    "min_words_vertical": 2,
                    "min_words_horizontal": 2
                })
                for table_idx, table in enumerate(tables):
                    # Column count is judged from the first row only.
                    if table and len(table[0]) >= self.config['tables']['min_cols']:
                        title = table_titles[table_idx] if table_idx < len(table_titles) else ""
                        self._save_table(table, page.page_number, table_idx+1, title)
            except Exception as e:
                self.logger.warning("页面 %d 表格提取失败：%s", idx+1, str(e))

    def _save_table(self, table_data, page_num: int, table_num: int, title: str = "") -> None:
        """Write one extracted table to tables/pageN_tableM[_title].csv.

        Embedded newlines in cells are flattened to spaces; None cells
        become empty strings.
        """
        filename = f"page{page_num}_table{table_num}"
        if title:
            filename += f"_{title.replace(' ', '_')}"
        filename += ".csv"
        with open(self.output_dir / "tables" / filename, "w", newline="", encoding="utf-8") as f:
            writer = csv.writer(f)
            for row in table_data:
                cleaned_row = [cell.replace('\n', ' ').strip() if cell else "" for cell in row]
                writer.writerow(cleaned_row)

    def _extract_images(self, pdf) -> None:
        """Save embedded raster images whose extension is in the allow-list.

        NOTE(review): this relies on pdfplumber image dicts exposing 'ext'
        and 'stream' keys; pdfplumber versions that omit 'ext' would make
        this skip every image silently — confirm against the installed
        pdfplumber version.
        """
        for idx, page in enumerate(pdf.pages):
            try:
                images = page.images
                for img_idx, img in enumerate(images):
                    if img.get('ext') in self.config['figures']['extensions']:
                        img_data = img['stream'].get_data()
                        filename = f"page{idx+1}_image{img_idx+1}.{img['ext']}"
                        with open(self.output_dir / "figures" / filename, "wb") as f:
                            f.write(img_data)
            except Exception as e:
                self.logger.warning("页面 %d 图片提取失败：%s", idx+1, str(e))

    def _save_toc(self, toc: List[Dict]) -> None:
        """Write the parsed TOC entries to output_dir/toc.csv with a header row."""
        with open(self.output_dir / "toc.csv", "w", newline="", encoding="utf-8") as f:
            fieldnames = ["section_num", "title", "start_page", "category"]
            writer = csv.DictWriter(f, fieldnames=fieldnames)
            writer.writeheader()
            writer.writerows(toc)

def main() -> None:
    """Script entry point: build the extractor and run the full pipeline."""
    PDFStandardExtractor().extract_all()


if __name__ == "__main__":
    main()