import json
import os

import fitz
from pymupdf import mupdf

from app.utils.cv_tools import group_text_lines_by_proximity, detect_overlapping_text_lines
from app.utils.file_tools import extract_filename_without_extension
from app.utils.pymupdf_tools import pdf_get_content_region, get_pages_from_range, deduplicate_spans_in_blocks_with_stats
from app.utils.timing import timing_decorator
from tests.base_test import base_test_case
import re  # 用于清理多余的空白行

# Module-level logger and data directories shared by the helpers below.
logger = base_test_case.get_logger(__name__)
TEST_DATA_DIR = base_test_case.test_data_dir  # input fixtures (PDF files)
OUTPUT_DATA_DIR = base_test_case.output_data_dir  # generated debug artifacts
# Enable PyMuPDF's accelerated code path and verbose MuPDF exception messages;
# setdefault so an explicit environment override still wins.
os.environ.setdefault('PYMUPDF_USE_EXTRA', '1')
os.environ.setdefault('PYMUPDF_EXCEPTIONS_VERBOSE', '1')


@timing_decorator()
def load_pdf(input_pdf=None, page_range=None):
    """Extract span-level text records from a PDF and write debug artifacts.

    For every selected page, extraction is clipped to the detected content
    region; HTML/XHTML/XML renderings plus a cumulative JSON dump of the
    deduplicated spans are written to OUTPUT_DATA_DIR, and one record per
    text span is accumulated.

    Args:
        input_pdf: path of the PDF file to open.
        page_range: page-range spec accepted by get_pages_from_range
            (None selects all pages).

    Returns:
        tuple: (ocr_results, filename_without_ext, page_num) where
        ocr_results is the list of span dicts (text, bbox, size, font, ...)
        and page_num is the number of the last processed page (None when the
        range selected no pages).
    """
    zoom = 1.0
    filename_without_ext = f"{extract_filename_without_extension(input_pdf)}_v3"

    def _page_to_markdown_via_html(page):
        """Render a page's HTML representation as Markdown (debug aid)."""
        import html2text
        converter = html2text.HTML2Text()
        converter.ignore_links = False  # keep hyperlinks in the output
        return converter.handle(page.get_text("html"))

    doc = fitz.open(input_pdf)
    try:
        pages = get_pages_from_range(input_pdf, page_range=page_range)
        ocr_results = []
        page_num = None  # stays None when the page range is empty
        for page in pages:
            page_num = page.number
            # Restrict all extraction to the page's content region.
            clip_rect = pdf_get_content_region(page, zoom_factor=zoom, is_show_log=False)
            block_dict = page.get_text("dict", clip=clip_rect, sort=True)

            # Dump alternative renderings for manual inspection.
            # BUGFIX: the previous version also fetched 'blocks'/'words'/
            # 'rawdict'/'rawjson'/'json'/'text' into unused locals; the local
            # named `json` shadowed the json module and made json.dumps()
            # below raise AttributeError.
            html = page.get_text('html', clip=clip_rect, sort=True)
            with open(OUTPUT_DATA_DIR / f"{filename_without_ext}_{page_num:02d}.html", "w", encoding="utf-8") as file:
                file.write(html)
            _page_to_markdown_via_html(page)

            xhtml = page.get_text('xhtml', clip=clip_rect, sort=True)
            with open(OUTPUT_DATA_DIR / f"{filename_without_ext}_{page_num:02d}.xhtml", "w", encoding="utf-8") as file:
                file.write(xhtml)
            xml = page.get_text('xml', clip=clip_rect, sort=True)
            with open(OUTPUT_DATA_DIR / f"{filename_without_ext}_{page_num:02d}.xml", "w", encoding="utf-8") as file:
                file.write(xml)

            # Drop duplicated spans before collecting records.
            deduplicated_dict = deduplicate_spans_in_blocks_with_stats(block_dict, verbose=True)

            for block in deduplicated_dict.get("blocks", []):
                for line in block.get("lines", []):
                    for span in line.get("spans", []):
                        bbox = fitz.Rect(span["bbox"])
                        ocr_results.append({
                            "text": span["text"],
                            "bbox": list(bbox.irect),
                            "height": bbox.irect.height,
                            "width": bbox.irect.width,
                            "size": span["size"],
                            "flags": span["flags"],
                            "bidi": span["bidi"],
                            "char_flags": span["char_flags"],
                            "font": span["font"],
                            "color": span["color"],
                            "ascender": span["ascender"],
                            "descender": span["descender"],
                        })

            # Write the cumulative span list after each page.
            output_path = OUTPUT_DATA_DIR / f"{filename_without_ext}_{page_num:02d}.json"
            with open(output_path, "w", encoding="utf-8") as file:
                file.write(json.dumps(ocr_results, indent=4, ensure_ascii=False))

        return ocr_results, filename_without_ext, page_num
    finally:
        doc.close()


@timing_decorator()
def analyze_text_structure(ocr_results,
                           header_margin_ratio=0.05,
                           footer_margin_ratio=0.05,
                           proximity_vertical_threshold=20,
                           overlap_threshold=0.3,
                           thresholds=None):
    """Classify OCR span records into titles, headers, footers and paragraphs.

    Args:
        ocr_results: list of span dicts with 'text', 'bbox', 'height',
            'width' and optionally 'size'/'font' keys (see load_pdf).
        header_margin_ratio: fraction of page height treated as the header
            zone, default 0.05 (5%).
        footer_margin_ratio: fraction of page height treated as the footer
            zone, default 0.05 (5%).
        proximity_vertical_threshold: vertical distance threshold for
            proximity-based line grouping, default 20.
        overlap_threshold: threshold for overlapping-line detection,
            default 0.3.
        thresholds: optional dict merged over the built-in defaults.

    Returns:
        dict: grouped lines, detected elements and per-document statistics.
    """
    # Default thresholds, modeled after PP-StructureV3's configuration style.
    default_thresholds = {
        # Per-element-type thresholds.
        'paragraph_title': 0.3,
        'image': 0.5,
        'text': 0.4,
        'number': 0.5,
        'abstract': 0.5,
        'content': 0.5,
        'figure_table_chart_title': 0.5,
        'formula': 0.3,
        'table': 0.5,
        'reference': 0.5,
        'doc_title': 0.5,
        'footnote': 0.5,
        'header': 0.5,
        'algorithm': 0.5,
        'footer': 0.5,
        'seal': 0.45,
        'chart': 0.5,
        'formula_number': 0.5,
        'aside_text': 0.5,
        'reference_content': 0.5,

        # Title-detection thresholds.
        'title_size_ratio': 1.5,  # title size vs average font size
        'title_min_size': 16,  # absolute minimum size for a title
        'large_title_position_ratio': 0.2,  # top-of-page band for large titles
        'section_title_position_ratio': 0.5,  # upper-half band for section titles
        'section_title_size_ratio': 1.4,  # section-title size vs average
        'main_title_size_ratio': 1.3,  # main-title size vs average
    }

    # Merge caller-supplied overrides into the defaults.
    if thresholds:
        default_thresholds.update(thresholds)

    title_size_ratio = default_thresholds['title_size_ratio']
    title_min_size = default_thresholds['title_min_size']
    large_title_position_ratio = default_thresholds['large_title_position_ratio']
    section_title_position_ratio = default_thresholds['section_title_position_ratio']
    section_title_size_ratio = default_thresholds['section_title_size_ratio']
    main_title_size_ratio = default_thresholds['main_title_size_ratio']

    # Re-shape records into the [[line, ...]] layout expected by the CV
    # helpers: line = [box (4 corner points), [text, confidence]].
    formatted_results = [[
        [
            [
                [item['bbox'][0], item['bbox'][1]],  # top-left
                [item['bbox'][2], item['bbox'][1]],  # top-right
                [item['bbox'][2], item['bbox'][3]],  # bottom-right
                [item['bbox'][0], item['bbox'][3]]  # bottom-left
            ],
            [item['text'], 1.0]  # [text, confidence]
        ] for item in ocr_results
    ]]

    # Group text lines by vertical proximity.
    proximity_groups = group_text_lines_by_proximity(formatted_results, vertical_threshold=proximity_vertical_threshold)

    # Detect overlapping text lines.
    overlapping_pairs = detect_overlapping_text_lines(formatted_results, overlap_threshold=overlap_threshold)

    # Height/size statistics used by the title heuristics.
    heights = [item['height'] for item in ocr_results if item['text'].strip()]
    sizes = [item.get('size', item['height']) for item in ocr_results if item['text'].strip()]

    if heights:
        avg_height = sum(heights) / len(heights)
        max_height = max(heights)
        min_height = min(heights)
    else:
        avg_height = max_height = min_height = 0

    if sizes:
        avg_size = sum(sizes) / len(sizes)
        max_size = max(sizes)
        min_size = min(sizes)
    else:
        avg_size = max_size = min_size = 0

    # Sort top-to-bottom to analyze the page layout.
    sorted_items = sorted([item for item in ocr_results if item['text'].strip()],
                          key=lambda x: x['bbox'][1])

    paragraphs = []
    titles = []
    headers = []
    footers = []
    content_lines = []

    # Page boundaries and header/footer zones.
    if sorted_items:
        page_top = min(item['bbox'][1] for item in sorted_items)
        page_bottom = max(item['bbox'][3] for item in sorted_items)
        page_height = page_bottom - page_top

        header_zone_bottom = page_top + page_height * header_margin_ratio
        footer_zone_top = page_bottom - page_height * footer_margin_ratio
    else:
        # BUGFIX: page_height was previously left undefined on this branch.
        page_top = page_bottom = page_height = header_zone_bottom = footer_zone_top = 0

    for item in sorted_items:
        text = item['text'].strip()
        size = item.get('size', item['height'])
        y_position_top = item['bbox'][1]
        y_position_bottom = item['bbox'][3]

        if not text or text.isspace():
            continue

        # Header: text fully inside the top zone.
        if y_position_bottom <= header_zone_bottom:
            headers.append(item)
            continue

        # Footer: text fully inside the bottom zone.
        if y_position_top >= footer_zone_top:
            footers.append(item)
            continue

        # Title detection — mainly visual features plus font hints.
        is_title = False

        # Significantly larger than the average size.
        if size >= avg_size * title_size_ratio:
            is_title = True

        # Absolutely large text.
        if size >= title_min_size:
            is_title = True

        # Bold or title/heading-named fonts often indicate titles.
        font = item.get('font', '')
        if 'bold' in font.lower() or 'title' in font.lower() or 'heading' in font.lower():
            is_title = True

        # Large text near the top of the page is likely a chapter title.
        if y_position_top < page_top + page_height * large_title_position_ratio and size > avg_size * main_title_size_ratio:
            is_title = True

        # Larger text in the upper-middle band may be a section title.
        if (page_top + page_height * large_title_position_ratio <= y_position_top <
                page_top + page_height * section_title_position_ratio and
                size > avg_size * section_title_size_ratio):
            is_title = True

        if is_title:
            titles.append(item)
        else:
            content_lines.append(item)

    # Assemble paragraphs from the proximity groups.
    for group in proximity_groups:
        if group:  # skip empty groups
            # Sort group lines by vertical center.
            group.sort(key=lambda x: x['center'][1])

            # Concatenate the group's lines into paragraph text.
            paragraph_text = ''.join([line['text'] for line in group])

            # Paragraph bounding box.
            min_top = min(line['top'] for line in group)
            max_bottom = max(line['bottom'] for line in group)
            min_left = min(min(point[0] for point in line['box']) for line in group)
            max_right = max(max(point[0] for point in line['box']) for line in group)

            paragraphs.append({
                'text': paragraph_text,
                'lines': group,
                'bbox': [min_left, min_top, max_right, max_bottom],
                'line_count': len(group)
            })

    return {
        'proximity_groups': proximity_groups,
        'overlapping_pairs': overlapping_pairs,
        'titles': titles,
        'paragraphs': paragraphs,
        'headers': headers,
        'footers': footers,
        'content_lines': content_lines,
        'stats': {
            'avg_height': avg_height,
            'max_height': max_height,
            'min_height': min_height,
            'avg_size': avg_size,
            'max_size': max_size,
            'min_size': min_size,
            'page_top': page_top,
            'page_bottom': page_bottom,
            'page_height': page_bottom - page_top if page_bottom > page_top else 0,
            'header_margin_ratio': header_margin_ratio,
            'footer_margin_ratio': footer_margin_ratio,
            'proximity_vertical_threshold': proximity_vertical_threshold,
            'overlap_threshold': overlap_threshold,
            'thresholds': default_thresholds
        }
    }


def print_analyze_text_structure(analysis_result):
    """Pretty-print the result of analyze_text_structure to stdout."""
    print("\n=== 文本结构分析结果 ===")

    detected_titles = analysis_result['titles']
    print(f"检测到标题数量: {len(detected_titles)}")
    for entry in detected_titles:
        entry_size = entry.get('size', entry['height'])
        entry_font = entry.get('font', 'Unknown')
        print(
            f"  标题: '{entry['text']}' (大小: {entry_size:.1f}, 字体: {entry_font}, 位置: ({entry['bbox'][0]}, {entry['bbox'][1]}))")

    print(f"\n检测到页眉数量: {len(analysis_result['headers'])}")
    for entry in analysis_result['headers']:
        print(f"  页眉: '{entry['text']}'")

    print(f"\n检测到页脚数量: {len(analysis_result['footers'])}")
    for entry in analysis_result['footers']:
        print(f"  页脚: '{entry['text']}'")

    print(f"\n检测到段落数量: {len(analysis_result['paragraphs'])}")
    for ordinal, para in enumerate(analysis_result['paragraphs'], start=1):
        print(f"  段落 {ordinal}:")
        print(f"    内容: '{para['text']}'")
        print(f"    行数: {para['line_count']}")
        print(f"    位置: {para['bbox']}")

    print(f"\n检测到相近文本组数量: {len(analysis_result['proximity_groups'])}")
    print(f"检测到重叠文本对数量: {len(analysis_result['overlapping_pairs'])}")

    # Document-level statistics.
    doc_stats = analysis_result['stats']
    print(f"\n文档统计信息:")
    print(f"  平均文字高度: {doc_stats['avg_height']:.2f}")
    print(f"  平均文字大小: {doc_stats['avg_size']:.2f}")
    print(f"  最大文字大小: {doc_stats['max_size']:.2f}")
    print(f"  最小文字大小: {doc_stats['min_size']:.2f}")


@timing_decorator()
def analyze_text_structure_v2(ocr_results,
                              header_margin_ratio=0.05,
                              footer_margin_ratio=0.05,
                              proximity_vertical_threshold=20,
                              overlap_threshold=0.3,
                              thresholds=None):
    """Classify OCR spans into a multi-level title hierarchy plus body text.

    Primarily driven by line height and vertical position; font (bold)
    information is used as a secondary hint.

    Args:
        ocr_results: list of span dicts with 'text', 'bbox', 'height' and
            optionally 'font' keys (see load_pdf).
        header_margin_ratio: fraction of page height treated as the header
            zone, default 0.05 (5%).
        footer_margin_ratio: fraction of page height treated as the footer
            zone, default 0.05 (5%).
        proximity_vertical_threshold: vertical distance threshold for
            proximity-based line grouping, default 20.
        overlap_threshold: threshold for overlapping-line detection,
            default 0.3.
        thresholds: optional dict merged over the built-in defaults.

    Returns:
        dict: title lists per level, organized/flat paragraphs, headers,
        footers, special items and per-document statistics.
    """
    # Default threshold configuration.
    default_thresholds = {
        # Title detection.
        'main_title_height_ratio': 1.8,  # main-title height vs average height
        'main_title_min_height': 30,  # absolute minimum main-title height
        'main_title_position_ratio': 0.2,  # top-of-page band for main titles
        'section_title_height_ratio': 1.5,  # section-title height vs average
        'section_title_position_ratio': 0.5,  # upper-half band for section titles
        'subsection_title_height_ratio': 1.3,  # subsection height vs average
        'subsection_title_position_ratio': 0.7,  # band for subsection titles
        'bold_font_boost': 0.1,  # score bonus for bold fonts
        'title_min_bold_lines': 2,  # bold lines needed for a title paragraph

        # Special-text handling.
        'special_text_max_lines': 1,
        'special_text_min_length': 4,
        'special_text_max_length': 20,
        'special_text_height_ratio': 1.2,  # special text height vs average
    }

    # Merge caller-supplied overrides into the defaults.
    if thresholds:
        default_thresholds.update(thresholds)

    # Only the thresholds actually referenced below are extracted; the full
    # dict (including currently-unused entries) is still returned in stats.
    main_title_height_ratio = default_thresholds['main_title_height_ratio']
    main_title_min_height = default_thresholds['main_title_min_height']
    main_title_position_ratio = default_thresholds['main_title_position_ratio']
    section_title_position_ratio = default_thresholds['section_title_position_ratio']
    title_min_bold_lines = default_thresholds['title_min_bold_lines']
    special_text_min_length = default_thresholds['special_text_min_length']
    special_text_max_length = default_thresholds['special_text_max_length']
    special_text_height_ratio = default_thresholds['special_text_height_ratio']

    # Re-shape records into the [[line, ...]] layout expected by the CV
    # helpers: line = [box (4 corner points), [text, confidence]].
    formatted_results = [[
        [
            [
                [item['bbox'][0], item['bbox'][1]],  # top-left
                [item['bbox'][2], item['bbox'][1]],  # top-right
                [item['bbox'][2], item['bbox'][3]],  # bottom-right
                [item['bbox'][0], item['bbox'][3]]  # bottom-left
            ],
            [item['text'], 1.0]  # [text, confidence]
        ] for item in ocr_results
    ]]

    # Group text lines by vertical proximity.
    proximity_groups = group_text_lines_by_proximity(formatted_results, vertical_threshold=proximity_vertical_threshold)

    # Detect overlapping text lines.
    overlapping_pairs = detect_overlapping_text_lines(formatted_results, overlap_threshold=overlap_threshold)

    # Height statistics used by the title heuristics.
    heights = [item['height'] for item in ocr_results if item['text'].strip()]

    if heights:
        avg_height = sum(heights) / len(heights)
        max_height = max(heights)
        min_height = min(heights)
    else:
        avg_height = max_height = min_height = 0

    # Sort top-to-bottom to analyze the page layout.
    sorted_items = sorted([item for item in ocr_results if item['text'].strip()],
                          key=lambda x: x['bbox'][1])

    paragraphs = []
    main_titles = []  # level 1 — document title
    secondary_titles = []  # level 2 — subtitle
    section_titles = []  # level 3 — section title
    subsection_titles = []  # level 4 — subsection title
    paragraph_titles = []  # paragraph-level titles
    headers = []
    footers = []
    content_lines = []
    special_items = []  # short callout-style markers

    # Page boundaries and header/footer zones.
    if sorted_items:
        page_top = min(item['bbox'][1] for item in sorted_items)
        page_bottom = max(item['bbox'][3] for item in sorted_items)
        page_height = page_bottom - page_top

        header_zone_bottom = page_top + page_height * header_margin_ratio
        footer_zone_top = page_bottom - page_height * footer_margin_ratio
    else:
        # BUGFIX: page_height was previously left undefined on this branch.
        page_top = page_bottom = page_height = header_zone_bottom = footer_zone_top = 0

    def is_bold_font(item):
        """True when the font name suggests a bold/black/heavy face."""
        font = item.get('font', '')
        return 'bold' in font.lower() or 'Bold' in font or 'BLACK' in font.upper() or 'HEAVY' in font.upper()

    def is_special_text(item):
        """Heuristic for short single-line text slightly taller than average
        (e.g. callout labels)."""
        text = item['text'].strip()
        height = item['height']
        text_length = len(text)

        is_single_line = '\n' not in text
        is_short_text = special_text_min_length <= text_length <= special_text_max_length
        is_slightly_larger = avg_height > 0 and height > avg_height and height <= avg_height * special_text_height_ratio

        return is_single_line and is_short_text and is_slightly_larger

    # Map text -> original record so paragraph assembly can recover font info.
    # NOTE(review): duplicate texts collapse to the last record seen.
    text_to_item_map = {item['text'].strip(): item for item in ocr_results if item['text'].strip()}

    for item in sorted_items:
        text = item['text'].strip()
        height = item['height']
        y_position_top = item['bbox'][1]
        y_position_bottom = item['bbox'][3]

        if not text or text.isspace():
            continue

        # Special callout-style text is collected separately.
        if is_special_text(item):
            special_items.append(item)
            continue

        # Header: text fully inside the top zone.
        if y_position_bottom <= header_zone_bottom:
            headers.append(item)
            continue

        # Footer: text fully inside the bottom zone.
        if y_position_top >= footer_zone_top:
            footers.append(item)
            continue

        is_title = False

        # Level 1 (document title): tall text near the very top of the page.
        is_main_title = False
        if (height >= avg_height * main_title_height_ratio and height >= main_title_min_height and
                y_position_top < page_top + page_height * main_title_position_ratio):
            main_titles.append(item)
            is_main_title = True
            is_title = True

        # Level 2 (subtitle): upper half of the page, moderately tall.
        is_secondary_title = False
        if not is_main_title:
            if (height >= avg_height * 1.2 and  # at least slightly above average
                    y_position_top < page_top + page_height * section_title_position_ratio):
                secondary_titles.append(item)
                is_secondary_title = True
                is_title = True

        # Level 3 (section title).
        # NOTE(review): this condition is identical to the level-2 one and
        # only runs when level 2 did not match, so section_titles can never
        # be populated. Preserved as-is to keep behavior unchanged; the
        # intended condition probably involved section_title_height_ratio.
        is_section_title = False
        if not is_main_title and not is_secondary_title:
            if (height >= avg_height * 1.2 and
                    y_position_top < page_top + page_height * section_title_position_ratio):
                section_titles.append(item)
                is_section_title = True
                is_title = True

        # Level 4 (subsection title): moderate height, inside the content
        # band, indented away from the left margin.
        is_subsection_title = False
        if not is_main_title and not is_secondary_title and not is_section_title:
            height_ratio = height / avg_height if avg_height > 0 else 0
            relative_position = (y_position_top - page_top) / page_height if page_height > 0 else 0
            in_content_area = 0.2 <= relative_position <= 0.8
            moderate_height = 1.1 <= height_ratio <= 1.5
            # Not flush against the left edge — some indentation present.
            centered_or_left_aligned = item['bbox'][0] > 50

            if moderate_height and in_content_area and centered_or_left_aligned:
                subsection_titles.append(item)
                is_subsection_title = True
                is_title = True

        # Paragraph title: bold, roughly average height, not at page bottom.
        if not is_main_title and not is_secondary_title and not is_section_title and not is_subsection_title:
            if (is_bold_font(item) and
                    height >= avg_height * 1.0 and
                    y_position_top < page_top + page_height * 0.8):
                paragraph_titles.append(item)
                is_title = True

        # Anything that is not a title is body content.
        if not is_title:
            content_lines.append(item)

    # Organize content under the detected titles, in reading order.
    organized_paragraphs = []

    all_titles = []
    all_titles.extend([(title, 'main') for title in main_titles])
    all_titles.extend([(title, 'secondary') for title in secondary_titles])
    all_titles.extend([(title, 'section') for title in section_titles])
    all_titles.extend([(title, 'subsection') for title in subsection_titles])
    all_titles.extend([(title, 'paragraph') for title in paragraph_titles])
    all_titles.sort(key=lambda x: x[0]['bbox'][1])  # top-to-bottom

    # Hoisted out of the nested loops below (was rebuilt per item).
    title_items = [t[0] for t in all_titles]

    if all_titles:
        for i, (title, title_type) in enumerate(all_titles):
            # A section runs from its title to the next title (or page bottom).
            if i < len(all_titles) - 1:
                next_title_y = all_titles[i + 1][0]['bbox'][1]
            else:
                next_title_y = page_bottom if page_bottom > page_top else float('inf')

            title_y = title['bbox'][1]
            content_in_section = []

            # Collect items lying between this title and the next one.
            for item in sorted_items:
                item_y = item['bbox'][1]
                if title_y <= item_y < next_title_y:
                    # Exclude the titles themselves.
                    if item not in title_items:
                        content_in_section.append(item)

            organized_paragraphs.append({
                'title': title,
                'title_type': title_type,
                'content': content_in_section,
                'bbox': title['bbox']
            })
    else:
        # No titles detected: treat everything as one section.
        organized_paragraphs.append({
            'title': None,
            'title_type': 'none',
            'content': sorted_items,
            'bbox': [0, 0, 0, 0]
        })

    # Proximity-based paragraph grouping (as in v1), excluding groups whose
    # text matches a header or footer.
    header_texts = {header['text'].strip() for header in headers}
    footer_texts = {footer['text'].strip() for footer in footers}

    for group in proximity_groups:
        if group:  # skip empty groups
            # Sort group lines by vertical center.
            group.sort(key=lambda x: x['center'][1])

            # Concatenate the group's lines into paragraph text.
            paragraph_text = ''.join([line['text'] for line in group])

            # Exclude header/footer content.
            if paragraph_text.strip() in header_texts or paragraph_text.strip() in footer_texts:
                continue

            # Paragraph bounding box.
            min_top = min(line['top'] for line in group)
            max_bottom = max(line['bottom'] for line in group)
            min_left = min(min(point[0] for point in line['box']) for line in group)
            max_right = max(max(point[0] for point in line['box']) for line in group)

            # Count bold lines to decide whether this is a title paragraph.
            bold_line_count = 0
            contains_special_text = False
            for line in group:
                original_item = text_to_item_map.get(line['text'])
                if original_item is not None:
                    if is_bold_font(original_item):
                        bold_line_count += 1
                    if is_special_text(original_item):
                        contains_special_text = True

            paragraphs.append({
                'text': paragraph_text,
                'lines': group,
                'bbox': [min_left, min_top, max_right, max_bottom],
                'line_count': len(group),
                'bold_line_count': bold_line_count,
                'is_title_paragraph': bold_line_count >= title_min_bold_lines and not contains_special_text
            })

    return {
        'proximity_groups': proximity_groups,
        'overlapping_pairs': overlapping_pairs,
        'main_titles': main_titles,
        'secondary_titles': secondary_titles,
        'section_titles': section_titles,
        'subsection_titles': subsection_titles,
        'paragraph_titles': paragraph_titles,
        'organized_paragraphs': organized_paragraphs,
        'paragraphs': paragraphs,
        'headers': headers,
        'footers': footers,
        'content_lines': content_lines,
        'special_items': special_items,
        'stats': {
            'avg_height': avg_height,
            'max_height': max_height,
            'min_height': min_height,
            'page_top': page_top,
            'page_bottom': page_bottom,
            'page_height': page_bottom - page_top if page_bottom > page_top else 0,
            'header_margin_ratio': header_margin_ratio,
            'footer_margin_ratio': footer_margin_ratio,
            'proximity_vertical_threshold': proximity_vertical_threshold,
            'overlap_threshold': overlap_threshold,
            'thresholds': default_thresholds
        }
    }


def print_analyze_text_structure_v2(analysis_result):
    """Pretty-print the result of analyze_text_structure_v2 to stdout."""

    def show_title_group(label, entries, leading_newline=True):
        """Print one title category: its count plus per-entry details."""
        prefix = "\n" if leading_newline else ""
        print(f"{prefix}检测到{label}数量: {len(entries)}")
        for entry in entries:
            entry_font = entry.get('font', 'Unknown')
            entry_bold = 'bold' in entry_font.lower() or 'Bold' in entry_font
            print(
                f"  {label}: '{entry['text']}' (高度: {entry['height']:.1f}, 字体: {entry_font}, 加粗: {entry_bold}, 位置: ({entry['bbox'][0]}, {entry['bbox'][1]}))")

    print("\n=== 文本结构分析结果 (V2) ===")

    show_title_group("文档标题", analysis_result['main_titles'], leading_newline=False)
    show_title_group("副标题", analysis_result['secondary_titles'])
    show_title_group("章节标题", analysis_result['section_titles'])
    show_title_group("子标题", analysis_result['subsection_titles'])

    print(f"\n检测到页眉数量: {len(analysis_result['headers'])}")
    for entry in analysis_result['headers']:
        print(f"  页眉: '{entry['text']}'")

    print(f"\n检测到页脚数量: {len(analysis_result['footers'])}")
    for entry in analysis_result['footers']:
        print(f"  页脚: '{entry['text']}'")

    # Paragraph count excluding anything that duplicates a header/footer.
    header_texts = {h['text'] for h in analysis_result['headers']}
    footer_texts = {f['text'] for f in analysis_result['footers']}
    content_paragraphs = [p for p in analysis_result['paragraphs']
                          if p['text'] not in header_texts and p['text'] not in footer_texts]

    print(f"\n检测到段落数量: {len(content_paragraphs)} (已排除页眉页脚)")
    title_paragraphs = [p for p in content_paragraphs if p.get('is_title_paragraph', False)]
    print(f"  其中标题段落数量: {len(title_paragraphs)}")
    # Only the first five paragraphs are shown, truncated to 50 characters.
    for ordinal, para in enumerate(content_paragraphs[:5], start=1):
        print(f"  段落 {ordinal}:")
        print(f"    内容: '{para['text'][:50]}...'")
        print(f"    行数: {para['line_count']}")
        print(f"    加粗行数: {para.get('bold_line_count', 0)}")
        print(f"    是否标题段落: {para.get('is_title_paragraph', False)}")
        print(f"    位置: {para['bbox']}")

    print(f"\n检测到相近文本组数量: {len(analysis_result['proximity_groups'])}")
    print(f"检测到重叠文本对数量: {len(analysis_result['overlapping_pairs'])}")

    # Document-level statistics.
    doc_stats = analysis_result['stats']
    print(f"\n文档统计信息:")
    print(f"  平均文字高度: {doc_stats['avg_height']:.2f}")
    print(f"  最大文字高度: {doc_stats['max_height']:.2f}")
    print(f"  最小文字高度: {doc_stats['min_height']:.2f}")


def _write_title_entries(f, titles):
    """Write a numbered Markdown list of title entries to *f*.

    Each entry shows the title text plus height, font, a bold heuristic and
    the (x, y) position taken from the title's bbox.  Shared by the four
    title-level sections of save_analysis_result_as_markdown.
    """
    for i, title in enumerate(titles):
        font = title.get('font', 'Unknown')
        # NOTE(review): the second clause is redundant with the lowercase
        # check; kept to match the detection logic used elsewhere in the file.
        bold = 'bold' in font.lower() or 'Bold' in font
        f.write(f"{i + 1}. **{title['text']}**\n")
        f.write(f"   - 高度: {title['height']:.1f}\n")
        f.write(f"   - 字体: {font}\n")
        f.write(f"   - 是否加粗: {bold}\n")
        f.write(f"   - 位置: (x={title['bbox'][0]}, y={title['bbox'][1]})\n\n")


def save_analysis_result_as_markdown(analysis_result, output_path):
    """
    Save the result of analyze_text_structure as a human-readable Markdown file.

    Args:
        analysis_result: result dict returned by analyze_text_structure
            (expects keys: stats, headers, footers, paragraphs,
            proximity_groups, overlapping_pairs; optionally main_titles,
            secondary_titles, section_titles, subsection_titles,
            organized_paragraphs).
        output_path: path of the Markdown file to write (UTF-8).
    """
    with open(output_path, 'w', encoding='utf-8') as f:
        f.write("# 文本结构分析结果\n\n")

        # Document-level statistics.
        f.write("## 文档统计信息\n\n")
        stats = analysis_result['stats']
        f.write(f"- 平均文字高度: {stats['avg_height']:.2f}\n")
        f.write(f"- 最大文字高度: {stats['max_height']:.2f}\n")
        f.write(f"- 最小文字高度: {stats['min_height']:.2f}\n")
        f.write(f"- 页面顶部位置: {stats['page_top']:.2f}\n")
        f.write(f"- 页面底部位置: {stats['page_bottom']:.2f}\n")
        f.write(f"- 页面高度: {stats['page_height']:.2f}\n")
        f.write(f"- 页眉区域比例: {stats['header_margin_ratio']}\n")
        f.write(f"- 页脚区域比例: {stats['footer_margin_ratio']}\n")
        f.write(f"- 文本分组垂直阈值: {stats['proximity_vertical_threshold']}\n")
        f.write(f"- 重叠检测阈值: {stats['overlap_threshold']}\n\n")

        # Level-1 titles (document titles).
        f.write(f"## 文档标题 (数量: {len(analysis_result.get('main_titles', []))})\n\n")
        _write_title_entries(f, analysis_result.get('main_titles', []))

        # Level-2 titles (subtitles).
        if 'secondary_titles' in analysis_result:
            f.write(f"## 副标题 (数量: {len(analysis_result['secondary_titles'])})\n\n")
            _write_title_entries(f, analysis_result['secondary_titles'])

        # Level-3 titles (section titles).
        if 'section_titles' in analysis_result:
            f.write(f"## 章节标题 (数量: {len(analysis_result['section_titles'])})\n\n")
            _write_title_entries(f, analysis_result['section_titles'])

        # Level-4 titles (subsection / paragraph titles).
        if 'subsection_titles' in analysis_result:
            f.write(f"## 子标题/段落标题 (数量: {len(analysis_result['subsection_titles'])})\n\n")
            _write_title_entries(f, analysis_result['subsection_titles'])

        # Paragraphs organized under their titles.
        f.write(f"## 按标题组织的段落结构\n\n")
        # Header/footer text sets used to exclude boilerplate.  Computed once
        # here and reused for the raw paragraph section below (the original
        # built the identical sets twice).
        header_texts = {header['text'].strip() for header in analysis_result.get('headers', [])}
        footer_texts = {footer['text'].strip() for footer in analysis_result.get('footers', [])}

        # Loop-invariant mapping from title_type codes to display names.
        title_type_map = {
            'main': '文档标题',
            'secondary': '副标题',
            'section': '章节标题',
            'subsection': '子标题'
        }
        for section in analysis_result.get('organized_paragraphs', []):
            if section['title']:
                title_type_name = title_type_map.get(section['title_type'], '未知')
                f.write(f"### {title_type_name}: {section['title']['text']}\n\n")
            else:
                f.write(f"### 无标题内容\n\n")

            # Write the content under this title, skipping header/footer text.
            for content_item in section['content']:
                content_text = content_item['text'].strip()
                if content_text not in header_texts and content_text not in footer_texts:
                    f.write(f"{content_text}\n")
            f.write("\n")

        # Headers.
        f.write(f"## 页眉 (数量: {len(analysis_result['headers'])})\n\n")
        for i, header in enumerate(analysis_result['headers']):
            f.write(f"{i + 1}. {header['text']}\n\n")

        # Footers.
        f.write(f"## 页脚 (数量: {len(analysis_result['footers'])})\n\n")
        for i, footer in enumerate(analysis_result['footers']):
            f.write(f"{i + 1}. {footer['text']}\n\n")

        # Raw paragraph groups with headers/footers filtered out
        # (reuses the header_texts / footer_texts sets built above).
        content_paragraphs = [
            p for p in analysis_result.get('paragraphs', [])
            if p['text'].strip() not in header_texts and p['text'].strip() not in footer_texts
        ]

        f.write(f"## 原始段落分组 (数量: {len(content_paragraphs)})\n\n")
        for i, paragraph in enumerate(content_paragraphs):
            f.write(f"### 段落 {i + 1}\n\n")
            f.write(f"{paragraph['text']}\n\n")
            f.write(f"- 行数: {paragraph['line_count']}\n")
            f.write(f"- 位置: {paragraph['bbox']}\n\n")

        # Miscellaneous counters.
        f.write(f"## 其他信息\n\n")
        f.write(f"- 相近文本组数量: {len(analysis_result['proximity_groups'])}\n")
        f.write(f"- 重叠文本对数量: {len(analysis_result['overlapping_pairs'])}\n")

        # Threshold configuration used by the analysis.
        f.write(f"\n## 阈值配置\n\n")
        thresholds = stats['thresholds']
        for key, value in thresholds.items():
            f.write(f"- {key}: {value}\n")


"""" pdf 2 markdown"""


def apply_markdown_style(text, flags):
    """Wrap *text* in Markdown emphasis markers according to the span *flags* bitmask.

    Markers are applied inside-out in the order bold, italic, underline,
    strikethrough, so bold ends up innermost.  Standard Markdown only defines
    **bold** and *italic*; ++underline++ and ~~strikethrough~~ are common
    renderer extensions.  Superscript/subscript (flag bit 1) is deliberately
    not emitted — it would require HTML <sup>/<sub> tags.

    :param text: the span text to decorate.
    :param flags: integer bitmask (bit 16 = bold, 2 = italic, 64 = underline,
                  128 = strikethrough — TODO confirm against the producer of
                  these flags).
    :return: the decorated text.
    """
    # (bit mask, marker) pairs, applied in a fixed order.  Simple wrapping can
    # produce nesting some parsers dislike (e.g. "*__text__*"); this ordering
    # matches the original behaviour.
    decorations = (
        (16, "**"),   # bold
        (2, "*"),     # italic
        (64, "++"),   # underline (extension syntax)
        (128, "~~"),  # strikethrough (extension syntax)
    )
    for mask, marker in decorations:
        if flags & mask:
            text = f"{marker}{text}{marker}"
    return text


def process_text_block(block):
    """Render one text block (PyMuPDF "dict" format) as styled Markdown text.

    Each source line becomes one output line terminated by "\\n"; span texts
    are concatenated after applying apply_markdown_style to each.  A trailing
    blank line between blocks is left to the caller.
    """
    rendered = []
    for line in block.get('lines', []):
        styled_spans = [
            apply_markdown_style(span.get('text', ''), span.get('flags', 0))
            for span in line.get('spans', [])
        ]
        rendered.append("".join(styled_spans))
    # Newline after every line, including the last one.
    return "".join(line_text + "\n" for line_text in rendered)


def process_image_block(block, page_num, img_index):
    """Return a Markdown image placeholder for an image block.

    The image bytes are not extracted here; a deterministic filename based on
    page number and per-page index is emitted so images can be exported
    separately later (e.g. via page.get_pixmap(clip=block['bbox'])).

    Args:
        block: image block dict from page.get_text("dict"); currently unused,
            kept for interface stability with process_text_block.
        page_num: zero-based page number (rendered 1-based in the alt text).
        img_index: zero-based image counter within the page.

    Returns:
        A Markdown image line surrounded by newlines.
    """
    # The original also computed width/height from block['bbox'] but never
    # used them — that dead code is removed.
    alt_text = f"Image on page {page_num + 1}, index {img_index}"
    return f"\n![{alt_text}](image_p{page_num + 1}_i{img_index}.png)\n"


def page_dict_to_markdown(page_dict, page_num):
    """Convert a single page's get_text("dict") structure into Markdown.

    Text blocks (type 0) are rendered via process_text_block followed by a
    blank line as a soft paragraph break; image blocks (type 1) become
    placeholders via process_image_block.  Runs of three or more newlines are
    collapsed to a single paragraph break at the end.
    """
    pieces = []
    image_index = 0  # running counter used to name image placeholders

    for blk in page_dict.get('blocks', []):
        kind = blk.get('type', -1)
        if kind == 0:  # text block
            pieces.append(process_text_block(blk))
            # One extra newline per block; surplus blank lines are cleaned up
            # below rather than decided per-block.
            pieces.append("\n")
        elif kind == 1:  # image block
            pieces.append(process_image_block(blk, page_num, image_index))
            image_index += 1
        # Other block types are rare and intentionally ignored.

    # Keep at most two consecutive newlines (one blank line = paragraph).
    return re.sub(r'\n{3,}', '\n\n', "".join(pieces)).strip()


def pdf_to_markdown_manual(pdf_path, output_md_path):
    """
    Convert a PDF to Markdown page by page using the manual dict-walking pipeline.

    Each page is extracted with page.get_text("dict") and rendered through
    page_dict_to_markdown; all pages are concatenated and written as UTF-8.
    Errors are reported on stdout rather than raised (best-effort behaviour,
    as in the original); the document handle is always closed.

    Args:
        pdf_path: path of the input PDF file.
        output_md_path: path of the Markdown file to write.
    """
    # Initialise the handle up-front so the finally block can test it directly
    # instead of the fragile `'doc' in locals()` probe used before.
    doc = None
    try:
        doc = fitz.open(pdf_path)
        full_md = ""

        for page_num in range(doc.page_count):
            page = doc[page_num]
            # Page structure as a dict, then converted to Markdown.
            page_dict = page.get_text("dict")
            full_md += page_dict_to_markdown(page_dict, page_num)
            # Optional per-page separators were considered here and left out.

        with open(output_md_path, "w", encoding="utf-8") as f:
            f.write(full_md)

        print(f"PDF '{pdf_path}' 已成功通过 manual 方法转换为 Markdown 并保存到 '{output_md_path}'")

    except Exception as e:
        # Broad by design: this is a CLI-style helper that reports and moves on.
        print(f"处理PDF时出错: {e}")
    finally:
        if doc is not None:
            doc.close()


import statistics  # for quantile/median computations below (ideally this import would live at the top of the file)


def process_page_to_markdown(json_data, para_spacing_multiplier=1.5):
    """
    Convert one page of PyMuPDF get_text("dict")-style data into a Markdown
    draft and collect statistics along the way.

    Heuristics used:
      * paragraph breaks: a vertical gap between consecutive text blocks
        larger than avg_line_height * para_spacing_multiplier;
      * headings: a line whose largest span font size exceeds fixed pt
        thresholds (H1..H6);
      * list items: lines starting with '-'/'*'/'+' followed by a space, or
        '<digits>.' followed by a space (or end of line).

    :param json_data: dict, page data loaded from page_0.json.
    :param para_spacing_multiplier: float, multiplier used to derive the
                                    paragraph-gap threshold:
                                    threshold = avg line height * multiplier.
                                    Defaults to 1.5.
    :return: tuple, (str: generated Markdown text, dict: statistics)
    """
    markdown_lines = []
    stats = {
        "total_blocks": 0,
        "text_blocks": 0,
        "total_lines": 0,
        "paragraphs": 0,  # paragraph count (detected via vertical gaps)
        "headings": {f"h{i}": 0 for i in range(1, 7)},  # counts for h1..h6
        "lists_items": 0,  # number of list items
        "characters": 0,  # total characters (spaces excluded)
        "words": 0,  # total words (naive whitespace split)
        "avg_line_height": 0,
        "paragraph_threshold": 0,
        "size_samples": [],  # all span font sizes, kept for later analysis
        "processed_lines": []  # raw text of every line, used for the preview output
    }

    # 1. Fetch the list of blocks.
    blocks = json_data.get("blocks", [])
    stats["total_blocks"] = len(blocks)

    # --- Pre-pass: compute the average line height and other base data ---
    total_line_height = 0
    line_count = 0

    for block in blocks:
        if block.get("type") == 0:  # text blocks only
            stats["text_blocks"] += 1
            lines = block.get("lines", [])
            for line in lines:
                stats["total_lines"] += 1
                # Line height from the bbox (bottom - top).
                bbox = line.get("bbox", [0, 0, 0, 0])
                line_height = bbox[3] - bbox[1]
                if line_height > 0:  # skip degenerate heights; also avoids division issues
                    total_line_height += line_height
                    line_count += 1

                # Collect span font sizes and the line's raw text.
                spans = line.get("spans", [])
                line_text_for_stats = ""
                for span in spans:
                    size = span.get("size", 0)
                    if size > 0:
                        stats["size_samples"].append(size)
                    text = span.get("text", "")
                    line_text_for_stats += text

                # Update character and word counts.
                stats["characters"] += len(line_text_for_stats.replace(' ', ''))  # characters excluding spaces
                stats["words"] += len(line_text_for_stats.split())  # naive whitespace tokenisation
                # Keep the pre-processing line text for the preview/summary output.
                stats["processed_lines"].append(line_text_for_stats.strip())

    stats["avg_line_height"] = total_line_height / line_count if line_count > 0 else 12  # fall back to 12 pt
    stats["paragraph_threshold"] = stats["avg_line_height"] * para_spacing_multiplier

    # --- Heading thresholds (pt-based, informed by the collected samples) ---
    # Optionally compute data-driven threshold *suggestions* (reported in
    # stats only; the fixed thresholds below are what is actually used).
    if stats["size_samples"]:
        try:
            # Use the ~90th / 75th / 60th percentiles as H1/H2/H3 hints,
            # degrading gracefully when there are too few samples.
            p90 = statistics.quantiles(stats["size_samples"], n=10)[8] if len(stats["size_samples"]) >= 10 else max(stats["size_samples"])
            p75 = statistics.quantiles(stats["size_samples"], n=4)[2] if len(stats["size_samples"]) >= 4 else (p90 + statistics.median(stats["size_samples"])) / 2
            p60 = statistics.quantiles(stats["size_samples"], n=5)[2] if len(stats["size_samples"]) >= 5 else statistics.median(stats["size_samples"])
            dynamic_thresholds_hint = {
                "H1_suggested": round(p90, 2),
                "H2_suggested": round(p75, 2),
                "H3_suggested": round(p60, 2)
            }
            stats.update(dynamic_thresholds_hint)
        except Exception:
            # Quantile computation failed (too few samples, etc.) — skip the hints.
            pass

    # Fixed thresholds in pt (adjust as needed).
    title_size_thresholds = {
        1: 24,  # H1: > 24pt
        2: 20,  # H2: > 20pt
        3: 16,  # H3: > 16pt
        4: 14,  # H4: > 14pt
        5: 12,  # H5: > 12pt
        6: 10  # H6: > 10pt
    }
    # --- End of pre-pass ---

    # Bottom y of the previous text block, used to measure vertical gaps.
    previous_block_bottom = None
    # Start the paragraph count at 1: the first paragraph has no gap before it.
    if any(b.get("type") == 0 for b in blocks):
        stats["paragraphs"] = 1

    for i, block in enumerate(blocks):
        block_type = block.get("type", -1)

        # Type 0 is a text block.
        if block_type == 0:
            lines = block.get("lines", [])

            # --- Paragraph separation ---
            current_block_bbox = block.get("bbox", [0, 0, 0, 0])
            current_block_top = current_block_bbox[1]

            if previous_block_bottom is not None:
                vertical_gap = current_block_top - previous_block_bottom
                if vertical_gap > stats["paragraph_threshold"]:
                    markdown_lines.append("")  # empty string -> blank line -> Markdown paragraph break
                    stats["paragraphs"] += 1  # one more paragraph

            # --- End of paragraph separation ---

            # Process each line in the block.
            for line in lines:
                line_text_parts = []
                spans = line.get("spans", [])

                # Gather all text fragments of this line.
                for span in spans:
                    text = span.get("text", "")
                    line_text_parts.append(text)

                line_text = "".join(line_text_parts).rstrip()  # join and drop trailing whitespace

                if not line_text.strip():  # skip whitespace-only lines
                    continue

                # --- Heading detection ---
                # The largest span size on the line decides the heading level.
                max_font_size = 0
                for span in spans:
                    size = span.get("size", 0)
                    if size > max_font_size:
                        max_font_size = size

                header_level = 0
                # Iterate levels 6..1 so the *smallest* level (largest
                # threshold) that matches wins... NOTE(review): reverse=True
                # visits level 6 (10pt) first, so any line > 10pt is tagged
                # h6 before larger thresholds are tried — confirm intended.
                for level, threshold in sorted(title_size_thresholds.items(), reverse=True):
                    if max_font_size > threshold:
                        header_level = level
                        break

                if header_level > 0:
                    stats["headings"][f"h{header_level}"] += 1  # update heading stats

                header_prefix = "#" * header_level + " " if header_level > 0 else ""
                # --- End of heading detection ---

                # --- List detection ---
                list_prefix = ""
                stripped_line_text = line_text.lstrip()
                original_len = len(line_text)
                stripped_len = len(stripped_line_text)
                leading_spaces_removed = original_len - stripped_len

                is_list_item = False
                # Bullet list: "-", "*" or "+" followed by a space.
                if stripped_line_text.startswith(('-', '*', '+')) and len(stripped_line_text) > 1 and stripped_line_text[1] == ' ':
                    list_prefix = "- "
                    line_text = " " * leading_spaces_removed + stripped_line_text[2:].lstrip()
                    is_list_item = True
                # Ordered list: "<digits>." followed by a space or end of line.
                elif stripped_line_text[0].isdigit():
                    dot_index = stripped_line_text.find('.')
                    if dot_index > 0:
                        number_part = stripped_line_text[:dot_index]
                        if number_part.isdigit():
                            rest_part = stripped_line_text[dot_index + 1:]
                            if not rest_part or rest_part[0] == ' ':
                                list_prefix = f"{number_part}. "
                                prefix_len_total = leading_spaces_removed + dot_index + 1
                                if prefix_len_total < len(line_text) and line_text[prefix_len_total] == ' ':
                                    line_text = " " * leading_spaces_removed + line_text[prefix_len_total + 1:]
                                else:
                                    line_text = " " * leading_spaces_removed + rest_part.lstrip()
                                is_list_item = True

                if is_list_item:
                    stats["lists_items"] += 1  # one more list item
                # --- End of list detection ---

                # --- Edge case: a list marker with no content after it ---
                if list_prefix and not line_text.strip():
                    list_prefix = ""
                    stats["lists_items"] -= 1  # undo the count since the item was cancelled
                # --- End of edge case handling ---

                # Assemble the final Markdown line (heading wins over list).
                final_line = ""
                if header_prefix:
                    final_line = f"{header_prefix}{line_text.strip()}"
                elif list_prefix:
                    final_line = f"{line_text[:leading_spaces_removed]}{list_prefix}{line_text[leading_spaces_removed:].lstrip()}"
                else:
                    final_line = line_text

                markdown_lines.append(final_line)

            # Remember the bottom of this block for the next gap measurement.
            previous_block_bottom = current_block_bbox[3]

        # Other block types (e.g. images) could be handled here.

    return "\n".join(markdown_lines), stats


def print_statistics(stats):
    """Print a formatted console summary of the stats dict produced by
    process_page_to_markdown: block/line/paragraph counts, heading and list
    totals, character/word counts, layout thresholds, and a short preview of
    the first and last few text lines."""
    divider = "-" * 20

    print("\n========== 文档统计信息 ==========")
    print(f"总块数 (Blocks): {stats['total_blocks']}")
    print(f"文本块数 (Text Blocks): {stats['text_blocks']}")
    print(f"总行数 (Lines): {stats['total_lines']}")
    print(f"段落数 (Paragraphs): {stats['paragraphs']}")
    print(divider)
    print("标题统计:")
    heading_total = 0
    for heading_name, heading_count in stats['headings'].items():
        print(f"  {heading_name.upper()}: {heading_count}")
        heading_total += heading_count
    print(f"  总计标题: {heading_total}")
    print(divider)
    print(f"列表项数 (List Items): {stats['lists_items']}")
    print(divider)
    print(f"总字符数 (Characters, excl. spaces): {stats['characters']}")
    print(f"总词数 (Words, space-delimited): {stats['words']}")
    print(divider)
    print(f"平均行高 (Avg Line Height): {stats['avg_line_height']:.2f} pt")
    print(f"段落间距阈值 (Paragraph Threshold): {stats['paragraph_threshold']:.2f} pt")
    # The dynamic hints are only present when enough size samples existed.
    if 'H1_suggested' in stats:
        print(divider)
        print("动态标题阈值建议 (仅供参考):")
        print(f"  H1 建议 > {stats['H1_suggested']} pt (90th percentile)")
        print(f"  H2 建议 > {stats['H2_suggested']} pt (75th percentile)")
        print(f"  H3 建议 > {stats['H3_suggested']} pt (60th percentile)")
    print("=" * 30)

    # --- Text preview section ---
    print("\n========== 简略文本信息 ==========")
    collected_lines = stats.get('processed_lines', [])
    if not collected_lines:
        print("未检测到文本行。")
    else:
        preview_count = 5

        def shorten(raw):
            # Cap each previewed line at 100 characters.
            return raw[:100] + '...' if len(raw) > 100 else raw

        print(f"前 {preview_count} 行文本预览:")
        for idx, raw_line in enumerate(collected_lines[:preview_count]):
            print(f"  [{idx + 1:2d}] {shorten(raw_line)}")

        if len(collected_lines) > preview_count:
            print("  ...")
            print(f"后 {preview_count} 行文本预览:")
            tail_start = max(preview_count, len(collected_lines) - preview_count)
            for idx, raw_line in enumerate(collected_lines[tail_start:], start=tail_start + 1):
                print(f"  [{idx:2d}] {shorten(raw_line)}")
    print("=" * 30)


if __name__ == "__main__":
    # input_pdf = TEST_DATA_DIR / "25-注会-轻1-财务成本管理[上册](第3章).pdf"
    # input_pdf = TEST_DATA_DIR / "1715340454803.pdf"
    # input_pdf = TEST_DATA_DIR / "1715306099910.pdf"
    # input_pdf = TEST_DATA_DIR / "1711605374231.pdf"
    input_pdf = TEST_DATA_DIR / "1715339805571.pdf"
    #
    # markdown_output_path = OUTPUT_DATA_DIR / "output_manual.md"
    # pdf_to_markdown_manual(input_pdf, markdown_output_path)

    page_range = (0, 0)
    zoom = 1.0
    filename_without_ext = f"{extract_filename_without_extension(input_pdf)}_v3"

    doc = fitz.open(input_pdf)
    try:
        pages = get_pages_from_range(input_pdf, page_range=page_range)
        ocr_results = []
        for page in pages:
            page_num = page.number
            # 获取页内容区域
            clip_rect = pdf_get_content_region(page, zoom_factor=zoom, is_show_log=False)
            block_dict = page.get_text("dict", clip=clip_rect, sort=True)

            # # text = page.get_text()
            # # dict = page.get_text('dict', clip=clip_rect, sort=True)
            # blocks = page.get_text('blocks', clip=clip_rect, sort=True)
            # words = page.get_text('words', clip=clip_rect, sort=True)
            # rawdict = page.get_text('rawdict', clip=clip_rect, sort=True)
            # rawjson = page.get_text('rawjson', clip=clip_rect, sort=True)
            # json = page.get_text('json', clip=clip_rect, sort=True)
            # # html = page.get_text('html', clip=clip_rect, sort=True, flags=mupdf.FZ_STEXT_PRESERVE_IMAGES)
            # html = page.get_text('html', clip=clip_rect, sort=True)
            # with open(OUTPUT_DATA_DIR / f"{filename_without_ext}_{page_num:02d}.html", "w", encoding="utf-8") as file:
            #     file.write(html)
            #
            # def pdf_page_to_markdown_via_html(page):
            #     import html2text
            #     html_str = page.get_text("html")
            #     h = html2text.HTML2Text()
            #     h.ignore_links = False  # 可根据需要调整选项
            #     # h.body_width = 0 # 可选：禁用自动换行
            #     md_str = h.handle(html_str)
            #     return md_str
            #
            # pdf_page_to_markdown_via_html(page)
            # xhtml = page.get_text('xhtml', clip=clip_rect, sort=True)
            # with open(OUTPUT_DATA_DIR / f"{filename_without_ext}_{page_num:02d}.xhtml", "w", encoding="utf-8") as file:
            #     file.write(xhtml)
            # xml = page.get_text('xml', clip=clip_rect, sort=True)
            # with open(OUTPUT_DATA_DIR / f"{filename_without_ext}_{page_num:02d}.xml", "w", encoding="utf-8") as file:
            #     file.write(xml)
            #
            # text = page.get_text('text', clip=clip_rect, sort=True)

            # 去除重复的span
            deduplicated_dict = deduplicate_spans_in_blocks_with_stats(block_dict, verbose=False)

            # 2. 处理数据
            para_spacing_multiplier = 1.5  # 可调整
            markdown_output, statistics_info = process_page_to_markdown(deduplicated_dict, para_spacing_multiplier=para_spacing_multiplier)

            # 3. 输出统计信息
            print_statistics(statistics_info)

            # 4. 输出 Markdown 结果
            print(f"\n--- 生成的 Markdown 草稿 (前10行) ---")
            md_lines = markdown_output.splitlines()
            for i, line in enumerate(md_lines[:10]):
                print(f"{i + 1:2d}: {line}")
            if len(md_lines) > 10:
                print("  ...")
            print("-" * 20)

            markdown_output_path = OUTPUT_DATA_DIR / f"{filename_without_ext}_{page_num:02d}process_page_to_markdown.md"
            with open(markdown_output_path, "w", encoding="utf-8") as file:
                file.write(markdown_output)

            # for block in deduplicated_dict.get("blocks", []):
            #     for line in block.get("lines", []):
            #         for span in line.get("spans", []):
            #             text = span["text"]
            #             bbox = fitz.Rect(span["bbox"])
            #             ocr_results.append({
            #                 "text": text,
            #                 "bbox": list(bbox.irect),
            #                 "height": bbox.irect.height,
            #                 "width": bbox.irect.width,
            #                 "size": span["size"],
            #                 "flags": span["flags"],
            #                 "bidi": span["bidi"],
            #                 "char_flags": span["char_flags"],
            #                 "font": span["font"],
            #                 "color": span["color"],
            #                 # "alpha": span["alpha"],
            #                 "ascender": span["ascender"],
            #                 "descender": span["descender"],
            #             })
            # output_path = OUTPUT_DATA_DIR / f"{filename_without_ext}_{page_num:02d}.json"
            # json_str = json.dumps(ocr_results, indent=4, ensure_ascii=False)
            # if output_path:
            #     with open(output_path, "w", encoding="utf-8") as file:
            #         file.write(json_str)

    finally:
        doc.close()

    #
    # ocr_results, filename_without_ext, page_num = load_pdf(input_pdf, page_range)
    #
    # # 使用group_text_lines_by_proximity,detect_overlapping_text_lines,calculate_rectangle_center 函数分析出文本的段落章节，标题等
    # # 自定义所有阈值（类似PP-StructureV3的配置方式）
    # all_thresholds = {
    #     'title_size_ratio': 1.2,
    #     'title_min_size': 16,
    #     'large_title_position_ratio': 0.2,
    #     'section_title_position_ratio': 0.5,
    #     'section_title_size_ratio': 1.4,
    #     'main_title_size_ratio': 1.3,
    # }
    # analysis_result = analyze_text_structure_v2(ocr_results,
    #                                             header_margin_ratio=0.1,
    #                                             footer_margin_ratio=0.0,
    #                                             thresholds=all_thresholds)
    #
    # print_analyze_text_structure_v2(analysis_result)
    #
    # # 保存分析结果
    # analysis_output_path = OUTPUT_DATA_DIR / f"{filename_without_ext}_{page_num:02d}_analysis.json"
    # with open(analysis_output_path, "w", encoding="utf-8") as file:
    #     json.dump(analysis_result, file, indent=4, ensure_ascii=False,
    #               default=lambda o: '<not serializable>' if isinstance(o, (fitz.Rect,)) else o)
    # # 保存为 Markdown 格式
    # markdown_output_path = OUTPUT_DATA_DIR / f"{filename_without_ext}_{page_num:02d}_analysis_v2.md"
    # save_analysis_result_as_markdown(analysis_result, markdown_output_path)
    # print(f"\n分析结果已保存为 Markdown 格式: {markdown_output_path}")
