import json
import statistics

from app.utils.file_tools import clear_logs, extract_filename_without_extension
from app.utils.pymupdf_tools import get_pages_from_range, pdf_get_content_region, deduplicate_spans_in_blocks_with_stats, extract_json
from tests.base_test import base_test_case

# Module-level logger and shared directory paths, resolved from the project's
# base test-case configuration (test fixtures, output, pipeline config).
logger = base_test_case.get_logger(__name__)
TEST_DATA_DIR = base_test_case.test_data_dir
OUTPUT_DATA_DIR = base_test_case.output_data_dir
CONF_DIR = base_test_case.conf_dir
CONF_PIPELINE_DIR = base_test_case.conf_pipeline_dir
import fitz
from collections import Counter, defaultdict


# --- 1. Page Analyzer ---
class PageAnalyzer:
    """Analyze a PyMuPDF page dictionary, extracting structured elements and
    page-wide font/size/line-height statistics."""

    def __init__(self):
        # defaultdict(list) lets list-valued stats be appended to lazily;
        # non-list entries (sets, scalars) are assigned explicitly.
        self.stats = defaultdict(list)
        self.elements = []  # structured per-block info built by analyze()

    def analyze(self, page_dict):
        """Analyze *page_dict* (the dict returned by page.get_text('dict')).

        Populates ``self.elements`` with per-block/per-line info and
        ``self.stats`` with page-wide statistics. Previous results are cleared.
        """
        self.stats.clear()
        self.elements = []
        self.stats['all_span_fonts'] = set()

        for block_num, block in enumerate(page_dict.get("blocks", [])):
            if block['type'] != 0:  # only text blocks carry lines/spans
                continue

            block_info = {
                'block_num': block_num,
                'bbox': block['bbox'],
                'block_height': block['bbox'][3] - block['bbox'][1],
                'lines': []
            }

            for line_num, line in enumerate(block.get("lines", [])):
                line_info = {
                    'line_num': line_num,
                    'bbox': line['bbox'],
                    'line_height': line['bbox'][3] - line['bbox'][1],
                    'spans': [],
                    'text': "",
                    'max_size': 0,
                    'avg_size': 0,
                    'primary_font': None  # most frequent font, weighted by char count
                }

                sizes_in_line = []
                fonts_in_line = Counter()

                for span in line.get("spans", []):
                    span_text = span.get("text", "")
                    span_size = span["size"]
                    span_font = span["font"]

                    line_info['spans'].append(span)
                    line_info['text'] += span_text
                    line_info['max_size'] = max(line_info['max_size'], span_size)

                    # Weight size/font tallies by character count so long
                    # spans dominate the per-line averages.
                    sizes_in_line.extend([span_size] * len(span_text))
                    fonts_in_line[span_font] += len(span_text)

                    # Collect page-wide statistics.
                    self.stats['all_span_sizes'].append(span_size)
                    self.stats['all_span_fonts'].add(span_font)

                # BUGFIX: record each line's height exactly once. Previously
                # the height was appended once per span (inside the span loop),
                # which skewed avg/median line-height towards multi-span lines.
                if line_info['spans']:
                    self.stats['all_line_heights'].append(line_info['line_height'])

                if sizes_in_line:
                    line_info['avg_size'] = sum(sizes_in_line) / len(sizes_in_line)
                if fonts_in_line:
                    line_info['primary_font'] = fonts_in_line.most_common(1)[0][0]

                block_info['lines'].append(line_info)

            self.elements.append(block_info)

        # Derive the aggregate statistics.
        self._calculate_final_stats()

    def _calculate_final_stats(self):
        """Compute final aggregate values from the raw collections."""
        all_sizes = self.stats['all_span_sizes']
        all_heights = self.stats['all_line_heights']
        all_fonts = list(self.stats['all_span_fonts'])

        if all_sizes:
            self.stats['unique_sorted_sizes'] = sorted(set(all_sizes), reverse=True)
            self.stats['size_counts'] = Counter(all_sizes)
            self.stats['avg_span_size'] = statistics.mean(all_sizes)
            self.stats['median_span_size'] = statistics.median(all_sizes)

        if all_heights:
            self.stats['avg_line_height'] = statistics.mean(all_heights)
            self.stats['median_line_height'] = statistics.median(all_heights)

        self.stats['fonts'] = all_fonts

    def get_stats(self):
        """Return the collected statistics as a plain dict."""
        return dict(self.stats)

    def get_elements(self):
        """Return the structured per-block elements."""
        return self.elements


# --- 2. Style Classifier ---
class StyleClassifier:
    """Assign a semantic type ('h1', 'h2', 'p', ...) to each analyzed line."""

    def __init__(self, size_classification_map=None, custom_classifier_func=None):
        """
        Args:
            size_classification_map (dict, optional): {size_value: 'type'} map,
                e.g. {14.0: 'h1', 12.0: 'h2', 10.0: 'p'}. Takes precedence over
                the default statistics-based heuristic.
            custom_classifier_func (callable, optional): custom classifier
                func(line, stats) -> str; takes precedence over everything.
        """
        self.size_map = size_classification_map
        self.custom_func = custom_classifier_func

    def classify(self, elements, stats):
        """Annotate every line of every block with a 'type' key; returns elements."""
        for block in elements:
            for line_info in block['lines']:
                line_info['type'] = self._classify_line(line_info, stats)
        return elements

    def _classify_line(self, line, stats):
        """Classify a single line dict and return its type string."""
        if self.custom_func is not None:
            return self.custom_func(line, stats)

        if self.size_map:
            # Explicit map: tolerant float equality against each configured
            # size; fall back to paragraph when nothing matches.
            label = next(
                (type_val for size_key, type_val in self.size_map.items()
                 if abs(line['max_size'] - size_key) < 1e-6),
                None,
            )
            return 'p' if label is None else label

        # Default heuristic driven by the page statistics.
        sizes_desc = stats.get('unique_sorted_sizes', [])
        if not sizes_desc:
            return 'p'

        # Treat the three largest sizes on the page as heading candidates;
        # the single largest becomes h1, the rest h2.
        if line['max_size'] not in sizes_desc[:3]:
            return 'p'
        return 'h1' if line['max_size'] == sizes_desc[0] else 'h2'


# --- 3. Merger ---
class Merger:
    """Merge related line elements (wrapped paragraph lines) in place."""

    def __init__(self, size_tolerance=0.5):
        # Maximum max_size difference for two consecutive 'p' lines to be
        # considered part of the same paragraph.
        self.size_tolerance = size_tolerance

    def merge(self, elements, stats):
        """Run all merge passes and return the (mutated) elements list."""
        # Pass 1: merge consecutive paragraph lines inside each block.
        # (A cross-block pass could be added here later.)
        return self.merge_consecutive_lines_in_blocks(elements, stats)

    def merge_consecutive_lines_in_blocks(self, elements, stats):
        """Within each block, collapse runs of size-compatible 'p' lines."""
        for block in elements:
            if not block['lines']:
                continue

            out_lines = []
            pending = []  # consecutive 'p' lines awaiting merge

            def flush():
                # Emit the accumulated paragraph run (if any) as one line.
                if pending:
                    out_lines.append(self._build_merged_paragraph(pending))
                    pending.clear()

            for line in block['lines']:
                kind = line['type']
                if kind.startswith('h'):
                    # Headings terminate any open paragraph and pass through.
                    flush()
                    out_lines.append(line)
                elif kind == 'p':
                    # Start a new run when the size jumps beyond tolerance.
                    if pending and abs(line['max_size'] - pending[-1]['max_size']) > self.size_tolerance:
                        flush()
                    pending.append(line)
                else:
                    # Unknown types also break the current paragraph run.
                    flush()
                    out_lines.append(line)

            flush()  # emit the trailing run
            block['lines'] = out_lines

        return elements

    def _build_merged_paragraph(self, lines):
        """Combine a run of paragraph lines into a single 'p_merged' dict."""
        text = ""
        spans = []
        for ln in lines:
            # Insert a space unless a hyphen/space already joins the parts.
            needs_space = (text
                           and not text.endswith(('-', ' '))
                           and not ln['text'].startswith((' ', '-')))
            text += (" " if needs_space else "") + ln['text']
            spans.extend(ln['spans'])

        return {
            'type': 'p_merged',  # marks an already-merged paragraph
            'text': text.strip(),
            'max_size': lines[0]['max_size'],  # size taken from the first line
            'avg_size': sum(ln['avg_size'] for ln in lines) / len(lines),
            'spans': spans,
            # Union bounding box of all merged lines.
            'bbox': [
                min(ln['bbox'][0] for ln in lines),
                min(ln['bbox'][1] for ln in lines),
                max(ln['bbox'][2] for ln in lines),
                max(ln['bbox'][3] for ln in lines),
            ],
        }


# --- 4. Markdown Generator ---
class MarkdownGenerator:
    """Render classified/merged elements as a Markdown string."""

    # Markdown prefixes for the explicitly handled heading types.
    _HEADING_PREFIX = {'h1': '#', 'h2': '##', 'h3': '###'}

    def generate(self, elements):
        """Build and return the Markdown text for *elements*."""
        rendered = []
        for block in elements:
            for line in block['lines']:
                text = line['text'].strip()
                if not text:
                    continue

                prefix = self._HEADING_PREFIX.get(line['type'])
                if prefix is not None:
                    rendered.append(f"{prefix} {text}\n")
                elif line['type'].startswith('p'):  # 'p' or 'p_merged'
                    # Flatten newlines / doubled spaces so the paragraph
                    # renders as one clean line.
                    flat = text.replace('\n', ' ').replace('  ', ' ')
                    rendered.append(f"{flat}\n")
                # Other types are dropped silently.

            rendered.append("")  # blank separator between blocks

        # Join everything and trim leading/trailing whitespace.
        return "\n".join(rendered).strip()


# --- 5. Main Converter Class ---
class FlexiblePDFToMarkdownConverter:
    """Configurable PDF-to-Markdown conversion pipeline.

    Stages: PageAnalyzer -> StyleClassifier -> Merger -> MarkdownGenerator.
    """

    def __init__(self, size_classification_map=None, custom_classifier_func=None,
                 size_merge_tolerance=0.5, additional_merger_rules=None):
        """
        Args:
            size_classification_map (dict, optional): font-size -> type mapping.
            custom_classifier_func (callable, optional): custom classification
                function, func(line, stats) -> str.
            size_merge_tolerance (float): font-size tolerance when merging lines.
            additional_merger_rules (dict, optional): extra merge rules
                (currently unused; accepted for forward compatibility).
        """
        self.analyzer = PageAnalyzer()
        self.classifier = StyleClassifier(
            size_classification_map=size_classification_map,
            custom_classifier_func=custom_classifier_func
        )
        self.merger = Merger(size_tolerance=size_merge_tolerance)
        self.generator = MarkdownGenerator()

    def convert_page(self, page_dict):
        """Convert one page dict (output of page.get_text('dict')).

        Returns:
            tuple: (markdown_text, stats) — stats is returned for
            debugging / downstream use.
        """
        # 1. Analyze.
        self.analyzer.analyze(page_dict)
        stats = self.analyzer.get_stats()
        elements = self.analyzer.get_elements()

        # Use the module logger instead of print() so diagnostic output
        # respects the configured logging setup.
        logger.debug("Unique sorted sizes found: %s", stats.get('unique_sorted_sizes', []))

        # 2. Classify.
        elements = self.classifier.classify(elements, stats)

        # 3. Merge.
        elements = self.merger.merge(elements, stats)

        # 4. Generate Markdown.
        markdown_text = self.generator.generate(elements)

        return markdown_text, stats

    def convert_pdf_page(self, pdf_path, page_number):
        """Convenience wrapper: open *pdf_path* and convert page *page_number*.

        The document is opened in a context manager so the file handle is
        released even if page loading or text extraction raises (the original
        code leaked the open document on exceptions).
        """
        with fitz.open(pdf_path) as doc:
            page = doc.load_page(page_number)
            page_dict = page.get_text("dict")
        return self.convert_page(page_dict)


# Example 3: supply a custom classification function (most flexible option).
def example_with_custom_classifier():
    """Demonstrate wiring a custom classifier into the converter."""

    def size_based_classifier(line_info, stats):
        # Anything larger than the page-average span size (+1pt) is a heading.
        baseline = stats.get('avg_span_size', 10)
        return 'h2' if line_info['max_size'] > baseline + 1 else 'p'

    converter = FlexiblePDFToMarkdownConverter(
        custom_classifier_func=size_based_classifier,
        size_merge_tolerance=0.4,
    )
    # markdown, stats = converter.convert_page(page_data)
    # print("--- With Custom Classifier Output ---")
    # print(markdown)


def analyze_page_blocks(page_dict):
    """
    Analyze the data returned by page.get_text('dict') and extract detailed
    block-level information.

    Args:
        page_dict: dict produced by PyMuPDF's Page.get_text("dict").

    Returns:
        list[dict]: one entry per text block (type == 0) with bbox,
        line-height stats, span size/font lists, accumulated text, and
        vertical-spacing fields relative to neighbouring blocks.
    """
    blocks_info = []
    original_blocks = page_dict.get("blocks", [])  # reference to the raw blocks list

    # --- Pass 1: collect basic per-block information ---
    for block_index, block in enumerate(page_dict.get("blocks", [])):
        if block['type'] != 0:  # text blocks only
            continue

        block_bbox = block['bbox']
        collected_line_heights = []
        collected_span_sizes = []
        collected_span_fonts = []
        accumulated_full_text = ""

        for line in block.get("lines", []):
            line_bbox = line['bbox']
            line_height = line_bbox[3] - line_bbox[1]
            collected_line_heights.append(line_height)

            for span in line.get("spans", []):
                span_text = span.get("text", "")
                span_size = span.get("size", 0)
                span_font = span.get("font", "")

                # span_index records the span's position within this block.
                span_idx = len(collected_span_sizes)
                collected_span_sizes.append({'span_index': span_idx, 'size': span_size})
                collected_span_fonts.append({'span_index': span_idx, 'font': span_font})
                accumulated_full_text += span_text

        max_line_h = max(collected_line_heights) if collected_line_heights else 0
        min_line_h = min(collected_line_heights) if collected_line_heights else 0
        avg_line_h = statistics.mean(collected_line_heights) if collected_line_heights else 0

        max_span_s = max((s['size'] for s in collected_span_sizes), default=0)
        min_span_s = min((s['size'] for s in collected_span_sizes), default=0)
        avg_span_s = statistics.mean(s['size'] for s in collected_span_sizes) if collected_span_sizes else 0

        block_info = {
            'block_index': block_index,
            'bbox': tuple(block_bbox),
            'line_heights': collected_line_heights,
            'max_line_height': max_line_h,
            'min_line_height': min_line_h,
            'avg_line_height': avg_line_h,
            'vspace_before': None,  # placeholder, filled in pass 2
            'vspace_after': None,  # placeholder, filled in pass 2
            'vspaces_before_top5': [],
            'vspaces_after_bottom5': [],
            'vspace_before_top5_median': None,
            'vspace_after_bottom5_median': None,
            'vspace_before_top5_mean': None,
            'vspace_after_bottom5_mean': None,
            'span_sizes': collected_span_sizes,
            'max_span_size': max_span_s,
            'min_span_size': min_span_s,
            'avg_span_size': avg_span_s,
            'span_fonts': collected_span_fonts,
            'full_text': accumulated_full_text
        }
        blocks_info.append(block_info)

    # --- Pass 2: compute fields that need neighbouring-block context ---
    num_original_blocks = len(original_blocks)  # count of raw blocks (all types)
    num_processed_blocks = len(blocks_info)  # count of text blocks kept

    if num_processed_blocks == 0:
        print("Warning: No text blocks found in page_dict.")
        return blocks_info

    # Map original block index -> index in blocks_info. Needed because
    # original_blocks may contain non-text (type != 0) blocks, so the two
    # index spaces do not line up.
    orig_to_proc_index_map = {info['block_index']: idx for idx, info in enumerate(blocks_info)}

    for i, block_info in enumerate(blocks_info):
        current_orig_index = block_info['block_index']  # index in the raw list
        current_bbox = block_info['bbox']

        # 1. Direct gap to the previous/next text block (via the index map).
        #    Stays None when the raw neighbour is not a text block.
        prev_proc_index = orig_to_proc_index_map.get(current_orig_index - 1)
        if prev_proc_index is not None and 0 <= prev_proc_index < num_processed_blocks:
            prev_bbox = blocks_info[prev_proc_index]['bbox']
            block_info['vspace_before'] = round(current_bbox[1] - prev_bbox[3], 2)
        else:
            block_info['vspace_before'] = None

        next_proc_index = orig_to_proc_index_map.get(current_orig_index + 1)
        if next_proc_index is not None and 0 <= next_proc_index < num_processed_blocks:
            next_bbox = blocks_info[next_proc_index]['bbox']
            block_info['vspace_after'] = round(next_bbox[1] - current_bbox[3], 2)
        else:
            block_info['vspace_after'] = None

        # 2. Gaps involving the 5 blocks before/after. Key point: computed
        #    directly on original_blocks so non-text blocks are included.
        # vspaces_before_top5: the vspace_after of blocks i-5 .. i-1.
        vspaces_before_list = []
        # Look for text blocks at original indices current_orig_index-5 .. -1.
        for offset in range(1, 6):  # 1 to 5
            prev_orig_index = current_orig_index - offset
            if prev_orig_index >= 0:
                prev_proc_index_check = orig_to_proc_index_map.get(prev_orig_index)
                # We need the gap between block prev_orig_index and the block
                # right after it — i.e. prev_orig_index's vspace_after.
                if prev_proc_index_check is not None and (prev_orig_index + 1) < num_original_blocks:
                    # Compute that gap directly from the raw bboxes.
                    try:
                        prev_block_bbox = original_blocks[prev_orig_index]['bbox']
                        next_block_bbox = original_blocks[prev_orig_index + 1]['bbox']
                        space = round(next_block_bbox[1] - prev_block_bbox[3], 2)
                        vspaces_before_list.insert(0, space)  # prepend to keep document order
                    except (IndexError, KeyError, TypeError):
                        # Handle potential issues with original data
                        pass  # Or append None if you prefer
            else:
                break  # No more previous blocks
        block_info['vspaces_before_top5'] = vspaces_before_list

        # vspaces_after_bottom5: the vspace_before of blocks i+1 .. i+5.
        vspaces_after_list = []
        # Look at original indices (current_orig_index+1) .. (current_orig_index+5).
        for offset in range(1, 6):  # 1 to 5
            next_orig_index = current_orig_index + offset
            # We need the gap between block next_orig_index-1 and block
            # next_orig_index — i.e. next_orig_index's vspace_before.
            if next_orig_index < num_original_blocks:
                # Compute that gap directly from the raw bboxes.
                try:
                    current_block_for_calc_bbox = original_blocks[next_orig_index]['bbox']
                    prev_block_for_calc_bbox = original_blocks[next_orig_index - 1]['bbox']
                    space = round(current_block_for_calc_bbox[1] - prev_block_for_calc_bbox[3], 2)
                    vspaces_after_list.append(space)
                except (IndexError, KeyError, TypeError):
                    # Handle potential issues with original data
                    pass  # Or append None if you prefer
            else:
                break  # No more next blocks

        block_info['vspaces_after_bottom5'] = vspaces_after_list

        # 3. Median and mean of the top-5 gap lists.
        valid_vb = list(filter(lambda x: x is not None, vspaces_before_list))
        if valid_vb:
            block_info['vspace_before_top5_median'] = round(statistics.median(valid_vb), 2)
            block_info['vspace_before_top5_mean'] = round(statistics.mean(valid_vb), 2)
        else:
            block_info['vspace_before_top5_median'] = None
            block_info['vspace_before_top5_mean'] = None

        valid_va = list(filter(lambda x: x is not None, vspaces_after_list))
        if valid_va:
            block_info['vspace_after_bottom5_median'] = round(statistics.median(valid_va), 2)
            block_info['vspace_after_bottom5_mean'] = round(statistics.mean(valid_va), 2)
        else:
            block_info['vspace_after_bottom5_median'] = None
            block_info['vspace_after_bottom5_mean'] = None

    return blocks_info


def convert_blocks_to_markdown_structure(blocks_info):
    """
    Convert blocks_info into an intermediate representation closer to the
    document's logical structure.

    Args:
        blocks_info: block info list produced by analyze_page_blocks.

    Returns:
        A list of dictionaries representing the logical structure.
        Each dict has keys: 'type', 'level' (for headings), 'content',
        'source_block_indices' ('list' elements carry 'items' instead).
    """
    if not blocks_info:
        return []

    # --- Pre-processing: tag each block with inferred markers ---
    # 1. Global average font size; drives the heading-detection threshold.
    positive_avg_sizes = [b['avg_span_size'] for b in blocks_info if b['avg_span_size'] > 0]
    global_avg_span_size = statistics.mean(positive_avg_sizes) if positive_avg_sizes else 0
    # Heading candidates must be noticeably larger than average (20% here).
    heading_size_threshold = global_avg_span_size * 1.2 if global_avg_span_size > 0 else 9

    # 2. Common list-item prefixes (simplified).
    import re
    list_prefix_pattern = re.compile(
        r'^(\s*)'  # optional leading whitespace
        r'(?:'
        r'(\d+\.)|'  # 1. 2. 3.
        r'([一二三四五六七八九十]+、)|'  # Chinese numerals: 一、 二、
        r'([A-Za-z]\.)|'  # A. B. a. b.
        r'([●○■□▶])|'  # bullet glyphs
        r'(-{1,2})|'  # - --
        r'(\*)'  # *
        r')'
        r'\s*'  # whitespace after the prefix
    )

    # 3. Tag every block.
    processed_blocks = []
    for block in blocks_info:
        block_copy = block.copy()  # avoid mutating the caller's data
        block_copy['is_heading_candidate'] = False
        block_copy['is_list_item_candidate'] = False
        block_copy['list_prefix_match'] = None  # the matched prefix (incl. spacing)

        # Heading candidate: large font relative to the page, a large absolute
        # size, or a bold/heavy font face anywhere in the block.
        if ((block['max_span_size'] > heading_size_threshold and block['avg_span_size'] > heading_size_threshold) or
                block['max_span_size'] > 15 or
                any("bold" in f.get('font', '').lower() or "heavy" in f.get('font', '').lower() for f in block.get('span_fonts', []))):
            block_copy['is_heading_candidate'] = True

        # List-item candidate: text starts with a known list prefix.
        match = list_prefix_pattern.match(block['full_text'].strip())
        if match:
            block_copy['is_list_item_candidate'] = True
            block_copy['list_prefix_match'] = match.group(0)

        processed_blocks.append(block_copy)

    # --- Grouping & merging ---
    markdown_elements = []
    i = 0
    num_blocks = len(processed_blocks)

    while i < num_blocks:
        current_block = processed_blocks[i]

        # --- Rule 1: headings (merge tightly-spaced heading fragments) ---
        if current_block['is_heading_candidate']:
            title_content_parts = [current_block['full_text']]
            source_indices = [current_block['block_index']]

            j = i + 1
            while j < num_blocks:
                next_block = processed_blocks[j]
                if not next_block['is_heading_candidate']:
                    break  # next block is not a heading — stop merging
                # BUGFIX: vspace_before is stored as None when the preceding
                # raw block was not a text block, and dict.get returns that
                # None (the key exists), so the old `None < 10` comparison
                # raised TypeError. Treat unknown spacing as "too far".
                space_to_next = next_block.get('vspace_before')
                if space_to_next is None or space_to_next >= 10:  # threshold is tunable
                    break  # gap too large (or unknown) — stop merging
                title_content_parts.append(next_block['full_text'])
                source_indices.append(next_block['block_index'])
                j += 1

            # Heading level heuristic based on the first fragment's font size.
            title_font_size = current_block['max_span_size']
            level = 1
            if title_font_size < 18:  # assume 18+ is H1
                level = 2
            if title_font_size < 13:  # assume 13-18 is H2
                level = 3
            if title_font_size < 10:  # assume 10-13 is H3
                level = 4

            markdown_elements.append({
                'type': 'heading',
                'level': level,
                'content': ''.join(title_content_parts).strip(),
                'source_block_indices': source_indices
            })
            i = j  # advance past everything we merged
            continue

        # --- Rule 2: paragraphs (merge visually-continuous blocks) ---
        if not current_block['is_heading_candidate'] and not current_block['is_list_item_candidate']:
            para_content_parts = [current_block['full_text']]
            source_indices = [current_block['block_index']]
            j = i + 1

            while j < num_blocks:
                next_block = processed_blocks[j]

                # Stop condition 1: a heading or list item ends the paragraph.
                if next_block['is_heading_candidate'] or next_block['is_list_item_candidate']:
                    break

                # Stop condition 2: the vertical gap suggests a new paragraph.
                # The top-5 median gap is more robust than the single direct
                # gap. BUGFIX: the stored value may be None (no computable
                # gaps), and dict.get returns that None, so the old
                # `None > threshold` comparison raised TypeError; treat an
                # unknown gap as infinitely large (i.e. break).
                median_space_above_next = next_block.get('vspace_before_top5_median')
                if median_space_above_next is None:
                    median_space_above_next = float('inf')

                # Break when the gap is clearly larger than a typical line.
                typical_line_height = next_block.get('avg_line_height', 10)
                paragraph_break_threshold = max(typical_line_height * 1.8, 15)

                if median_space_above_next > paragraph_break_threshold:
                    break  # gap large enough — end the current paragraph

                # Otherwise the block continues the same paragraph.
                para_content_parts.append(next_block['full_text'])
                source_indices.append(next_block['block_index'])
                j += 1

            markdown_elements.append({
                'type': 'paragraph',
                'content': ''.join(para_content_parts).strip(),
                'source_block_indices': source_indices
            })
            i = j
            continue

        # --- Rule 3: list items (collect a run into one 'list' container) ---
        if current_block['is_list_item_candidate']:
            list_container_items = []
            j = i
            while j < num_blocks and processed_blocks[j]['is_list_item_candidate']:
                list_block = processed_blocks[j]
                # BUGFIX: the prefix was matched against the *stripped* text,
                # so slice the stripped text too (the old code sliced the raw
                # full_text and misaligned when it had leading whitespace).
                stripped_text = list_block['full_text'].strip()
                content_without_prefix = stripped_text[len(list_block['list_prefix_match']):].lstrip()
                list_container_items.append({
                    'content': content_without_prefix.strip(),
                    'source_block_indices': [list_block['block_index']]
                })
                j += 1

            markdown_elements.append({
                'type': 'list',  # container element holding the run of items
                'items': list_container_items
            })
            i = j
            continue

        # --- Fallback (should be unreachable given the rules above) ---
        markdown_elements.append({
            'type': 'unknown',
            'content': current_block['full_text'],
            'source_block_indices': [current_block['block_index']]
        })
        i += 1

    return markdown_elements


def convert_blocks_to_markdown_structure_v2(blocks_info, rules):
    """
    Convert ``blocks_info`` into an intermediate representation that follows the
    document's logical structure, driven by predefined rules.

    Args:
        blocks_info: Block info list produced by ``analyze_page_blocks``. Each
            block dict is expected to carry ``full_text``, ``block_index``,
            ``max_span_size`` and ``bbox`` (plus optional spacing statistics
            such as ``vspace_before_top5_median`` / ``avg_line_height`` used
            for paragraph merging).
        rules: Predefined rule list. Each rule may define font-size ranges
            under ``Hx`` keys (e.g. ``'H1': (20, 30)``), an optional
            ``bbox_range`` 4-tuple for position matching, plus
            ``markdown_type`` / ``alias`` metadata.

    Returns:
        A list of dicts describing the logical structure: rule-matched
        elements (titles etc.), ``list`` containers, and ``paragraph`` runs.
    """
    if not blocks_info or not rules:
        return []

    # --- Preprocessing: tag every block with inferred markers ---

    # 1. Common list-item prefix patterns (simplified).
    import re
    list_prefix_pattern = re.compile(
        r'^(\s*)'
        r'(?:'
        r'(\d+\.)|'
        r'([一二三四五六七八九十]+、)|'
        r'([A-Za-z]\.)|'
        r'([●○■□▶])|'
        r'(-{1,2})|'
        r'(\*)'
        r')'
        r'\s*'
    )

    # 2. Attach markers and the first matching rule to each block.
    processed_blocks = []
    for block in blocks_info:
        block_copy = block.copy()
        block_copy['matched_rule'] = None
        block_copy['is_list_item_candidate'] = False
        block_copy['list_prefix_match'] = None

        font_size = block.get('max_span_size', 0)  # or avg_span_size?

        # --- Rule matching ---
        matched_rule = None
        for rule in rules:
            size_matched = False
            pos_matched = True  # no bbox_range means position matches by default

            # Size check: any Hx key whose range contains the block font size.
            for key, size_range in rule.items():
                if key.startswith('H') and isinstance(size_range, (tuple, list)) and len(size_range) == 2:
                    if size_range[0] <= font_size <= size_range[1]:
                        size_matched = True
                        break  # one matching Hx is enough

            # Position check (only when the rule defines a 4-element bbox_range).
            bbox_range = rule.get('bbox_range')
            if bbox_range and isinstance(bbox_range, (tuple, list)) and len(bbox_range) == 4:
                block_bbox = block.get('bbox', (0, 0, 0, 0))
                # Axis-aligned bounding-box intersection: the boxes are disjoint
                # iff they are fully separated on at least one side.
                pos_matched = not (block_bbox[2] < bbox_range[0] or  # block left of range
                                   block_bbox[0] > bbox_range[2] or  # block right of range
                                   block_bbox[3] < bbox_range[1] or  # block above range
                                   block_bbox[1] > bbox_range[3])    # block below range

            # Both size and position must match; first matching rule wins.
            if size_matched and pos_matched:
                matched_rule = rule
                break

        block_copy['matched_rule'] = matched_rule

        # --- List-item candidate detection (on the stripped text) ---
        match = list_prefix_pattern.match(block['full_text'].strip())
        if match:
            block_copy['is_list_item_candidate'] = True
            block_copy['list_prefix_match'] = match.group(0)

        processed_blocks.append(block_copy)

    # --- Grouping and merging ---
    markdown_elements = []
    i = 0
    num_blocks = len(processed_blocks)

    while i < num_blocks:
        current_block = processed_blocks[i]
        matched_rule = current_block.get('matched_rule')

        # --- Rule 1: blocks that matched a rule (mostly title-like) ---
        # Assumption: rule-matched blocks are not merged with other kinds
        # (e.g. list items). Adjust if rules may match list prefixes.
        if matched_rule:
            markdown_type = matched_rule.get('markdown_type', 'unknown_by_rule')

            # Infer the heading level from the first Hx key of the rule.
            level = None
            for key in matched_rule.keys():
                if key.startswith('H'):
                    try:
                        level = int(key[1:])
                        break
                    except ValueError:
                        pass

            markdown_elements.append({
                'type': markdown_type,
                'level': level,  # may be None
                'content': current_block['full_text'].strip(),
                'source_block_indices': [current_block['block_index']],
                'rule_alias': matched_rule.get('alias')  # keep the business alias
            })
            i += 1
            continue  # next element

        # --- Rule 2: list items ---
        if current_block['is_list_item_candidate']:
            list_items = []
            j = i
            # Collect the run of consecutive list-item candidates.
            while j < num_blocks and processed_blocks[j]['is_list_item_candidate']:
                list_block = processed_blocks[j]
                prefix = list_block['list_prefix_match']
                # BUGFIX: the prefix was matched against the *stripped* text,
                # so slice the stripped text as well. Slicing the raw
                # full_text by len(prefix) mis-aligns whenever the text has
                # leading whitespace, leaving prefix fragments in the content.
                content_without_prefix = list_block['full_text'].strip()[len(prefix):].lstrip()
                list_items.append({
                    'content': content_without_prefix.strip(),
                    'source_block_indices': [list_block['block_index']],
                })
                j += 1

            # Emit one 'list' container holding all collected items.
            markdown_elements.append({
                'type': 'list',
                'items': [{'content': item['content'],
                           'source_block_indices': item['source_block_indices']}
                          for item in list_items],
                'source_block_indices': [item['source_block_indices'][0] for item in list_items]
            })
            i = j
            continue

        # --- Rule 3: default — unmatched, non-list blocks become paragraphs ---
        if not matched_rule and not current_block['is_list_item_candidate']:
            para_content_parts = [current_block['full_text']]
            source_indices = [current_block['block_index']]
            j = i + 1

            # Extend the paragraph with following blocks that are also
            # unmatched and not list items, while the vertical gap stays small.
            while j < num_blocks:
                next_block = processed_blocks[j]
                if next_block.get('matched_rule') or next_block['is_list_item_candidate']:
                    break

                median_space_above_next = next_block.get('vspace_before_top5_median', float('inf'))
                typical_line_height = next_block.get('avg_line_height', 10)
                paragraph_break_threshold = max(typical_line_height * 1.8, 15)

                # A gap clearly larger than a line height marks a paragraph break.
                if median_space_above_next > paragraph_break_threshold:
                    break

                para_content_parts.append(next_block['full_text'])
                source_indices.append(next_block['block_index'])
                j += 1

            markdown_elements.append({
                'type': 'paragraph',
                'content': ''.join(para_content_parts).strip(),
                'source_block_indices': source_indices
            })
            i = j
            continue

        # --- Fallback (unreachable in practice; keeps the loop total) ---
        i += 1

    return markdown_elements


def convert_blocks_to_markdown_structure_v3(blocks_info, rules):
    """
    Convert blocks_info into an intermediate representation close to the
    document's logical structure, driven entirely by predefined rules.

    Each block is scored against every rule (font size + position); the
    highest-scoring rule wins. Unmatched blocks are grouped into
    'unmatched_content' elements, flagging gaps in the rule set.

    Args:
        blocks_info: Block info list produced by analyze_page_blocks.
        rules: Predefined rule list; each rule defines a type, font-size
            ranges (Hx keys), an optional position range (bbox_range), etc.

    Returns:
        A list of dicts representing the logical document structure.
    """
    if not blocks_info or not rules:
        return []

    # --- Step 1: preprocess blocks, find the best matching rule for each ---
    processed_blocks = []
    for i, block in enumerate(blocks_info):
        block_copy = block.copy()

        # Find the best-scoring rule based on font size and position.
        best_rule = None
        best_score = -1  # higher score wins

        font_size = block.get('max_span_size', 0)

        for rule in rules:
            score = 0

            # Check whether any Hx range in the rule contains the font size.
            size_matched_key = None
            for key, size_range in rule.items():
                if key.startswith('H') and isinstance(size_range, (tuple, list)) and len(size_range) == 2:
                    if size_range[0] <= font_size <= size_range[1]:
                        size_matched_key = key
                        score += 10  # base score for a size match
                        # Optional: bonus for being closer to the range centre?
                        # range_center = (size_range[0] + size_range[1]) / 2
                        # score += 1.0 / (1.0 + abs(font_size - range_center))
                        break  # stop at the first Hx that matches

            if not size_matched_key:
                continue  # size does not match, skip this rule

            # Position check (only when bbox_range is defined).
            bbox_range = rule.get('bbox_range')
            if bbox_range:
                block_bbox = block.get('bbox', (0, 0, 0, 0))
                if not (block_bbox[2] < bbox_range[0] or
                        block_bbox[0] > bbox_range[2] or
                        block_bbox[3] < bbox_range[1] or
                        block_bbox[1] > bbox_range[3]):
                    score += 5  # bonus for a position match
            else:
                score += 1  # small bonus when there is no position constraint

            # Keep the highest-scoring rule seen so far.
            if score > best_score:
                best_score = score
                best_rule = rule
                block_copy['matched_h_key'] = size_matched_key  # remember the matched Hx key

        block_copy['matched_rule'] = best_rule
        block_copy['match_score'] = best_score

        processed_blocks.append(block_copy)

    # --- Step 2: group and merge blocks according to the rules ---
    markdown_elements = []
    i = 0
    num_blocks = len(processed_blocks)

    while i < num_blocks:
        current_block = processed_blocks[i]
        matched_rule = current_block.get('matched_rule')

        # --- No rule matched: content that may need grouping ---
        # Simplification: ideally the rules should cover everything; an
        # unmatched block here signals a gap in the rule set.
        if not matched_rule:
            # Try to group with the following unmatched blocks.
            content_parts = [current_block['full_text']]
            source_indices = [current_block['block_index']]
            j = i + 1
            while j < num_blocks and not processed_blocks[j].get('matched_rule'):
                next_block = processed_blocks[j]
                # Simple merge condition (could be refined further).
                space_before_next = next_block.get('vspace_before', float('inf'))
                median_space_above_next = next_block.get('vspace_before_top5_median', float('inf'))
                typical_line_height = next_block.get('avg_line_height', 10)
                paragraph_break_threshold = max(typical_line_height * 1.8, 15)

                if median_space_above_next > paragraph_break_threshold:
                    break
                content_parts.append(next_block['full_text'])
                source_indices.append(next_block['block_index'])
                j += 1

            # Emit unmatched content with a generic type/alias that flags the
            # rule-set gap while still producing output.
            markdown_elements.append({
                'type': 'unmatched_content',  # flags the problem
                'level': None,
                'content': ''.join(content_parts).strip(),
                'source_block_indices': source_indices,
                'rule_alias': 'unmatched_content_alias'  # flags the problem
            })
            i = j
            continue  # move to the next unprocessed block

        # --- Rule-driven handling ---
        markdown_type = matched_rule.get('markdown_type', 'unknown')
        rule_alias = matched_rule.get('alias')

        # Derive the heading level from the matched Hx key.
        level = None
        h_key = current_block.get('matched_h_key', '')
        if h_key and h_key.startswith('H'):
            try:
                level = int(h_key[1:])
            except ValueError:
                pass

        # --- Merge logic driven by the rule type ---
        # NOTE(review): this section needs customisation per document
        # structure. Example heuristic: a heading (H1, H3, ...) likely opens
        # a new section and the following content may belong to it. A more
        # robust approach would define a grouping policy inside each rule,
        # e.g. {'markdown_type': '...', 'group_with_following': [...], ...}.
        # For now a simple heuristic based on H level and spacing is used.

        content_parts = [current_block['full_text']]
        source_indices = [current_block['block_index']]
        j = i + 1

        # Heuristic conditions for merging the following blocks:
        # 1. they do not match a "stronger" structural rule (another heading)
        # 2. their font size is <= the current block's (within a small margin)
        # 3. the vertical gap is small
        # This is highly document-dependent.

        # Main font-size range of the current rule, used for comparison.
        current_rule_main_h_range = None
        current_h_key = current_block.get('matched_h_key', '')
        if current_h_key:
            current_rule_main_h_range = matched_rule.get(current_h_key)

        should_merge_heuristic = (
                markdown_type in ['paragraph_title', 'doc_title'] or  # titles usually own following content
                (current_rule_main_h_range and current_rule_main_h_range[0] >= 12)  # arbitrary: larger fonts may open sections
        )

        if should_merge_heuristic:
            while j < num_blocks:
                next_block = processed_blocks[j]
                next_matched_rule = next_block.get('matched_rule')

                # Stop merging when:
                # 1. the next block matched a rule (a structural element)
                if next_matched_rule:
                    # Optionally merging with specific types (content into a
                    # title) could be allowed via the rules; for simplicity
                    # any rule match stops the merge for now.
                    break

                # 2. the gap is too large
                median_space_above_next = next_block.get('vspace_before_top5_median', float('inf'))
                typical_line_height = next_block.get('avg_line_height', 10)
                paragraph_break_threshold = max(typical_line_height * 1.8, 15)
                if median_space_above_next > paragraph_break_threshold:
                    break

                # 3. the font is clearly larger (probably a new heading)
                # This check could be refined further.
                next_font_size = next_block.get('max_span_size', 0)
                if next_font_size > (current_block.get('max_span_size', 0) + 2):  # arbitrary threshold
                    break

                # No stop condition hit: merge the block.
                content_parts.append(next_block['full_text'])
                source_indices.append(next_block['block_index'])
                j += 1

        # --- Emit the final element ---
        # Even when nothing merged, j is i+1, so source_indices stays correct.
        markdown_elements.append({
            'type': markdown_type,
            'level': level,
            'content': ''.join(content_parts).strip(),
            'source_block_indices': source_indices,
            'rule_alias': rule_alias
        })

        i = j  # advance past the processed blocks

    return markdown_elements


def convert_blocks_to_markdown_structure_v4(blocks_info, rules, include_diagnostics=False):
    """
    Convert blocks_info into an intermediate representation close to the
    document's logical structure, driven entirely by predefined rules.
    Optionally includes diagnostic information for debugging.

    Args:
        blocks_info: Block info list produced by analyze_page_blocks.
        rules: Predefined rule list; each rule defines a type, font-size
            ranges (Hx keys), an optional position range (bbox_range), etc.
        include_diagnostics (bool): If True, 'source_block_indices' holds
            detailed per-block diagnostic dicts instead of bare indices.

    Returns:
        A list of dicts representing the logical document structure.
    """
    if not blocks_info or not rules:
        return []

    # --- Step 1: preprocess blocks, find the best matching rule for each ---
    processed_blocks = []
    for i, block in enumerate(blocks_info):
        block_copy = block.copy()

        # Find the best-scoring rule based on font size and position.
        best_rule = None
        best_score = -1  # higher score wins

        font_size = block.get('max_span_size', 0)

        for rule in rules:
            score = 0

            # Check whether any Hx range in the rule contains the font size.
            size_matched_key = None
            for key, size_range in rule.items():
                if key.startswith('H') and isinstance(size_range, (tuple, list)) and len(size_range) == 2:
                    if size_range[0] <= font_size <= size_range[1]:
                        size_matched_key = key
                        score += 10  # base score for a size match
                        # Optional: bonus for being closer to the range centre?
                        # range_center = (size_range[0] + size_range[1]) / 2
                        # score += 1.0 / (1.0 + abs(font_size - range_center))
                        break  # stop at the first Hx that matches

            if not size_matched_key:
                continue  # size does not match, skip this rule

            # Position check (only when bbox_range is defined).
            bbox_range = rule.get('bbox_range')
            if bbox_range:
                block_bbox = block.get('bbox', (0, 0, 0, 0))
                if not (block_bbox[2] < bbox_range[0] or
                        block_bbox[0] > bbox_range[2] or
                        block_bbox[3] < bbox_range[1] or
                        block_bbox[1] > bbox_range[3]):
                    score += 5  # bonus for a position match
            else:
                score += 1  # small bonus when there is no position constraint

            # Keep the highest-scoring rule seen so far.
            if score > best_score:
                best_score = score
                best_rule = rule
                block_copy['matched_h_key'] = size_matched_key  # remember the matched Hx key

        block_copy['matched_rule'] = best_rule
        block_copy['match_score'] = best_score

        processed_blocks.append(block_copy)

    # --- Step 2: group and merge blocks according to the rules ---
    markdown_elements = []
    i = 0
    num_blocks = len(processed_blocks)

    while i < num_blocks:
        current_block = processed_blocks[i]
        matched_rule = current_block.get('matched_rule')

        # --- No rule matched: content that may need grouping ---
        if not matched_rule:
            content_parts = [current_block['full_text']]
            source_indices_info = [{
                'index': current_block['block_index'],
                'font_size': current_block.get('max_span_size'),
                'bbox': current_block.get('bbox'),
                'vspace_before': current_block.get('vspace_before'),
                'vspace_before_median': current_block.get('vspace_before_top5_median'),
                'avg_line_height': current_block.get('avg_line_height')
            }] if include_diagnostics else [current_block['block_index']]
            j = i + 1
            # NOTE(review): unlike v3, this loop merges every run of
            # consecutive unmatched blocks with no spacing-break check —
            # confirm this simplification is intentional.
            while j < num_blocks and not processed_blocks[j].get('matched_rule'):
                next_block = processed_blocks[j]
                content_parts.append(next_block['full_text'])

                if include_diagnostics:
                    source_indices_info.append({
                        'index': next_block['block_index'],
                        'font_size': next_block.get('max_span_size'),
                        'bbox': next_block.get('bbox'),
                        'vspace_before': next_block.get('vspace_before'),
                        'vspace_before_median': next_block.get('vspace_before_top5_median'),
                        'avg_line_height': next_block.get('avg_line_height')
                    })
                else:
                    source_indices_info.append(next_block['block_index'])
                j += 1

            markdown_elements.append({
                'type': 'unmatched_content',
                'level': None,
                'content': ''.join(content_parts).strip(),
                'source_block_indices': source_indices_info,  # detailed dicts or bare indices
                'rule_alias': 'unmatched_content_alias'
            })
            i = j
            continue

        # --- Rule-driven handling ---
        markdown_type = matched_rule.get('markdown_type', 'unknown')
        rule_alias = matched_rule.get('alias')

        # Derive the heading level from the matched Hx key.
        level = None
        h_key = current_block.get('matched_h_key', '')
        if h_key and h_key.startswith('H'):
            try:
                level = int(h_key[1:])
            except ValueError:
                pass

        # --- Merge logic driven by the rule type ---
        content_parts = [current_block['full_text']]
        # Seed source_indices_info with the current (leader) block's info.
        source_indices_info = [{
            'index': current_block['block_index'],
            'font_size': current_block.get('max_span_size'),
            'bbox': current_block.get('bbox'),
            'vspace_before': current_block.get('vspace_before'),
            'vspace_before_median': current_block.get('vspace_before_top5_median'),
            'avg_line_height': current_block.get('avg_line_height'),
            'matched_rule_type': markdown_type,  # rule type that triggered the merge
            'match_score': current_block.get('match_score')
        }] if include_diagnostics else [current_block['block_index']]

        j = i + 1

        # Main font-size range of the current rule, used for comparison.
        current_rule_main_h_range = None
        current_h_key = current_block.get('matched_h_key', '')
        if current_h_key:
            current_rule_main_h_range = matched_rule.get(current_h_key)

        # Heuristic: decide whether to try merging the following content.
        should_merge_heuristic = (
                markdown_type in ['paragraph_title', 'doc_title'] or
                (current_rule_main_h_range and current_rule_main_h_range[0] >= 12)
        )

        if should_merge_heuristic:
            while j < num_blocks:
                next_block = processed_blocks[j]
                next_matched_rule = next_block.get('matched_rule')

                # Stop merging when:
                # 1. the next block matched a rule (a structural element)
                if next_matched_rule:
                    break

                # 2. the gap is too large
                median_space_above_next = next_block.get('vspace_before_top5_median', float('inf'))
                typical_line_height = next_block.get('avg_line_height', 10)
                paragraph_break_threshold = max(typical_line_height * 1.8, 15)
                if median_space_above_next > paragraph_break_threshold:
                    break

                # 3. the font is clearly larger (probably a new heading)
                next_font_size = next_block.get('max_span_size', 0)
                if next_font_size > (current_block.get('max_span_size', 0) + 2):
                    break

                # No stop condition hit: merge the block.
                content_parts.append(next_block['full_text'])
                if include_diagnostics:
                    source_indices_info.append({
                        'index': next_block['block_index'],
                        'font_size': next_block.get('max_span_size'),
                        'bbox': next_block.get('bbox'),
                        'vspace_before': next_block.get('vspace_before'),
                        'vspace_before_median': next_block.get('vspace_before_top5_median'),
                        'avg_line_height': next_block.get('avg_line_height'),
                        'merged_because': f"Following {markdown_type}, space OK, size OK"  # merge reason
                    })
                else:
                    source_indices_info.append(next_block['block_index'])
                j += 1

        # --- Emit the final element ---
        markdown_elements.append({
            'type': markdown_type,
            'level': level,
            'content': ''.join(content_parts).strip(),
            'source_block_indices': source_indices_info,  # detailed dicts or bare indices
            'rule_alias': rule_alias
        })

        i = j  # advance past the processed blocks

    return markdown_elements


import statistics
import re


def convert_blocks_to_markdown_structure_v5(blocks_info, rules, include_diagnostics=False):
    """
    Convert blocks_info into an intermediate representation close to the
    document's logical structure, driven entirely by predefined rules.
    Optionally includes diagnostic information for debugging.

    Like v4, but diagnostics are built via _create_detailed_block_info
    (including per-line details), and each element additionally gets an
    'aggregated_bbox' computed from its line bboxes in diagnostics mode.

    Args:
        blocks_info: Block info list produced by analyze_page_blocks.
        rules: Predefined rule list; each rule defines a type, font-size
            ranges (Hx keys), an optional position range (bbox_range), etc.
        include_diagnostics (bool): If True, include detailed block- and
            line-level diagnostics in the output.

    Returns:
        A list of dicts representing the logical document structure.
    """
    if not blocks_info or not rules:
        return []

    # --- Step 1: preprocess blocks, find the best matching rule for each ---
    processed_blocks = []
    for i, block in enumerate(blocks_info):
        block_copy = block.copy()

        # Find the best-scoring rule based on font size and position.
        best_rule = None
        best_score = -1  # higher score wins

        font_size = block.get('max_span_size', 0)

        for rule in rules:
            score = 0

            # Check whether any Hx range in the rule contains the font size.
            size_matched_key = None
            for key, size_range in rule.items():
                if key.startswith('H') and isinstance(size_range, (tuple, list)) and len(size_range) == 2:
                    if size_range[0] <= font_size <= size_range[1]:
                        size_matched_key = key
                        score += 10  # base score for a size match
                        break  # stop at the first Hx that matches

            if not size_matched_key:
                continue  # size does not match, skip this rule

            # Position check (only when bbox_range is defined).
            bbox_range = rule.get('bbox_range')
            if bbox_range:
                block_bbox = block.get('bbox', (0, 0, 0, 0))
                if not (block_bbox[2] < bbox_range[0] or
                        block_bbox[0] > bbox_range[2] or
                        block_bbox[3] < bbox_range[1] or
                        block_bbox[1] > bbox_range[3]):
                    score += 5  # bonus for a position match
            else:
                score += 1  # small bonus when there is no position constraint

            # Keep the highest-scoring rule seen so far.
            if score > best_score:
                best_score = score
                best_rule = rule
                block_copy['matched_h_key'] = size_matched_key  # remember the matched Hx key

        block_copy['matched_rule'] = best_rule
        block_copy['match_score'] = best_score

        processed_blocks.append(block_copy)

    # --- Step 2: group and merge blocks according to the rules ---
    markdown_elements = []
    i = 0
    num_blocks = len(processed_blocks)

    while i < num_blocks:
        current_block = processed_blocks[i]
        matched_rule = current_block.get('matched_rule')

        # --- No rule matched: content that may need grouping ---
        if not matched_rule:
            content_parts = [current_block['full_text']]
            # --- Build diagnostic info (dicts) or plain indices ---
            diag_info_list = []
            if include_diagnostics:
                diag_info_list.append(_create_detailed_block_info(current_block))
            else:
                diag_info_list.append(current_block['block_index'])
            # ---
            j = i + 1
            # NOTE(review): as in v4, consecutive unmatched blocks merge with
            # no spacing-break check (v3 had one) — confirm intentional.
            while j < num_blocks and not processed_blocks[j].get('matched_rule'):
                next_block = processed_blocks[j]
                content_parts.append(next_block['full_text'])

                if include_diagnostics:
                    diag_info_list.append(_create_detailed_block_info(next_block))
                else:
                    diag_info_list.append(next_block['block_index'])
                j += 1

            # --- Compute the merged bbox (diagnostics mode only) ---
            aggregated_bbox = None
            if include_diagnostics:
                all_line_bboxes = []
                for diag_info in diag_info_list:  # diag_info is a dict here
                    for line_info in diag_info.get('lines', []):
                        line_bbox = line_info.get('line_bbox')
                        if line_bbox and isinstance(line_bbox, (list, tuple)) and len(line_bbox) == 4:
                            all_line_bboxes.append(line_bbox)
                aggregated_bbox = _calculate_aggregated_bbox(all_line_bboxes)
            # ---

            markdown_elements.append({
                'type': 'unmatched_content',
                'level': None,
                'content': ''.join(content_parts).strip(),
                'source_block_indices': diag_info_list if include_diagnostics else [idx for idx in diag_info_list],  # non-diagnostic mode stays a plain index list
                'rule_alias': 'unmatched_content_alias',
                **({'aggregated_bbox': aggregated_bbox} if include_diagnostics else {})  # added conditionally
            })
            i = j
            continue

        # --- Rule-driven handling ---
        markdown_type = matched_rule.get('markdown_type', 'unknown')
        rule_alias = matched_rule.get('alias')

        # Derive the heading level from the matched Hx key.
        level = None
        h_key = current_block.get('matched_h_key', '')
        if h_key and h_key.startswith('H'):
            try:
                level = int(h_key[1:])
            except ValueError:
                pass

        # --- Merge logic driven by the rule type ---
        content_parts = [current_block['full_text']]
        # --- Seed the diagnostic list with the leader block's info ---
        diag_info_list = []
        if include_diagnostics:
            diag_info_list.append(_create_detailed_block_info(current_block, is_leader=True, rule_type=markdown_type, score=current_block.get('match_score')))
        else:
            diag_info_list.append(current_block['block_index'])
        # ---
        j = i + 1

        # Main font-size range of the current rule, used for comparison.
        current_rule_main_h_range = None
        current_h_key = current_block.get('matched_h_key', '')
        if current_h_key:
            current_rule_main_h_range = matched_rule.get(current_h_key)

        # Heuristic: decide whether to try merging the following content.
        should_merge_heuristic = (
                markdown_type in ['paragraph_title', 'doc_title'] or
                (current_rule_main_h_range and current_rule_main_h_range[0] >= 12)
        )

        if should_merge_heuristic:
            while j < num_blocks:
                next_block = processed_blocks[j]
                next_matched_rule = next_block.get('matched_rule')

                # Stop merging when:
                # 1. the next block matched a rule (a structural element)
                if next_matched_rule:
                    break

                # 2. the gap is too large
                median_space_above_next = next_block.get('vspace_before_top5_median', float('inf'))
                typical_line_height = next_block.get('avg_line_height', 10)
                paragraph_break_threshold = max(typical_line_height * 1.8, 15)
                if median_space_above_next > paragraph_break_threshold:
                    break

                # 3. the font is clearly larger (probably a new heading)
                next_font_size = next_block.get('max_span_size', 0)
                if next_font_size > (current_block.get('max_span_size', 0) + 2):
                    break

                # No stop condition hit: merge the block.
                content_parts.append(next_block['full_text'])
                if include_diagnostics:
                    diag_info_list.append(_create_detailed_block_info(next_block, merge_reason=f"Following {markdown_type}, space OK, size OK"))
                else:
                    diag_info_list.append(next_block['block_index'])
                j += 1

        # --- Compute the merged bbox (diagnostics mode only) ---
        aggregated_bbox = None
        if include_diagnostics:
            all_line_bboxes = []
            for diag_info in diag_info_list:  # diag_info is a dict here
                for line_info in diag_info.get('lines', []):
                    line_bbox = line_info.get('line_bbox')
                    if line_bbox and isinstance(line_bbox, (list, tuple)) and len(line_bbox) == 4:
                        all_line_bboxes.append(line_bbox)
            aggregated_bbox = _calculate_aggregated_bbox(all_line_bboxes)
        # ---

        # --- Emit the final element ---
        final_element = {
            'rule_alias': rule_alias,
            'type': markdown_type,
            'level': level,
            'content': ''.join(content_parts).strip(),
            'source_block_indices': diag_info_list,  # detailed dicts in diagnostics mode, else indices
        }
        if include_diagnostics and aggregated_bbox:
            final_element['aggregated_bbox'] = aggregated_bbox  # merged bbox

        markdown_elements.append(final_element)
        # ---
        i = j  # advance past the processed blocks

    return markdown_elements


# --- 辅助函数 ---
def _create_detailed_block_info(block, is_leader=False, rule_type=None, score=None, merge_reason=None):
    """为单个 block 创建详细的诊断信息字典"""
    info = {
        'block_index': block['block_index'],
        'block_font_size': block.get('max_span_size'),
        'block_avg_line_height': block.get('avg_line_height'),
        'block_vspace_before': block.get('vspace_before'),
        'block_vspace_before_median': block.get('vspace_before_top5_median'),
        'block_bbox': block.get('bbox'),  # 原始 block bbox
        'lines': []
    }
    if is_leader:
        info['matched_rule_type'] = rule_type
        info['match_score'] = score
    if merge_reason:
        info['merged_because'] = merge_reason

    # 提取行信息
    lines_in_block = block.get('lines', [])
    for line_obj in lines_in_block:
        # 假设 line_obj 包含 text, bbox, max_line_size 等信息
        # spans 信息也可以包含在这里
        line_info = {
            'line_text': line_obj.get('text', ''),
            'line_bbox': line_obj.get('bbox', []),
            'line_font_size': line_obj.get('max_line_size', None),
            # 'spans': line_obj.get('spans', []) # 可选：包含 span 详情
        }
        info['lines'].append(line_info)

    return info


def _calculate_aggregated_bbox(line_bboxes):
    """根据一系列 line bbox 计算合并后的 bbox"""
    if not line_bboxes:
        return None
    try:
        # 垂直方向：从第一行的第一个字符顶到底部，到最后一个行的最后一个字符底
        # 水平方向：跨越所有行的最小左和最大右 (更稳健的做法可能是取 block 的 bbox x 范围)
        # 这里采用跨所有行 bbox 的 min/max
        min_x = min(bbox[0] for bbox in line_bboxes)
        min_y = min(bbox[1] for bbox in line_bboxes)  # Top of topmost line
        max_x = max(bbox[2] for bbox in line_bboxes)
        max_y = max(bbox[3] for bbox in line_bboxes)  # Bottom of bottommost line
        return [min_x, min_y, max_x, max_y]
    except (ValueError, IndexError):
        return None


if __name__ == '__main__':
    clear_logs()
    input_pdf = TEST_DATA_DIR / "1715339805571.pdf"
    #
    # markdown_output_path = OUTPUT_DATA_DIR / "output_manual.md"
    # pdf_to_markdown_manual(input_pdf, markdown_output_path)

    page_range = (0, 0)  # first page only
    zoom = 1.0
    filename_without_ext = f"{extract_filename_without_extension(input_pdf)}_v3"

    def _dump_json(obj, filename):
        """Write *obj* as pretty-printed UTF-8 JSON to OUTPUT_DATA_DIR/filename."""
        with open(OUTPUT_DATA_DIR / filename, "w", encoding="utf-8") as f:
            f.write(json.dumps(obj, ensure_ascii=False, indent=4))

    # NOTE(review): a dead `doc = fitz.open(input_pdf)` was removed here — the
    # handle was never used nor closed (leaked document); pages come from
    # get_pages_from_range instead.
    pages = get_pages_from_range(input_pdf, page_range=page_range)
    ocr_results = []  # currently never populated — TODO confirm it is still needed
    for page in pages:
        page_num = page.number
        # Clip to the page's content region (drops margins/decoration).
        clip_rect = pdf_get_content_region(page, zoom_factor=zoom, is_show_log=False)
        block_dict = page.get_text("dict", clip=clip_rect, sort=True)

        # Remove duplicated spans before analysis.
        page_dict = deduplicate_spans_in_blocks_with_stats(block_dict, verbose=False)
        extract_json(page_dict, output_path=OUTPUT_DATA_DIR / f"page_{page_num}_page_dict.json")

        blocks_info_data = analyze_page_blocks(page_dict)
        _dump_json(blocks_info_data, f"page_{page_num}_blocks_info.json")

        # --- Scheme 1 (方案1) ---
        # structured_output = convert_blocks_to_markdown_structure(blocks_info_result)
        # _dump_json(structured_output, f"page_{page_num}_structured.json")

        # --- Scheme 2 (方案2): rule-driven structuring ---
        rules = [
            {
                'markdown_type': 'doc_header',
                'alias': 'chapter_header',
                'H1': (10, 12),
                'bbox_range': (0, 0, 544, 66),
            },
            {
                'H1': (18, 35),
                'markdown_type': 'doc_title',
                'alias': 'chapter_title',
                'bbox_range': (0, 130, 544, 130),  # NOTE: identical y coordinates — possibly a horizontal rule; verify the matching logic
            },
            {
                'H3': (12.5, 17.5),
                'markdown_type': 'paragraph_title',
                'alias': 'chapter_title',
                'bbox_range': None,
            },
            {
                'H3': (9.5, 11.5),
                'markdown_type': 'paragraph_content',
                'alias': 'chapter_content',
                'bbox_range': None,
            },
            {
                'H4': (7.5, 8.25),
                'markdown_type': 'content_v1',
                'alias': 'chapter_explanation',
                'bbox_range': (0, 150, 544, 700),
            },
            {
                'H4': (9, 12),
                'markdown_type': 'doc_footer',
                'alias': 'chapter_footer',
                'bbox_range': (0, 700, 544, 754),
            },
        ]
        structured_output_v2 = convert_blocks_to_markdown_structure_v2(blocks_info_data, rules)
        _dump_json(structured_output_v2, f"page_{page_num}_structured_v2.json")

        # --- Scheme 3 (方案3) ---
        structured_output_v3 = convert_blocks_to_markdown_structure_v3(blocks_info_data, rules)
        _dump_json(structured_output_v3, f"page_{page_num}_structured_v3.json")

        # --- Scheme 4 (方案4): with detailed diagnostics ---
        structured_output_v4 = convert_blocks_to_markdown_structure_v4(blocks_info_data, rules, include_diagnostics=True)
        _dump_json(structured_output_v4, f"page_{page_num}_structured_v4.json")

        # --- Scheme 5 (方案5): with detailed diagnostics ---
        structured_output_v5 = convert_blocks_to_markdown_structure_v5(blocks_info_data, rules, include_diagnostics=True)
        _dump_json(structured_output_v5, f"page_{page_num}_structured_v5.json")