import fitz

from app.utils.file_tools import clear_logs, extract_filename_without_extension
from app.utils.pymupdf_tools import get_pages_from_range, pdf_get_content_region, deduplicate_spans_in_blocks_with_stats
from tests.base_test import base_test_case

logger = base_test_case.get_logger(__name__)
TEST_DATA_DIR = base_test_case.test_data_dir
OUTPUT_DATA_DIR = base_test_case.output_data_dir
CONF_DIR = base_test_case.conf_dir
CONF_PIPELINE_DIR = base_test_case.conf_pipeline_dir

from collections import defaultdict


# --- Helper: post-process to merge consecutive lines sharing a font size ---
def post_process_merge_lines_by_size(md_lines_with_info, size_tolerance=0.5):
    """
    Post-process preliminary Markdown lines, merging runs of consecutive
    non-title lines whose font sizes match within a tolerance.

    Args:
        md_lines_with_info (list of dict): dicts with keys 'text' (str line
            text), 'size' (float font size) and 'is_title' (bool heading flag).
        size_tolerance (float): tolerance for treating two sizes as equal.

    Returns:
        list of str: the processed plain-text lines.
    """
    if not md_lines_with_info:
        return []

    merged_output = []
    pending_texts = []  # lines waiting to be merged into one output line
    pending_sizes = []  # font sizes of the pending lines

    def _flush():
        """Join pending lines with spaces and emit them as one output line."""
        if pending_texts:
            joined = " ".join(pending_texts).strip()
            if joined:  # only emit non-empty results
                merged_output.append(joined)
        pending_texts.clear()
        pending_sizes.clear()

    for entry in md_lines_with_info:
        text = entry['text']
        size = entry['size']

        if entry.get('is_title', False) or not text.strip():
            # A title or blank line breaks any merge run and passes through verbatim.
            _flush()
            merged_output.append(text)
            continue

        # Ordinary text line: it may merge only when its size is close enough
        # to the most recently buffered line's size.
        mergeable = bool(pending_sizes) and abs(size - pending_sizes[-1]) < size_tolerance
        if not mergeable:
            # Different size (or empty buffer): emit what we have, start fresh.
            _flush()
        pending_texts.append(text.strip())  # strip edges before joining
        pending_sizes.append(size)

    # Emit whatever is still buffered after the loop.
    _flush()

    return merged_output


# --- Main processing function (analysis performed per text block) ---
def process_page_to_markdown(json_data, fixed_paragraph_threshold=10.0, max_header_level=6, merge_tolerance=0.5):
    """
    Convert json_data (a PyMuPDF "dict" page extraction) into Markdown text.

    Pipeline:
     1. First pass: collect each block's full text and average span font size.
     2. Second pass: derive heading levels from the per-block average sizes.
     3. Third pass: emit intermediate Markdown lines (annotated with size info),
        inserting paragraph breaks based on vertical gaps between blocks.
     4. Post-process: merge consecutive lines sharing the same font size.

    Args:
     fixed_paragraph_threshold: fixed vertical-gap threshold (pt) used to decide paragraph breaks.
     max_header_level: highest Markdown heading level allowed (default H6).
     merge_tolerance: tolerance (pt) for treating two font sizes as equal.

    Returns a (markdown_text, stats_dict) tuple.
    """
    # --- Step 1: collect data (one entry per text block) ---
    # status['block_line'] layout: [{"index": int, "text": str, "avg_size": float}, ...]
    status = {'block_line': []}
    all_valid_sizes = []  # every positive span size, for the global distribution analysis
    blocks = json_data.get("blocks", [])
    block_index_counter = 0
    for block in blocks:
        if block.get("type") == 0:  # Text Block
            full_block_text_parts = []
            sizes_in_block = []
            lines = block.get("lines", [])
            for line_obj in lines:
                spans = line_obj.get("spans", [])
                for span in spans:
                    text = span.get("text", "")
                    size = span.get("size", 0)
                    full_block_text_parts.append(text)
                    if size > 0:
                        sizes_in_block.append(size)
                        all_valid_sizes.append(size)  # collect every valid size
            full_block_text = "".join(full_block_text_parts)  # concatenate all text in the block
            # Average font size across every span in this block.
            avg_size_this_block = sum(sizes_in_block) / len(sizes_in_block) if sizes_in_block else 0
            # Only non-empty blocks join the heading-analysis queue.
            if full_block_text.strip():
                status['block_line'].append({
                    "index": block_index_counter,
                    "text": full_block_text,
                    "avg_size": avg_size_this_block
                })
                block_index_counter += 1
            # else: empty blocks are skipped for heading analysis, though they may yield blank output lines
    print(f"调试: 共收集到 {len(status['block_line'])} 个非空文本块用于标题分析。")
    print(f"调试: 收集到的所有有效字体大小: {sorted(all_valid_sizes)}")  # dump every size for reference

    # --- Step 2: determine heading levels (global font-size distribution, multi-level) ---
    title_mapping = {}  # {block_index: header_level}

    if not status['block_line']:
        print("警告：未收集到任何带文本的块。")
    else:
        # All positive block-average sizes (used in the final matching step).
        valid_avg_sizes = [item["avg_size"] for item in status['block_line'] if item["avg_size"] > 0]

        if not valid_avg_sizes or not all_valid_sizes:
            print("警告：收集到的块中没有有效的字体大小信息用于标题判断。")
        else:
            # --- Core logic: fine-grained multi-level grouping of the global size distribution ---
            # 1. Sort and deduplicate every collected size.
            sorted_unique_sizes = sorted(list(set(all_valid_sizes)))
            print(f"调试: 去重并排序后的唯一字体大小: {sorted_unique_sizes}")

            if len(sorted_unique_sizes) == 1:
                print("提示: 文档中似乎只有一种主要字体大小。可能没有明显标题。")
                # Single dominant size: no special heading handling.
            else:
                # 2. Find significant gaps between adjacent sizes to use as group boundaries.
                size_gaps = []
                for i in range(1, len(sorted_unique_sizes)):
                    gap = sorted_unique_sizes[i] - sorted_unique_sizes[i - 1]
                    size_gaps.append((gap, i))  # (gap_value, index_after_gap)

                size_gaps.sort(key=lambda x: x[0], reverse=True)  # largest gaps first
                print(f"调试: 计算出的大小间隙 (gap, index_after_gap): {size_gaps}")

                # 3. Pick the split points (group count decided dynamically).
                min_significant_gap = 0.2  # pt; tune as needed
                significant_gaps = [g for g in size_gaps if g[0] >= min_significant_gap]
                num_groups_limit = max_header_level  # at most max_header_level groups
                significant_gaps = significant_gaps[:num_groups_limit - 1]  # N groups need N-1 split points
                significant_gap_indices = sorted([g[1] for g in significant_gaps])  # ascending by position

                print(f"调试: 选取的显著间隙索引位置 (已排序): {significant_gap_indices}")

                # 4. Partition the sorted sizes at the chosen gaps.
                groups = []
                start_idx = 0
                for idx in significant_gap_indices:
                    group = sorted_unique_sizes[start_idx:idx]
                    if group:  # skip empty groups
                        groups.append(group)
                    start_idx = idx
                # Append the trailing group.
                last_group = sorted_unique_sizes[start_idx:]
                if last_group:
                    groups.append(last_group)

                # 5. Compute a representative size per group (e.g. mean or max), then sort descending.
                # [(representative_size, original_group), ...]
                temp_group_info = []
                for group in groups:
                    if group:
                        # Use the in-group mean as the representative size.
                        representative_size = sum(group) / len(group)
                        temp_group_info.append((representative_size, group))

                # Sort by representative size, largest first.
                temp_group_info.sort(key=lambda x: x[0], reverse=True)
                candidate_size_groups_desc = temp_group_info  # refresh the local alias

                print(f"调试: 根据间隙分组并排序后的大小组 (代表大小, 原始大小列表): {candidate_size_groups_desc}")

                # 6. Map group index to heading level (largest group -> H1).
                group_index_to_header_level = {}
                for i, _ in enumerate(candidate_size_groups_desc):
                    level = i + 1
                    if level <= max_header_level:
                        group_index_to_header_level[i] = level

                print(f"调试: 组索引到标题级别的映射: {group_index_to_header_level}")

                # 7. Apply the mapping at the block level.
                tolerance = 0.15  # pt; tune as needed

                # Pre-build a size -> group-index lookup table for speed.
                size_to_group_index_map = {}
                for group_idx, (_, original_sizes_in_group) in enumerate(candidate_size_groups_desc):
                    for orig_sz in original_sizes_in_group:
                        size_to_group_index_map[orig_sz] = group_idx

                for item in status['block_line']:
                    block_avg_size = item["avg_size"]
                    assigned_level = 0  # default: not a heading

                    # Find the collected size closest to this block's average size.
                    closest_orig_size = min(all_valid_sizes, key=lambda s: abs(s - block_avg_size))
                    if abs(closest_orig_size - block_avg_size) <= tolerance:
                        # If the closest original size belongs to some group...
                        group_idx_of_closest = size_to_group_index_map.get(closest_orig_size, -1)
                        if group_idx_of_closest != -1:
                            # ...and that group is mapped to a heading level, assign it.
                            assigned_level = group_index_to_header_level.get(group_idx_of_closest, 0)

                    if assigned_level > 0:
                        title_mapping[item["index"]] = assigned_level

                print(f"调试: 块索引到最终标题级别的映射: {title_mapping}")

    # --- Step 3: build intermediate Markdown (heading mapping + gap-based paragraph breaks) ---
    # This step produces an intermediate list annotated with extra info (font size, title flag).
    intermediate_md_lines_with_info = []  # [{text: str, size: float, is_title: bool}, ...]
    previous_block_bottom = None
    paragraph_count = 1  # there is always at least one paragraph
    current_block_info_index = 0  # cursor into status['block_line']

    for block in blocks:
        block_type = block.get("type", -1)
        if block_type == 0:  # text blocks only
            current_block_bbox = block.get("bbox", [0, 0, 0, 0])
            current_block_top = current_block_bbox[1]
            current_block_bottom = current_block_bbox[3]

            # --- Paragraph-break decision (vertical gap between consecutive text blocks) ---
            if previous_block_bottom is not None:
                vertical_gap = current_block_top - previous_block_bottom
                if vertical_gap > fixed_paragraph_threshold:
                    intermediate_md_lines_with_info.append({"text": "", "size": 0.0, "is_title": False})  # blank line marks a paragraph break
                    paragraph_count += 1

            # --- Rebuild the current block's full text (to match it against Step 1 data) ---
            full_current_block_text_parts = []
            for line_obj in block.get("lines", []):
                for span in line_obj.get("spans", []):
                    full_current_block_text_parts.append(span.get("text", ""))
            full_current_block_text = "".join(full_current_block_text_parts)

            # NOTE(review): matching relies on the Step 1 cursor advancing in lockstep
            # with this loop and on exact text equality — confirm this holds for all inputs.
            matched_block_info = None
            if current_block_info_index < len(status['block_line']):
                potential_match = status['block_line'][current_block_info_index]
                if potential_match["text"] == full_current_block_text and full_current_block_text.strip():
                    matched_block_info = potential_match
                    current_block_info_index += 1

            # --- Apply the heading marker for this block, if any ---
            header_level_final = 0
            avg_size_for_debug = 0.0
            if matched_block_info and matched_block_info["text"].strip():
                header_level_final = title_mapping.get(matched_block_info["index"], 0)
                avg_size_for_debug = matched_block_info.get("avg_size", 0.0)

            # --- Emit final text (respecting the block's internal line structure) ---
            # The complex special_merge_logic was removed; output is simplified to per-line.
            lines_in_block = block.get("lines", [])
            for line_obj in lines_in_block:
                line_text_parts = []
                line_sizes = []
                spans = line_obj.get("spans", [])
                for span in spans:
                    text = span.get("text", "")
                    size = span.get("size", 0)
                    line_text_parts.append(text)
                    if size > 0:
                        line_sizes.append(size)

                full_line_text = "".join(line_text_parts).rstrip('\n')

                # Representative size for the line (mean of its span sizes).
                avg_line_size = sum(line_sizes) / len(line_sizes) if line_sizes else 0

                # For headings, only the first line gets the heading marker and debug info.
                if header_level_final > 0 and line_obj == lines_in_block[0]:
                    header_prefix_md = "#" * header_level_final + " "
                    title_line_md = f"{header_prefix_md}{full_line_text.strip()}"
                    intermediate_md_lines_with_info.append({
                        "text": title_line_md,
                        "size": avg_size_for_debug,  # heading size = the block's average size
                        "is_title": True
                    })
                    # Emit an HTML comment carrying the font size (debug info).
                    debug_comment_line_md = f"<!-- Size: {avg_size_for_debug:.2f}pt -->"
                    intermediate_md_lines_with_info.append({
                        "text": debug_comment_line_md,
                        "size": avg_size_for_debug,  # the debug line inherits the heading size
                        "is_title": False  # but is not itself a heading
                    })
                elif full_line_text.strip() or '\n'.join([s.get("text", "") for l in block.get("lines", []) for s in l.get("spans", [])]).count('\n') > len(
                        block.get("lines", [])):  # NOTE(review): original blank-line heuristic — condition looks dubious; confirm intent
                    # Ordinary line — record its text and size.
                    intermediate_md_lines_with_info.append({
                        "text": full_line_text,
                        "size": avg_line_size,  # use the line's average size
                        "is_title": False
                    })

            # --- Remember this block's bottom coordinate for the next gap computation ---
            previous_block_bottom = current_block_bottom

    # --- Step 4: post-process — merge consecutive same-size lines ---
    final_markdown_lines = post_process_merge_lines_by_size(intermediate_md_lines_with_info, size_tolerance=merge_tolerance)

    # --- Simple statistics ---
    stats_simple = {
        "paragraphs": paragraph_count,
        "headings": defaultdict(int),
        "approx_list_items": sum(
            1 for line in final_markdown_lines if line.lstrip().startswith(('-', '*', '+')) or (
                    line.lstrip() and line.lstrip()[0].isdigit() and '.' in line.lstrip()[:5]))
    }

    # Count headings per level (based on the heading mapping from Step 2).
    for level in title_mapping.values():
        if 1 <= level <= max_header_level:
            stats_simple["headings"][f"h{level}"] += 1

    return "\n".join(final_markdown_lines), dict(stats_simple)


# --- Statistics printer (simplified) ---
def print_statistics(stats):
    """Print a simplified summary of the document statistics dict to stdout."""
    print("\n========== 文档统计信息 (简化版) ==========")
    print(f"段落数 (Paragraphs): {stats.get('paragraphs', 0)}")
    print("-" * 20)
    print("标题统计:")
    heading_counts = stats.get('headings', {})
    # Per-level counts first, then the overall total.
    for level_name, count in heading_counts.items():
        print(f"  {level_name.upper()}: {count}")
    print(f"  总计标题: {sum(heading_counts.values())}")
    print("-" * 20)
    print(f"近似列表项数 (Approx List Items): {stats.get('approx_list_items', 0)}")
    print("=" * 30)


if __name__ == '__main__':
    clear_logs()
    source_pdf = TEST_DATA_DIR / "1715339805571.pdf"
    #
    # markdown_output_path = OUTPUT_DATA_DIR / "output_manual.md"
    # pdf_to_markdown_manual(input_pdf, markdown_output_path)

    pages_to_process = (1, 1)
    zoom_factor = 1.0
    base_name = f"{extract_filename_without_extension(source_pdf)}_v3"

    pdf_doc = fitz.open(source_pdf)
    selected_pages = get_pages_from_range(source_pdf, page_range=pages_to_process)
    ocr_results = []
    for current_page in selected_pages:
        current_page_num = current_page.number
        # Determine the content region of the page.
        content_clip = pdf_get_content_region(current_page, zoom_factor=zoom_factor, is_show_log=False)
        raw_block_dict = current_page.get_text("dict", clip=content_clip, sort=True)

        # Drop duplicated spans.
        deduped_data = deduplicate_spans_in_blocks_with_stats(raw_block_dict, verbose=False)

        paragraph_gap_threshold = 1.5

        # --- Key call ---
        md_text, stats = process_page_to_markdown(deduped_data, fixed_paragraph_threshold=paragraph_gap_threshold)
        # --- Key call end ---

        print_statistics(stats)

        print(f"\n--- 生成的 Markdown 草稿 (前20行) ---")
        preview_lines = md_text.splitlines()
        for idx, preview_line in enumerate(preview_lines[:20]):
            print(f"{idx + 1:2d}: {preview_line}")
        if len(preview_lines) > 20:
            print("  ...")
        print("-" * 20)

        out_path = OUTPUT_DATA_DIR / f"{base_name}_{current_page_num:02d}process_page_to_markdown.md"
        with open(out_path, "w", encoding="utf-8") as fh:
            fh.write(md_text)