import fitz

from app.utils.file_tools import clear_logs, extract_filename_without_extension
from app.utils.pymupdf_tools import get_pages_from_range, pdf_get_content_region, deduplicate_spans_in_blocks_with_stats
from tests.base_test import base_test_case

logger = base_test_case.get_logger(__name__)
TEST_DATA_DIR = base_test_case.test_data_dir
OUTPUT_DATA_DIR = base_test_case.output_data_dir
CONF_DIR = base_test_case.conf_dir
CONF_PIPELINE_DIR = base_test_case.conf_pipeline_dir

import fitz  # PyMuPDF
from collections import defaultdict
import statistics


# --- 1. Global statistics analysis ---
def analyze_page_dict(page_dict):
    """
    Scan a PyMuPDF page dictionary and collect font/line statistics used to
    tell headers apart from body paragraphs.
    """
    sizes = []
    heights = []
    font_names = set()

    for block in page_dict.get("blocks", []):
        if block['type'] != 0:
            continue  # text blocks only
        for line in block.get("lines", []):
            bbox = line["bbox"]
            heights.append(bbox[3] - bbox[1])
            for span in line.get("spans", []):
                sizes.append(span["size"])
                font_names.add(span["font"])

    # Aggregate the raw measurements.
    stats = {}
    if sizes:
        stats['avg_font_size'] = sum(sizes) / len(sizes)
        stats['median_font_size'] = statistics.median(sizes)
        stats['max_font_size'] = max(sizes)
        stats['min_font_size'] = min(sizes)
        try:
            stats['mode_font_size'] = statistics.mode(sizes)
        except statistics.StatisticsError:
            # No unique mode: fall back to the median.
            stats['mode_font_size'] = stats['median_font_size']

    if heights:
        stats['avg_line_height'] = sum(heights) / len(heights)
        stats['median_line_height'] = statistics.median(heights)

    stats['fonts'] = list(font_names)

    # Simple heuristic: the largest distinct size is the main-header candidate,
    # the second largest the subheader candidate.
    distinct_sizes = sorted(set(sizes), reverse=True)
    if len(distinct_sizes) >= 2:
        stats['likely_header_size'] = distinct_sizes[0]
        stats['likely_subheader_size'] = distinct_sizes[1]
    elif len(distinct_sizes) == 1:
        stats['likely_header_size'] = distinct_sizes[0]
        stats['likely_subheader_size'] = distinct_sizes[0]
    else:
        stats['likely_header_size'] = stats.get('max_font_size', 12)
        stats['likely_subheader_size'] = stats.get('median_font_size', 10)

    print(f"Analyzed Stats: {stats}")
    return stats


# --- 2. Initial structure identification ---
def identify_structure(blocks, stats, size_tolerance=0.5):
    """
    Classify each text line as a header or paragraph content.

    A line is a header when its largest span size is within ``size_tolerance``
    of ``stats['likely_header_size']`` (→ 'h1') or
    ``stats['likely_subheader_size']`` (→ 'h2'). Consecutive non-header lines
    are accumulated and emitted as one 'paragraph' element.

    Args:
        blocks: the ``blocks`` list from a PyMuPDF ``page.get_text("dict")``.
        stats: statistics dict produced by ``analyze_page_dict``.
        size_tolerance: max font-size distance to still count as a header.

    Returns:
        List of dicts with keys 'type' ('h1'/'h2'/'paragraph'), 'text',
        'size' and 'spans'.
    """
    structured_elements = []
    current_paragraph_spans = []  # spans accumulated for the paragraph in progress

    def flush_paragraph():
        """Emit the accumulated paragraph spans as a single element."""
        if current_paragraph_spans:
            text = "".join(s.get("text", "") for s in current_paragraph_spans)
            # Use the first span's size as the paragraph's representative size.
            representative_size = current_paragraph_spans[0]["size"]
            structured_elements.append({
                'type': 'paragraph',
                'text': text.strip(),
                'size': representative_size,
                # BUGFIX: store a copy — storing the list itself and then
                # clearing it left every paragraph with an empty 'spans'.
                'spans': list(current_paragraph_spans),
            })
            current_paragraph_spans.clear()

    for block in blocks:
        if block['type'] != 0:
            continue  # skip non-text blocks

        # BUGFIX: the header/paragraph handling below was dedented out of this
        # loop, so only the last line of each block was classified (and blocks
        # whose lines were all empty raised NameError on `is_header`).
        for line in block.get("lines", []):
            line_spans = line.get("spans", [])
            line_text = "".join(s.get("text", "") for s in line_spans).strip()
            if not line_text:
                continue  # skip empty lines
            line_max_size = max(s["size"] for s in line_spans)

            # Decide header vs. paragraph from the collected statistics.
            if abs(line_max_size - stats.get('likely_header_size', 0)) <= size_tolerance:
                element_type = 'h1'
            elif abs(line_max_size - stats.get('likely_subheader_size', 0)) <= size_tolerance:
                element_type = 'h2'
            else:
                element_type = None  # paragraph content

            if element_type:
                # A header closes any paragraph accumulated so far.
                flush_paragraph()
                structured_elements.append({
                    'type': element_type,  # 'h1' or 'h2'
                    'text': line_text,
                    'size': line_max_size,
                    'spans': line_spans,  # keep original spans
                })
            else:
                # Keep accumulating spans into the current paragraph.
                current_paragraph_spans.extend(line_spans)

    # Flush the final paragraph, if any.
    flush_paragraph()

    return structured_elements


# --- 3. Generic font-size based merge logic ---
def merge_lines_by_size(structured_elements, stats, size_tolerance=0.5):
    """
    Post-processing step: merge runs of consecutive non-header elements whose
    font sizes are within ``size_tolerance`` of each other into single
    'paragraph' elements.
    """
    if not structured_elements:
        return structured_elements

    default_size = stats.get('median_font_size', 10)
    mergeable = ('paragraph', 'paragraph_candidate')
    merged_elements = []
    idx = 0
    total = len(structured_elements)

    while idx < total:
        elem = structured_elements[idx]

        # Headers pass through untouched.
        if elem['type'].startswith('h'):
            merged_elements.append(elem)
            idx += 1
            continue

        if elem['type'] in mergeable:
            # The first element of the run provides the reference size.
            anchor_size = elem.get('size', default_size)
            text_acc = elem['text']
            span_acc = list(elem.get('spans', []))

            # Absorb following elements while they stay mergeable and close in size.
            scan = idx + 1
            while scan < total:
                candidate = structured_elements[scan]
                if candidate['type'] not in mergeable:
                    break  # header or other type ends the run
                if abs(candidate.get('size', default_size) - anchor_size) > size_tolerance:
                    break  # size differs too much
                # Insert a space unless either side already supplies a separator.
                glue = ""
                if not text_acc.endswith((" ", "-", ":", ";")) and not candidate['text'].startswith((" ", "-", ":", ";")):
                    glue = " "
                text_acc = text_acc + glue + candidate['text']
                span_acc.extend(candidate.get('spans', []))
                scan += 1

            merged_elements.append({
                'type': 'paragraph',
                'text': text_acc.strip(),  # final cleanup
                'size': anchor_size,
                'spans': span_acc,
            })
            idx = scan  # jump past everything we merged

        else:
            # Unknown element types are kept as-is.
            merged_elements.append(elem)
            idx += 1

    return merged_elements


# --- 4. Markdown generation ---
def generate_markdown(structured_elements):
    """
    Render the structured elements as a Markdown string.
    """
    heading_prefix = {'h1': '# ', 'h2': '## '}
    rendered = []
    for elem in structured_elements:
        kind = elem['type']
        text = elem['text']
        if kind in heading_prefix:
            rendered.append(f"{heading_prefix[kind]}{text}\n")
        elif kind == 'paragraph':
            # Basic cleanup so stray newlines don't break the markdown rendering.
            cleaned = text.replace("\n", " ").replace("  ", " ").strip()
            if cleaned:  # only emit non-empty paragraphs
                rendered.append(f"{cleaned}\n")
        # Other element kinds (lists, code, ...) could be handled here.
    return "\n".join(rendered)


# --- Main execution flow ---
def pdf_to_markdown(pdf_path, page_number=0):
    """
    Convert one page of a PDF to a Markdown string.

    Args:
        pdf_path: path of the PDF file to open.
        page_number: zero-based index of the page to convert.

    Returns:
        The generated Markdown for that page.
    """
    doc = fitz.open(pdf_path)
    # BUGFIX: close the document even when a step below raises, instead of
    # leaking the file handle.
    try:
        page = doc.load_page(page_number)
        page_dict = page.get_text("dict")

        # Step 1: gather font/line statistics for the page.
        stats = analyze_page_dict(page_dict)

        # Step 2: initial identification (a finer tolerance can be passed here).
        structured_elements = identify_structure(page_dict.get("blocks", []), stats, size_tolerance=0.5)
        print("\n--- After Initial Identification ---")
        for e in structured_elements:
            print(f"Type: {e['type']}, Size: {e['size']:.2f}, Text: '{e['text']}'")

        # Step 3: merge consecutive same-size lines (tolerance may differ).
        merged_elements = merge_lines_by_size(structured_elements, stats, size_tolerance=0.5)
        print("\n--- After Merging Lines ---")
        for e in merged_elements:
            print(f"Type: {e['type']}, Size: {e['size']:.2f}, Text: '{e['text']}'")

        # Step 4: render Markdown.
        return generate_markdown(merged_elements)
    finally:
        doc.close()



if __name__ == '__main__':
    clear_logs()
    input_pdf = TEST_DATA_DIR / "1715339805571.pdf"

    page_range = (0, 0)
    zoom = 1.0
    filename_without_ext = f"{extract_filename_without_extension(input_pdf)}_v3"

    # BUGFIX: dropped the unused `doc = fitz.open(input_pdf)` (the handle was
    # never used nor closed) and the unused `ocr_results` list.
    pages = get_pages_from_range(input_pdf, page_range=page_range)
    for page in pages:
        page_num = page.number
        # Restrict extraction to the page's content region.
        clip_rect = pdf_get_content_region(page, zoom_factor=zoom, is_show_log=False)
        block_dict = page.get_text("dict", clip=clip_rect, sort=True)

        # Remove duplicated spans before analysis.
        full_page_dict = deduplicate_spans_in_blocks_with_stats(block_dict, verbose=False)

        stats = analyze_page_dict(full_page_dict)
        structured_elements = identify_structure(full_page_dict.get("blocks", []), stats, size_tolerance=0.5)
        merged_elements = merge_lines_by_size(structured_elements, stats, size_tolerance=0.5)
        markdown_output = generate_markdown(merged_elements)
        print("\n--- Final Markdown Output ---")
        print(markdown_output)

        markdown_output_path = OUTPUT_DATA_DIR / f"{filename_without_ext}_{page_num:02d}process_page_to_markdown.md"
        with open(markdown_output_path, "w", encoding="utf-8") as file:
            file.write(markdown_output)
