# -*-coding:utf-8 -*-
"""
批量处理 ROOT_DIR 下所有 PDF（PDF 直接在根目录，无子文件夹）：
- 提取大图、切分子图
- 统一生成一个 real.json（bbox_page_pixel：页面左上原点的像素坐标）
- 子图按“从上到下再从左到右”排序，面板文件复制到新子目录中按新顺序命名
- 可视化（每篇生成 *_with_bbox.pdf 供抽检）
- 可选：复制成功后删除 cut 的源面板图
"""

import os
import json
import sys
import glob
import shutil
import statistics
import traceback
import fitz  # PyMuPDF
from tqdm import tqdm
from config import setting

# ================= 可配置参数 =================
ROOT_DIR = setting.ROOT_DIR        # 例如 /home/.../dataset/real/real_pdf   —— 这里面直接放一堆PDF
MODEL_PATH = setting.MODEL_PATH
PAGE_PX_PER_PT = setting.PAGE_PX_PER_PT # 1.0

USE_GPU = False                    # True 启用 GPU（取决于 extract_panel 实现）
GPU_ID  = "0"

VERBOSE = False                    # True 输出详细日志
DELETE_ORIG_AFTER_COPY = True      # 复制/重命名成功后，删除 cut() 源面板图


# =============== 路径导入 ===============
current_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(current_dir)

from extract_figure import pdf2pic_with_page
from extract_panel import load_model, cut


# =============== 基础工具 ===============
def _find_best_image_rect(pdf_path, page_no, saved_img_w_px, saved_img_h_px):
    """Find the placed image rect (in pt) on a PDF page that best matches a
    saved PNG's pixel dimensions.

    Each embedded image is scored by a mix of absolute and relative size
    difference against (saved_img_w_px, saved_img_h_px); the lowest score wins.

    Args:
        pdf_path: path to the PDF file.
        page_no: 1-based page number.
        saved_img_w_px: pixel width of the extracted figure PNG.
        saved_img_h_px: pixel height of the extracted figure PNG.

    Returns:
        (rect, xref) for the best match, or None if the page places no images.
    """
    best = None
    best_score = float('inf')

    # Context manager guarantees the document is closed even if PyMuPDF
    # raises mid-iteration (the original leaked the handle on exceptions).
    with fitz.open(pdf_path) as doc:
        page = doc[page_no - 1]
        for img in page.get_images(full=True):
            # get_images tuple layout: (xref, smask, width, height, ...)
            xref, img_w, img_h = img[0], img[2], img[3]
            rects = page.get_image_rects(xref)
            if not rects:
                continue  # image object exists but is not placed on this page
            abs_diff = abs(img_w - saved_img_w_px) + abs(img_h - saved_img_h_px)
            rel_diff = (abs(img_w / max(saved_img_w_px, 1.0) - 1.0)
                        + abs(img_h / max(saved_img_h_px, 1.0) - 1.0))
            # Relative error is weighted so small images compete fairly
            # with large ones on the absolute-difference term.
            score = abs_diff + 500 * rel_diff
            if score < best_score:
                best_score = score
                # If placed multiple times, keep the largest placement.
                rect = max(rects, key=lambda r: (r.width * r.height))
                best = (rect, xref)

    return best  # None or (rect, xref)


def _rect_pt_to_page_pixel_bbox(rect_pt, k):
    return [round(rect_pt.x0 * k, 2),
            round(rect_pt.y0 * k, 2),
            round(rect_pt.x1 * k, 2),
            round(rect_pt.y1 * k, 2)]


def _map_panel_figpx_to_pagepx(fig_rect_pt, saved_img_w_px, saved_img_h_px, panel_in_fig_px, k):
    """panel_in_fig_px 是相对 figure PNG 的像素框 -> 映射为页面像素框（左上原点，y向下）。"""
    fx0_pt, fy0_pt, fx1_pt, fy1_pt = fig_rect_pt.x0, fig_rect_pt.y0, fig_rect_pt.x1, fig_rect_pt.y1
    fig_w_pt = max(1e-6, fx1_pt - fx0_pt)
    fig_h_pt = max(1e-6, fy1_pt - fy0_pt)

    fig_w_page_px = fig_w_pt * k
    fig_h_page_px = fig_h_pt * k

    sx = fig_w_page_px / max(saved_img_w_px, 1.0)
    sy = fig_h_page_px / max(saved_img_h_px, 1.0)

    x0f, y0f, x1f, y1f = map(float, panel_in_fig_px)
    px0 = fx0_pt * k + x0f * sx
    py0 = fy0_pt * k + y0f * sy
    px1 = fx0_pt * k + x1f * sx
    py1 = fy0_pt * k + y1f * sy

    if px0 > px1: px0, px1 = px1, px0
    if py0 > py1: py0, py1 = py1, py0
    return [round(px0, 2), round(py0, 2), round(px1, 2), round(py1, 2)]


# =============== 排序（上到下，再左到右） ===============
def _sort_panels_ltr_ttb(panels_bbox_page_px, row_eps=None):
    """
    输入：每项 dict: {"orig_idx": 原 cut 顺序(1-based), "bbox_page_pixel": [x0,y0,x1,y1]}
    输出：排序后的同结构列表，增加 row_id/col_id（调试用）。
    """
    if not panels_bbox_page_px:
        return []

    centers, heights = [], []
    for it in panels_bbox_page_px:
        x0, y0, x1, y1 = it["bbox_page_pixel"]
        centers.append(((x0 + x1) / 2.0, (y0 + y1) / 2.0))
        heights.append(max(1.0, y1 - y0))
    med_h = statistics.median(heights) if heights else 1.0
    if row_eps is None:
        row_eps = max(8.0, 0.35 * med_h)

    tmp = sorted(
        [(i, it, centers[i][0], centers[i][1]) for i, it in enumerate(panels_bbox_page_px)],
        key=lambda t: t[3]  # center_y
    )

    rows = []
    for t in tmp:
        i, it, cx, cy = t
        if not rows:
            rows.append([t]); continue
        avg_y = sum(p[3] for p in rows[-1]) / len(rows[-1])
        if abs(cy - avg_y) <= row_eps:
            rows[-1].append(t)
        else:
            rows.append([t])

    sorted_items = []
    row_id = 0
    for row in rows:
        row_id += 1
        for col_id, t in enumerate(sorted(row, key=lambda s: s[2]), 1):
            _, it, _, _ = t
            jj = dict(it)
            jj["row_id"] = row_id
            jj["col_id"] = col_id
            sorted_items.append(jj)
    return sorted_items


# =============== 复制 +（可选）删源 ===============
def _safe_copy_then_maybe_delete(src_path, dst_path, allow_delete=False):
    """
    复制 src -> dst；若 allow_delete=True 且校验通过（文件大小一致），则删除 src。
    """
    os.makedirs(os.path.dirname(dst_path), exist_ok=True)

    if not os.path.exists(src_path):
        if VERBOSE:
            print(f"  [warn] 源不存在：{src_path}")
        return False

    if os.path.abspath(src_path) == os.path.abspath(dst_path):
        return True

    if os.path.exists(dst_path):
        os.remove(dst_path)
    shutil.copy2(src_path, dst_path)

    ok = os.path.exists(dst_path)
    if ok:
        try:
            ok = (os.path.getsize(dst_path) == os.path.getsize(src_path))
        except Exception:
            ok = False

    if ok and allow_delete:
        try:
            os.remove(src_path)
        except Exception as e:
            if VERBOSE:
                print(f"  [warn] 删除源失败：{src_path}，{e}")

    return ok


def _write_sorted_panels_copy(panel_dir, name_noext, sorted_items, delete_orig=False):
    """Copy cut panels {name_noext}_{orig_idx}.png into panel/{name_noext}/,
    renamed to {name_noext}_{new_idx}.png in the sorted order.

    Sources are optionally deleted once the copy has been verified.

    Returns:
        (number_of_files_copied, destination_directory).
    """
    dst_dir = os.path.join(panel_dir, name_noext)
    os.makedirs(dst_dir, exist_ok=True)

    copied = 0
    for new_idx, item in enumerate(sorted_items, start=1):
        src_path = os.path.join(panel_dir, f"{name_noext}_{item['orig_idx']}.png")
        dst_path = os.path.join(dst_dir, f"{name_noext}_{new_idx}.png")
        if _safe_copy_then_maybe_delete(src_path, dst_path, allow_delete=delete_orig):
            copied += 1
        elif VERBOSE:
            print(f"  [warn] 复制失败：{src_path} -> {dst_path}")

    return copied, dst_dir


# =============== 可视化（每篇生成 *_with_bbox.pdf） ===============
def visualize_bbox_on_pdf(pdf_path, pages_data, output_pdf_path):
    """Draw figure bboxes (red) and panel bboxes (blue) back onto the PDF for
    spot-checking, saving the annotated document to output_pdf_path.

    pages_data maps 1-based page indices to dicts holding 'figures', each
    with a page-pixel bbox and nested 'panels'.
    """
    k = PAGE_PX_PER_PT
    doc = fitz.open(pdf_path)

    def _px_bbox_to_clamped_pt(bbox_px, w_pt, h_pt):
        # Convert page pixels back to pt and clamp inside the page area so
        # draw_rect never receives a degenerate/out-of-page rectangle.
        x0, y0, x1, y1 = (v / k for v in bbox_px)
        return [
            max(0, min(x0, w_pt - 1)),
            max(0, min(y0, h_pt - 1)),
            max(1, min(x1, w_pt)),
            max(1, min(y1, h_pt)),
        ]

    for page_index, page in enumerate(doc, start=1):
        if page_index not in pages_data:
            continue

        w_pt, h_pt = page.rect.width, page.rect.height

        # Small green reference marker + page label for orientation checks.
        page.draw_rect(fitz.Rect(36, 36, 200, 120), color=(0, 1, 0), width=2)
        page.insert_text(fitz.Point(40, 30), f"p{page_index}", fontsize=10, color=(0, 1, 0))

        for fig in pages_data[page_index]['figures']:
            fig_pt = _px_bbox_to_clamped_pt(fig['bbox_page_pixel'], w_pt, h_pt)
            page.draw_rect(fitz.Rect(*fig_pt), color=(1, 0, 0), width=3)

            for panel in fig['panels']:
                panel_pt = _px_bbox_to_clamped_pt(panel['bbox_page_pixel'], w_pt, h_pt)
                page.draw_rect(fitz.Rect(*panel_pt), color=(0, 0, 1), width=2)

    doc.save(output_pdf_path, garbage=4, deflate=True)
    doc.close()


# =============== 单篇处理（返回用于 real.json 的结构） ===============
def process_single_pdf_to_paper_data(paper_dir: str):
    """Process one paper directory and return its paper_data structure
    (no per-paper JSON is written).

    The directory must contain {paper_id}.pdf, where paper_id is the
    directory basename. Figures are extracted into <paper_dir>/figure,
    panels are cut into <paper_dir>/panel, sorted into reading order, and
    copied into per-figure subdirectories with their new indices. A
    *_with_bbox.pdf is rendered for spot checks when any figure was found.

    Returns:
        dict suitable for the combined real.json, or None when the expected
        PDF file is missing.
    """
    paper_id = os.path.basename(os.path.abspath(paper_dir))
    pdf_file = f"{paper_id}.pdf"
    pdf_path = os.path.join(paper_dir, pdf_file)
    if not os.path.exists(pdf_path):
        return None

    figure_dir = os.path.join(paper_dir, 'figure')
    panel_dir = os.path.join(paper_dir, 'panel')
    os.makedirs(panel_dir, exist_ok=True)
    os.makedirs(figure_dir, exist_ok=True)

    # 1) Extract the full-size figures from the PDF.
    image_meta_info = pdf2pic_with_page(pdf_path, figure_dir)
    if not image_meta_info:
        # No figures found: still emit a well-formed (empty) paper entry.
        # NOTE(review): hash(paper_id) is salted per interpreter run
        # (PYTHONHASHSEED), so non-numeric paper ids get a different
        # paper_id value on every run — confirm whether stability matters.
        return {
            'paper_id': int(paper_id) if paper_id.isdigit() else hash(paper_id) % (10**10),
            'path': f"dataset/real/real_pdf/{paper_id}/{paper_id}.pdf",
            'paper_level_issues': {'has_issue': False},
            'pages': []
        }

    k = PAGE_PX_PER_PT
    pages_data = {}

    # 2) Run cut() on every extracted figure.
    for img_info in tqdm(image_meta_info, desc=f"[{paper_id}] figures", leave=False):
        page_no = int(img_info['page_no'])
        fig_path = img_info['fig_path']
        saved_w = float(img_info['fig_scale_x'])
        saved_h = float(img_info['fig_scale_y'])

        if not os.path.exists(fig_path):
            if VERBOSE:
                print(f"  [err] figure PNG 不存在: {fig_path}")
            continue

        # Lazily record per-page metadata (size in pt) on first encounter.
        if page_no not in pages_data:
            with fitz.open(pdf_path) as d:
                page_rect = d[page_no - 1].rect
            pages_data[page_no] = {
                'page_index': page_no,
                'page_size_pt': {'width': float(page_rect.width), 'height': float(page_rect.height)},
                'figures': []
            }

        # Locate the matching placed-image rect on the page (pt coordinates).
        match = _find_best_image_rect(pdf_path, page_no, saved_w, saved_h)
        if not match:
            if VERBOSE:
                print("  [warn] 未匹配到该图像的页面矩形，跳过此 figure")
            continue
        fig_rect_pt, _ = match
        fig_bbox_page_pixel = _rect_pt_to_page_pixel_bbox(fig_rect_pt, k)

        # figure_id: trailing "_<n>" of the PNG basename, defaulting to 1.
        fig_basename = os.path.basename(fig_path)
        name_noext = fig_basename.rsplit('.', 1)[0]
        parts = name_noext.split('_')
        figure_id_num = int(parts[-1]) if parts and parts[-1].isdigit() else 1

        figure_data = {
            'figure_id': f'{paper_id}_{page_no}_{figure_id_num}',
            'bbox_page_pixel': fig_bbox_page_pixel,
            'path': f"dataset/real/real_pdf/{paper_id}/figure/{fig_basename}",
            'figure_level_issues': {'has_issue': False},
            'panels': []
        }

        # Run cut(); rescale detection bboxes (based on the detector's own
        # image size) back onto the saved PNG's pixel grid.
        result = cut(fig_path, panel_dir)
        panel_objs = result.get('object', []) if isinstance(result, dict) else []
        det_w = float(result.get('size', {}).get('width', saved_w))
        det_h = float(result.get('size', {}).get('height', saved_h))
        sx_fix = saved_w / max(det_w, 1.0)
        sy_fix = saved_h / max(det_h, 1.0)

        # Convert each panel to a page-pixel bbox, keeping the original cut
        # index for the later sort/copy step.
        tmp_panels = []
        for idx, obj in enumerate(panel_objs, 1):
            if 'bndbox' not in obj:
                continue
            bb = obj['bndbox']  # coordinates relative to the detector's 'size'
            x0 = float(bb['xmin']) * sx_fix
            y0 = float(bb['ymin']) * sy_fix
            x1 = float(bb['xmax']) * sx_fix
            y1 = float(bb['ymax']) * sy_fix
            pan_page_px = _map_panel_figpx_to_pagepx(fig_rect_pt, saved_w, saved_h, [x0, y0, x1, y1], k)
            tmp_panels.append({"orig_idx": idx, "bbox_page_pixel": pan_page_px})

        # Sort into reading order (top-to-bottom, then left-to-right).
        sorted_items = _sort_panels_ltr_ttb(tmp_panels, row_eps=None)

        # Copy panels under their new indices (optionally deleting sources).
        _copied, dst_dir = _write_sorted_panels_copy(
            panel_dir, name_noext, sorted_items, delete_orig=DELETE_ORIG_AFTER_COPY
        )

        # Emit one panel entry per sorted item.
        for new_idx, it in enumerate(sorted_items, 1):
            rel_path = (f"dataset/real/real_pdf/{paper_id}/panel/"
                        f"{name_noext}/{name_noext}_{new_idx}.png")
            figure_data['panels'].append({
                'panel_id': f'{paper_id}_{page_no}_{figure_id_num}_{new_idx}',
                'bbox_page_pixel': [round(v, 2) for v in it["bbox_page_pixel"]],
                'path': rel_path,
                'panel_level_issues': {'has_issue': False}
            })

        pages_data[page_no]['figures'].append(figure_data)

    # Render the annotated PDF for manual spot-checking.
    if any(p['figures'] for p in pages_data.values()):
        out_pdf = os.path.join(paper_dir, f'{paper_id}_with_bbox.pdf')
        visualize_bbox_on_pdf(os.path.join(paper_dir, f"{paper_id}.pdf"), pages_data, out_pdf)

    # Assemble paper_data (same unstable-hash caveat as above for
    # non-numeric paper ids).
    paper_data = {
        'paper_id': int(paper_id) if paper_id.isdigit() else hash(paper_id) % (10**10),
        'path': f"dataset/real/real_pdf/{paper_id}/{paper_id}.pdf",
        'paper_level_issues': {'has_issue': False},
        'pages': list(pages_data.values())
    }
    return paper_data


# =============== 批量主函数（一次加载模型；只写一个 real.json） ===============
def process_all_pdfs_to_real_json(root_dir: str, model_path: str, use_gpu: bool = True, gpu_id: str = "0"):
    """Batch-process every PDF directly under root_dir into one real.json.

    Loads the panel-cutting model once, then for each PDF creates a work
    directory root_dir/<paper_id> containing a {paper_id}.pdf symlink (or a
    copy where symlinks are unsupported), processes it, and appends the
    result. The combined JSON is written one level above root_dir.

    Args:
        root_dir: directory containing the PDFs (no subfolders expected).
        model_path: path to the panel-segmentation model.
        use_gpu: when True, request CUDA via CUDA_VISIBLE_DEVICES.
        gpu_id: CUDA device id string used when use_gpu is True.
    """
    # GPU environment setup (does not change the extract_panel interface).
    if use_gpu:
        # setdefault keeps any externally provided device selection.
        os.environ.setdefault("CUDA_VISIBLE_DEVICES", gpu_id)
        try:
            import torch
            if torch.cuda.is_available():
                print(f"[GPU] 使用 CUDA（可用设备数: {torch.cuda.device_count()}）")
            else:
                print("[GPU] CUDA 不可用，改用 CPU。")
        except Exception:
            print("[GPU] 未检测到 PyTorch，按 CPU 继续（视 extract_panel 实现而定）")
    else:
        os.environ["CUDA_VISIBLE_DEVICES"] = ""
        print("[GPU] 已禁用，强制 CPU（视 extract_panel 实现而定）")

    # Load the model exactly once for the whole batch.
    print("[init] 加载子图切分模型 ...")
    load_model(model_path)
    print("[init] 模型加载完成")

    # Discover all PDFs located directly under root_dir.
    pdfs = sorted(glob.glob(os.path.join(root_dir, "*.pdf")))
    print(f"[scan] 在 {root_dir} 下发现 {len(pdfs)} 个 PDF")
    if pdfs[:5]:
        print("  示例：")
        for p in pdfs[:5]:
            print("   -", p)

    combined_papers = []
    failed = 0

    # Per PDF: create work dir root_dir/<paper_id> with a {paper_id}.pdf link.
    for pdf_path in tqdm(pdfs, desc="All PDFs"):
        try:
            paper_id = os.path.splitext(os.path.basename(pdf_path))[0]
            work_dir = os.path.join(root_dir, paper_id)
            os.makedirs(work_dir, exist_ok=True)

            want_pdf_path = os.path.join(work_dir, f"{paper_id}.pdf")

            cleanup_link = False
            if os.path.abspath(pdf_path) != os.path.abspath(want_pdf_path):
                try:
                    if os.path.exists(want_pdf_path):
                        os.remove(want_pdf_path)
                    os.symlink(os.path.abspath(pdf_path), want_pdf_path)
                    cleanup_link = True
                except Exception:
                    # Symlinks unsupported: fall back to a real copy
                    # (kept afterwards so reruns can reuse it).
                    if not os.path.exists(want_pdf_path):
                        shutil.copy2(pdf_path, want_pdf_path)

            # try/finally so the symlink is removed even when processing
            # raises (the original leaked it on exceptions).
            try:
                paper_data = process_single_pdf_to_paper_data(work_dir)
            finally:
                if cleanup_link:
                    try:
                        os.remove(want_pdf_path)
                    except Exception:
                        pass

            if paper_data is None:
                failed += 1
                continue

            # Paths are already organized as ROOT_DIR/<paper_id>/<paper_id>.pdf.
            combined_papers.append(paper_data)

        except Exception as e:
            failed += 1
            print(f"[ERR] 处理 {pdf_path} 失败：{e}")
            if VERBOSE:
                traceback.print_exc()

    # Write the single combined real.json, one level above root_dir.
    out_json = os.path.normpath(os.path.join(root_dir, "..", "real.json"))
    with open(out_json, "w", encoding="utf-8") as f:
        json.dump({"papers": combined_papers}, f, ensure_ascii=False, indent=2)
    print(f"\n[done] 写出合并 JSON -> {out_json}")
    print(f"总PDF数={len(pdfs)}, 失败={failed}")


# =============== 入口 ===============
if __name__ == "__main__":
    # Fail fast when required paths are missing.
    precondition_checks = [
        (ROOT_DIR, f"错误: ROOT_DIR 不存在: {ROOT_DIR}"),
        (MODEL_PATH, f"错误: 模型文件不存在: {MODEL_PATH}"),
    ]
    for required_path, error_message in precondition_checks:
        if not os.path.exists(required_path):
            print(error_message)
            sys.exit(1)

    print(f"开始批量处理...\nROOT_DIR: {ROOT_DIR}\nMODEL_PATH: {MODEL_PATH}\nUSE_GPU: {USE_GPU}, GPU_ID: {GPU_ID}\n"
          f"PAGE_PX_PER_PT: {PAGE_PX_PER_PT}\nDELETE_ORIG_AFTER_COPY: {DELETE_ORIG_AFTER_COPY}\nVERBOSE: {VERBOSE}")
    process_all_pdfs_to_real_json(ROOT_DIR, MODEL_PATH, use_gpu=USE_GPU, gpu_id=GPU_ID)
