# batch_rebuild_pdf_from_fake_panel.py
# -*- coding: utf-8 -*-
"""
Batch job: splice the fake images under TEMPERED_FAKE_ROOT back into their
source PDFs, producing a forgery dataset plus a single merged JSON.

- The original paper_id is parsed from each filename (e.g. 其他_2010072773_5_3.png);
  all fake images belonging to one paper are handled in a single pass.
- Each fake image is similarity-matched against that paper's panels
  (unused panels preferred) so the same panel is not replaced twice.
- pdf/figure/panel assets are copied into the forgery directory (paper_id
  gets a suffix); the fake is archived at the original panel's pixel size.
- The new PDF gets an overlay of "fake-image bytes pre-cropped to the bbox
  size" (keeps the overlay sharp).
- Only one merged JSON is written (MERGED_FORGERY_JSON).
- A registry records processed ORIGINAL paper_ids so later batch runs
  (e.g. with other generative models) can skip them.
"""

import os
import io
import re
import json
import shutil
from typing import List, Tuple, Optional, Dict

import fitz  # PyMuPDF
from PIL import Image, ImageOps
import numpy as np

from config import settings

# ========= Configuration =========
REAL_PDF_ROOT       = settings.REAL_PDF_ROOT
REAL_JSON_PATH      = settings.REAL_JSON_PATH

TEMPERED_FAKE_ROOT  = settings.TEMPERED_FAKE_ROOT  # fake-image root (filenames embed the original paper_id)
GT_MASK_ROOT        = getattr(settings, "GT_MASK_ROOT", None)  # may be None (no ground-truth masks available)

FORGERY_ROOT        = settings.FORGERY_ROOT        # output/forgery/forgery_pdf
MERGED_FORGERY_JSON = settings.MERGED_FORGERY_JSON # output/forgery/forgery.json

PAGE_PX_PER_PT      = settings.PAGE_PX_PER_PT      # usually 1.0 (page pixels per PDF point)
SIM_COMPARE_SIZE    = settings.SIM_COMPARE_SIZE    # e.g. 224 (thumbnail side used for MSE matching)
FG_SUFFIX           = settings.FG_SUFFIX           # e.g. "FG_004"
GENERATIVE_MODEL    = settings.GENERATIVE_MODEL    # e.g. "GPT-Image-1"
FORGERY_METHOD      = settings.FORGERY_METHOD      # e.g. "edit"
REGISTRY_PATH       = settings.REGISTRY_PATH       # list of original paper_ids already processed
IMG_EXTS            = set(getattr(settings, "IMG_EXTS", [".png", ".jpg", ".jpeg"]))

# Geometric variants tried when matching a fake image against real panels:
# identity, three rotations, and both mirror flips.
TRANSFORMS = [
    ("orig",  lambda im: im),
    ("rot90", lambda im: im.rotate(90,  expand=True)),
    ("rot180",lambda im: im.rotate(180, expand=True)),
    ("rot270",lambda im: im.rotate(270, expand=True)),
    ("flipH", lambda im: ImageOps.mirror(im)),
    ("flipV", lambda im: ImageOps.flip(im)),
]

# ========= Generic: safely overwrite a PDF =========
def safe_overwrite_pdf(doc: fitz.Document, out_pdf: str) -> None:
    """Save *doc* over out_pdf, preferring an incremental save.

    An incremental save is fast and keeps the original byte stream; when it
    is impossible or fails (e.g. PyMuPDF repaired the PDF on open), fall
    back to a full rewrite into a temp file that is swapped into place.

    Fix vs. previous version: when can_save_incrementally() reported False
    the code still attempted an incremental save that was guaranteed to
    raise; now it goes straight to the full rewrite in that case.
    """
    can_incremental = True
    if hasattr(doc, "can_save_incrementally"):  # some old PyMuPDF builds lack this
        can_incremental = doc.can_save_incrementally()
    if can_incremental:
        try:
            doc.save(out_pdf, incremental=True, encryption=fitz.PDF_ENCRYPT_KEEP)
            return
        except Exception:
            pass  # fall through to the full rewrite below
    tmp = out_pdf + ".tmp"
    doc.save(tmp, garbage=4, deflate=True, encryption=fitz.PDF_ENCRYPT_KEEP)
    try:
        os.replace(tmp, out_pdf)  # atomic on the same filesystem
    except Exception:
        shutil.move(tmp, out_pdf)  # cross-device fallback

# ========= Basic utilities =========
def _center_crop_to_aspect(img: Image.Image, out_w: int, out_h: int) -> Image.Image:
    """Center-crop *img* so its aspect ratio matches out_w:out_h.

    Only removes pixels (never pads). A zero-height image is returned
    unchanged since there is nothing sensible to crop.
    """
    want = out_w / float(out_h if out_h != 0 else 1)
    w, h = img.size
    if h == 0:
        return img
    if w / float(h) > want:
        # Too wide: trim equal amounts from left and right.
        keep_w = int(round(h * want))
        left = max(0, (w - keep_w) // 2)
        return img.crop((left, 0, left + keep_w, h))
    # Too tall (or already matching): trim equally from top and bottom.
    keep_h = h if want == 0 else int(round(w / want))
    top = max(0, (h - keep_h) // 2)
    return img.crop((0, top, w, top + keep_h))

def crop_and_resize_to_file(ori_img_path: str, dst_img_path: str, out_w: int, out_h: int) -> None:
    """Center-crop the source image to the target aspect, resize to
    (out_w, out_h) and save it at dst_img_path (directories created)."""
    with Image.open(ori_img_path) as src:
        fitted = _center_crop_to_aspect(src, out_w, out_h)
        final = fitted.convert("RGB").resize(
            (max(1, out_w), max(1, out_h)), Image.Resampling.LANCZOS)
        os.makedirs(os.path.dirname(dst_img_path), exist_ok=True)
        final.save(dst_img_path, quality=95)

def crop_and_resize_to_bytes(ori_img_path: str, out_w: int, out_h: int) -> bytes:
    """Like crop_and_resize_to_file, but return the result as PNG bytes."""
    with Image.open(ori_img_path) as src:
        fitted = _center_crop_to_aspect(src, out_w, out_h)
        final = fitted.convert("RGB").resize(
            (max(1, out_w), max(1, out_h)), Image.Resampling.LANCZOS)
        buf = io.BytesIO()
        final.save(buf, format="PNG", optimize=True)
        return buf.getvalue()

def page_pixel_to_pt(bbox_px: List[float]) -> List[float]:
    """Convert a page-pixel bbox [x0, y0, x1, y1] into PDF-point units."""
    return [bbox_px[i] / PAGE_PX_PER_PT for i in range(4)]

def get_image_size(img_path: str) -> Tuple[int, int]:
    """Return (width, height) of the image at *img_path*."""
    with Image.open(img_path) as handle:
        return handle.size

def _to_gray_resized_square(im: Image.Image, size: int) -> Image.Image:
    """Normalize *im* to a size×size grayscale thumbnail for MSE comparison.

    Scales so the SHORTER side becomes *size* (both sides end up >= size,
    up to rounding), then center-crops any overshoot and zero-pads any
    rounding undershoot — width first, then height. A degenerate
    (zero-area) input yields an all-black square.
    """
    w, h = im.size
    if min(w, h) <= 0:
        return Image.new("L", (size, size), 0)
    # Scale by the shorter side so the image covers the square.
    scale = size / float(min(w, h))
    new_w, new_h = max(1, int(round(w * scale))), max(1, int(round(h * scale)))
    im2 = im.resize((new_w, new_h), Image.Resampling.LANCZOS)
    # --- width: crop overshoot / pad rounding undershoot to exactly `size` ---
    if new_w > size:
        x0 = (new_w - size) // 2
        im2 = im2.crop((x0, 0, x0 + size, new_h))
    elif new_w < size:
        pad = (size - new_w) // 2
        bg = Image.new(im2.mode, (size, new_h), 0)
        bg.paste(im2, (pad, 0))
        im2 = bg
    # --- height: same treatment ---
    if new_h > size:
        y0 = (new_h - size) // 2
        im2 = im2.crop((0, y0, size, y0 + size))
    elif new_h < size:
        pad = (size - new_h) // 2
        bg = Image.new(im2.mode, (size, size), 0)
        bg.paste(im2, (0, pad))
        im2 = bg
    return ImageOps.grayscale(im2)

def mse(a: Image.Image, b: Image.Image) -> float:
    """Mean squared error between two equally-sized (grayscale) images."""
    delta = np.asarray(a, dtype="float32") - np.asarray(b, dtype="float32")
    return float(np.mean(delta * delta))

# ========= real.json index construction (aggregated per paper) =========
def _panel_rel_from_node(p: dict) -> Optional[str]:
    """Pull the panel's relative path out of a panel node (new JSON schema)."""
    return p.get("path")

def resolve_panel_abs_from_rel(panel_rel_path: str) -> str:
    """Map a dataset-relative panel path onto the real-PDF tree.

    The relative path is expected to have >= 5 components with the
    paper_id at index 3; everything after the paper_id is reused
    verbatim under REAL_PDF_ROOT. Raises ValueError on malformed input.
    """
    pieces = panel_rel_path.strip("/").split("/")
    if len(pieces) < 5:
        raise ValueError(f"panel_rel_path malformed: {panel_rel_path}")
    return os.path.join(REAL_PDF_ROOT, pieces[3], *pieces[4:])

def build_real_index(json_path: str) -> Dict[str, dict]:
    """Load real.json and aggregate it per paper_id.

    Returns {paper_id: {"paper": <raw node>, "panels": [...], "figs": [...]}}
    where each panel entry carries its absolute file path, page bbox and
    the (page, figure, panel) indices back into the raw node.
    """
    with open(json_path, "r", encoding="utf-8") as f:
        data = json.load(f)

    index: Dict[str, dict] = {}
    for paper in data.get("papers", []):
        figs: List[dict] = []
        panels: List[dict] = []
        for page_i, page in enumerate(paper.get("pages", [])):
            for fig_i, fig in enumerate(page.get("figures", [])):
                figs.append({
                    "rel": fig.get("path"),
                    "bbox": fig.get("bbox_page_pixel", []),
                })
                for pan_i, pan in enumerate(fig.get("panels", [])):
                    rel = _panel_rel_from_node(pan)
                    if not rel:
                        continue  # panel without a path cannot be matched
                    panels.append({
                        "abs": resolve_panel_abs_from_rel(rel),
                        "bbox": pan.get("bbox_page_pixel", []),
                        "page_idx": page_i,
                        "fig_idx": fig_i,
                        "pan_idx": pan_i,
                    })
        index[str(paper.get("paper_id"))] = {"paper": paper, "panels": panels, "figs": figs}
    return index

# ========= Naming / path rewriting (adding the forgery suffix) =========
def with_fg_suffix_paper_id(paper_id: str, fg_suffix: str) -> str:
    """Append "_<fg_suffix>" to *paper_id* unless it is already present."""
    suffix = f"_{fg_suffix}"
    return paper_id if paper_id.endswith(suffix) else paper_id + suffix

def rename_basename_with_suffix(basename_noext: str, paper_id: str, paper_id_fg: str) -> str:
    """Rewrite a basename so it carries the forgery paper id.

    A leading "<paper_id>_" prefix is swapped for "<paper_id_fg>_";
    otherwise the forgery id is simply prepended.
    """
    prefix = paper_id + "_"
    if basename_noext.startswith(prefix):
        return paper_id_fg + "_" + basename_noext[len(prefix):]
    return f"{paper_id_fg}_{basename_noext}"

def rewrite_paths_to_forgery(paper: dict, fg_suffix: str, forgery_root_unused: str) -> dict:
    """Produce the forgery-dataset JSON node for one paper.

    Every id and path is rewritten from the real paper_id to the suffixed
    forgery paper_id; every panel starts out with has_issue=False (issues
    are filled in later only for panels that actually get replaced).
    *forgery_root_unused* is kept for signature compatibility only.
    """
    paper_id_real = str(paper.get("paper_id"))
    paper_id_fg   = with_fg_suffix_paper_id(paper_id_real, fg_suffix)

    out_pages = []
    for pg in paper.get("pages", []):
        new_figs = []
        for fig in pg.get("figures", []):
            # Figure file name: swap the real-id prefix for the forgery id.
            fig_rel = fig.get("path")
            fig_base = os.path.basename(fig_rel) if fig_rel else ""
            fig_noext, fig_ext = os.path.splitext(fig_base)
            fig_noext_fg = rename_basename_with_suffix(fig_noext, paper_id_real, paper_id_fg)
            fig_file_fg  = fig_noext_fg + fig_ext
            new_fig_path = f"dataset/forgery/forgery_pdf/{paper_id_fg}/figure/{fig_file_fg}"

            # figure_id: replace only a leading "<real>_" prefix, else prepend.
            old_fid = str(fig.get("figure_id", ""))
            if old_fid.startswith(paper_id_real + "_"):
                new_fid = old_fid.replace(paper_id_real + "_", paper_id_fg + "_", 1)
            else:
                new_fid = f"{paper_id_fg}_{old_fid}"

            new_panels = []
            for pan in fig.get("panels", []):
                pan_rel = pan.get("path")
                pan_base = os.path.basename(pan_rel) if pan_rel else ""
                pan_noext, pan_ext = os.path.splitext(pan_base)
                # Panels live in a sub-directory named after their figure.
                pan_subdir_fg = fig_noext_fg
                pan_noext_fg  = rename_basename_with_suffix(pan_noext, paper_id_real, paper_id_fg)
                pan_file_fg   = pan_noext_fg + pan_ext
                new_panels.append({
                    # NOTE(review): this replaces EVERY occurrence of the real id
                    # inside panel_id, not just a leading prefix — confirm intended.
                    "panel_id": pan.get("panel_id", "").replace(paper_id_real, paper_id_fg),
                    "bbox_page_pixel": pan.get("bbox_page_pixel", []),
                    "path": f"dataset/forgery/forgery_pdf/{paper_id_fg}/panel/{pan_subdir_fg}/{pan_file_fg}",
                    "panel_level_issues": {"has_issue": False, "issues": []}
                })

            new_figs.append({
                "figure_id": new_fid,
                "bbox_page_pixel": fig.get("bbox_page_pixel", []),
                "path": new_fig_path,
                "figure_level_issues": fig.get("figure_level_issues", {"has_issue": False}),
                "panels": new_panels
            })

        out_pages.append({
            "page_index": pg.get("page_index"),
            "page_size_pt": pg.get("page_size_pt", {}),
            "figures": new_figs
        })

    out_paper = {
        "paper_id": paper_id_fg,
        "path": f"dataset/forgery/forgery_pdf/{paper_id_fg}/{paper_id_fg}.pdf",
        "paper_level_issues": paper.get("paper_level_issues", {"has_issue": False}),
        "pages": out_pages
    }
    return out_paper

# ========= Similarity matching (within one paper; "unused first" supported) =========
def find_best_panel_for_fake_in_paper(fake_path: str,
                                      paper_panels: List[dict],
                                      used_abs_paths: set) -> Optional[dict]:
    """Pick the panel of one paper that is most similar to the fake image.

    Similarity = minimum MSE between grayscale thumbnails of the fake
    (under every TRANSFORMS variant) and the panel. Panels not yet in
    *used_abs_paths* are preferred; if all are used, every panel is
    considered again. Missing/unreadable panel files are skipped (best
    effort). Returns None when the paper has no panels.
    """
    if not paper_panels:
        return None
    with Image.open(fake_path) as fake_im:
        fake_variants = [_to_gray_resized_square(fn(fake_im), SIM_COMPARE_SIZE)
                         for _, fn in TRANSFORMS]
    pool = [p for p in paper_panels if p["abs"] not in used_abs_paths] or paper_panels
    winner: Optional[dict] = None
    winner_score = float("inf")
    for panel in pool:
        panel_path = panel["abs"]
        if not os.path.exists(panel_path):
            continue
        try:
            with Image.open(panel_path) as panel_im:
                thumb = _to_gray_resized_square(panel_im, SIM_COMPARE_SIZE)
                for variant in fake_variants:
                    score = mse(variant, thumb)
                    if score < winner_score:
                        winner, winner_score = panel, score
        except Exception:
            # Corrupt panel image: ignore it and keep scanning.
            continue
    return winner

# ========= Registry =========
def load_registry(path):
    """Read the processed-papers registry; empty registry if file is absent."""
    if not os.path.exists(path):
        return {"processed_papers": []}
    with open(path, "r", encoding="utf-8") as f:
        return json.load(f)

def save_registry(reg, path):
    """Persist the registry dict as UTF-8 JSON at *path*.

    Parent directories are created as needed. Fix: a bare filename (no
    directory component) previously crashed because os.path.dirname("")
    made os.makedirs("") raise — only create the parent when non-empty.
    """
    parent = os.path.dirname(path)
    if parent:
        os.makedirs(parent, exist_ok=True)
    with open(path, "w", encoding="utf-8") as f:
        json.dump(reg, f, ensure_ascii=False, indent=2)

def is_processed(pid, reg):
    """True if *pid* is already recorded in the registry."""
    return pid in reg.get("processed_papers", [])
def mark_processed(pid, reg):
    """Record *pid* in the registry exactly once (key must already exist)."""
    done = reg["processed_papers"]
    if pid not in done:
        done.append(pid)

# ========= Grouping fake images by paper =========
# Extracts the numeric paper id (e.g. 2010072773) from names like 其他_2010072773_5_3.png
_PAPER_ID_RE = re.compile(r".*?_(\d{5,})_\d+_\d+\.[A-Za-z]+$")

def group_fake_images_by_paper(root: str) -> Dict[str, List[str]]:
    """Walk *root* and bucket fake-image absolute paths by original paper id.

    Files whose extension is not in IMG_EXTS, or whose name does not match
    _PAPER_ID_RE, are ignored.
    """
    buckets: Dict[str, List[str]] = {}
    for folder, _dirs, filenames in os.walk(root):
        for name in filenames:
            if os.path.splitext(name)[1].lower() not in IMG_EXTS:
                continue
            hit = _PAPER_ID_RE.match(name)
            if hit is None:
                continue
            buckets.setdefault(hit.group(1), []).append(os.path.join(folder, name))
    return buckets

# ========= Applying the forgery to a single paper =========
def apply_forgery_for_one_paper(paper_id_real: str,
                                fake_paths: List[str],
                                real_index: Dict[str, dict]) -> Optional[dict]:
    """Apply every fake image of one paper and return its forgery JSON node.

    Steps: copy pdf/figure/panel assets to the forgery directory (renamed
    with FG_SUFFIX), match each fake image to one of the paper's panels,
    archive the fake at the panel's original size, overlay it into the PDF
    at the panel's bbox, and record panel-level issue metadata (plus an
    optional ground-truth mask). Returns None when the paper is missing
    from real.json or its source PDF does not exist.
    """
    if paper_id_real not in real_index:
        print(f"[skip] paper {paper_id_real} not in real.json")
        return None

    entry = real_index[paper_id_real]
    paper_real = entry["paper"]
    panels     = entry["panels"]

    paper_id_fg = with_fg_suffix_paper_id(paper_id_real, FG_SUFFIX)
    out_dir = os.path.join(FORGERY_ROOT, paper_id_fg)
    os.makedirs(out_dir, exist_ok=True)

    real_pdf = os.path.join(REAL_PDF_ROOT, paper_id_real, f"{paper_id_real}.pdf")
    if not os.path.exists(real_pdf):
        print(f"[ERR] missing pdf: {real_pdf}")
        return None
    out_pdf = os.path.join(out_dir, f"{paper_id_fg}.pdf")
    shutil.copy2(real_pdf, out_pdf)

    # Copy figure / panel assets into the forgery dir under renamed files.
    real_fig_dir = os.path.join(REAL_PDF_ROOT, paper_id_real, "figure")
    real_pan_dir = os.path.join(REAL_PDF_ROOT, paper_id_real, "panel")
    if os.path.isdir(real_fig_dir):
        for fn in os.listdir(real_fig_dir):
            src = os.path.join(real_fig_dir, fn)
            if not os.path.isfile(src): continue
            name, ext = os.path.splitext(fn)
            name_fg   = rename_basename_with_suffix(name, paper_id_real, paper_id_fg)
            dst = os.path.join(out_dir, "figure", name_fg + ext)
            os.makedirs(os.path.dirname(dst), exist_ok=True
            )
            shutil.copy2(src, dst)
    if os.path.isdir(real_pan_dir):
        # Panels are nested one directory deep (one subdir per figure).
        for sub in os.listdir(real_pan_dir):
            sub_path = os.path.join(real_pan_dir, sub)
            if not os.path.isdir(sub_path): continue
            sub_fg = rename_basename_with_suffix(sub, paper_id_real, paper_id_fg)
            dst_sub = os.path.join(out_dir, "panel", sub_fg)
            os.makedirs(dst_sub, exist_ok=True)
            for fn in os.listdir(sub_path):
                src = os.path.join(sub_path, fn)
                if not os.path.isfile(src): continue
                name, ext = os.path.splitext(fn)
                name_fg   = rename_basename_with_suffix(name, paper_id_real, paper_id_fg)
                dst = os.path.join(dst_sub, name_fg + ext)
                shutil.copy2(src, dst)

    paper_out = rewrite_paths_to_forgery(paper_real, FG_SUFFIX, FORGERY_ROOT)

    used_abs = set()  # panels already replaced — drives "unused first" matching
    doc = fitz.open(out_pdf)

    for fake_img in fake_paths:
        best = find_best_panel_for_fake_in_paper(fake_img, panels, used_abs)
        if not best:
            print(f"[warn] no panel matched for {os.path.basename(fake_img)} in paper {paper_id_real}")
            continue

        used_abs.add(best["abs"])
        bbox_px = best["bbox"]
        w_overlay = int(round(bbox_px[2] - bbox_px[0]))
        h_overlay = int(round(bbox_px[3] - bbox_px[1]))
        if w_overlay <= 0 or h_overlay <= 0:
            print(f"[warn] bad bbox for {os.path.basename(best['abs'])}")
            continue

        # On-disk destination for the forged panel archive.
        orig_fig_noext = os.path.basename(os.path.dirname(best["abs"]))
        new_fig_noext  = rename_basename_with_suffix(orig_fig_noext, paper_id_real, paper_id_fg)
        orig_file      = os.path.basename(best["abs"])
        orig_noext, ext = os.path.splitext(orig_file)
        new_noext      = rename_basename_with_suffix(orig_noext, paper_id_real, paper_id_fg)
        out_panel_abs  = os.path.join(out_dir, "panel", new_fig_noext, new_noext + ext)

        # Archive the fake at the original panel's pixel size.
        src_w, src_h = get_image_size(best["abs"])
        crop_and_resize_to_file(fake_img, out_panel_abs, src_w, src_h)

        # Overlay into the PDF with a byte stream pre-sized to the bbox.
        bbox_pt = page_pixel_to_pt(bbox_px)
        overlay_bytes = crop_and_resize_to_bytes(fake_img, w_overlay, h_overlay)
        page_idx = best["page_idx"]
        page = doc[paper_real["pages"][page_idx]["page_index"] - 1]  # page_index is 1-based
        rect = fitz.Rect(*bbox_pt)
        page.insert_image(rect, stream=overlay_bytes, overlay=True, keep_proportion=False)

        # Record panel-level forgery issue metadata (plus optional GT mask).
        fg_idx = best["fig_idx"]; pn_idx = best["pan_idx"]
        mod_pan = paper_out["pages"][page_idx]["figures"][fg_idx]["panels"][pn_idx]
        mask_rel = mod_pan["path"].replace("/panel/", "/mask/")
        mask_abs = os.path.join(FORGERY_ROOT, mask_rel.split("dataset/forgery/forgery_pdf/")[-1])
        os.makedirs(os.path.dirname(mask_abs), exist_ok=True)

        mask_written = False
        if GT_MASK_ROOT:
            # Derive the GT mask path by swapping .../tempered/... for .../gt/...
            mask_src = fake_img.replace(os.sep + "tempered" + os.sep, os.sep + "gt" + os.sep)
            if os.path.exists(mask_src):
                try:
                    with Image.open(mask_src) as m:
                        m = m.convert("L").resize((w_overlay, h_overlay), Image.Resampling.NEAREST)
                        m.save(mask_abs)
                        mask_written = True
                except Exception:
                    pass  # best effort: a bad mask only leaves mask_path empty

        mod_pan["panel_level_issues"] = {
            "has_issue": True,
            "issues": [{
                "issue_id": FG_SUFFIX,
                "scope": "within",
                "issue_type": "forgery",
                "issue_subtype": FORGERY_METHOD,
                "evidence": {
                    "generative_model": GENERATIVE_MODEL,
                    "forgery_method": FORGERY_METHOD,
                    "mask_path": mask_rel if mask_written else ""
                }
            }]
        }

    # Key step: save safely so a "repaired PDF" error cannot abort the run.
    safe_overwrite_pdf(doc, out_pdf)
    doc.close()
    print(f"[OK] paper {paper_id_real} -> {paper_id_fg}")

    return paper_out

# ========= Batch driver =========
def run_batch():
    """Process every paper that has fake images, checkpointing after each.

    Pipeline: index real.json -> group fakes by original paper id -> skip
    papers already in the registry -> apply the forgery per paper ->
    immediately persist the merged JSON and registry (so a crash loses at
    most the paper currently in flight).
    """
    os.makedirs(FORGERY_ROOT, exist_ok=True)

    # 1) Index real.json by paper_id.
    real_index = build_real_index(REAL_JSON_PATH)

    # 2) Group the fake images by their original paper id.
    groups = group_fake_images_by_paper(TEMPERED_FAKE_ROOT)
    if not groups:
        print(f"[scan] no tempered images under {TEMPERED_FAKE_ROOT}")
        return

    # 3) Registry of papers already processed by earlier runs.
    registry = load_registry(REGISTRY_PATH)

    # 4) Load any existing forgery.json so this run appends to it.
    merged = {"papers": []}
    if os.path.exists(MERGED_FORGERY_JSON):
        try:
            with open(MERGED_FORGERY_JSON, "r", encoding="utf-8") as f:
                merged = json.load(f)
        except Exception:
            pass  # unreadable/corrupt file: start a fresh merged JSON
    existing_ids = set(str(p.get("paper_id")) for p in merged.get("papers", []))

    # 5) Per-paper processing; checkpoint JSON + registry after each success.
    for pid, fake_list in sorted(groups.items()):
        if is_processed(pid, registry):
            print(f"[skip] already processed paper {pid}")
            continue

        try:
            paper_out = apply_forgery_for_one_paper(pid, sorted(fake_list), real_index)
            if paper_out is None:
                continue

            # Merge into the combined JSON (deduplicated by paper_id).
            if str(paper_out["paper_id"]) not in existing_ids:
                merged["papers"].append(paper_out)
                existing_ids.add(str(paper_out["paper_id"]))

            # Checkpoint immediately: persist the merged JSON and registry.
            os.makedirs(os.path.dirname(MERGED_FORGERY_JSON), exist_ok=True)
            with open(MERGED_FORGERY_JSON, "w", encoding="utf-8") as f:
                json.dump(merged, f, ensure_ascii=False, indent=2)

            mark_processed(pid, registry)
            save_registry(registry, REGISTRY_PATH)

        except Exception as e:
            # One paper failing must not stop the batch: log and continue.
            print(f"[ERR] paper {pid}: {e}")

    print(f"[done] merged -> {MERGED_FORGERY_JSON}, total {len(merged['papers'])} papers")

# Script entry point.
if __name__ == "__main__":
    run_batch()
