#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
批量调用 MedGemma 逐框核验脚本（原图高分辨率裁剪 → 模型评估）
==========================================================

【改进点】
- 直接从合并后的 JSON 输入读取 meta 信息（不再需要单独的 meta_json 文件）
- 两阶段：原图裁剪→保存→推理
- 输出 JSON 结构与旧版一致，每个 bbox 增加 "crop_path"
- 新增参数: --skip_crop 跳过裁剪，直接使用已有 crop_path（若缺失则跳过该框）
- 默认行为：不加 --skip_crop 时，会先清空裁剪目录再重新裁剪
- 新增统计：平均裁剪大小、目录空间占用、推理结果统计
- 修复：prompt 使用 .format(desc=...) 与 JSON 花括号冲突的问题（已转义 {{ }}）

作者：zym1105
时间：2025-09-20
"""
import shutil
import argparse
import json
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '2' 
import math
import re
from typing import Any, Dict, List, Tuple, Union

from PIL import Image
import torch
from transformers import AutoProcessor, AutoModelForImageTextToText
from tqdm import tqdm

# ------------------------
# Default configuration
# ------------------------
DEFAULT_MODEL_ID = "./pretrained/medgemma"  # local MedGemma checkpoint directory
DEFAULT_DTYPE = torch.bfloat16              # weight dtype used when loading the model
DEFAULT_TEMPERATURE = 0.0                   # kept for CLI symmetry; decoding is greedy (do_sample=False)
DEFAULT_TOP_P = 1.0                         # nucleus cutoff; inert while do_sample=False
DEFAULT_MAX_NEW_TOKENS = 220                # generation budget per crop
DEFAULT_TARGET_SIZE = (224, 224)            # (W, H) of the resized crops the input bboxes refer to


# ========== Utility functions ==========
def clamp01(x: float) -> float:
    """Clamp ``x`` into [0.0, 1.0]; anything that cannot be cast to float maps to 0.0."""
    try:
        value = float(x)
    except Exception:
        return 0.0
    # max/min chain keeps the original NaN handling intact.
    return max(0.0, min(1.0, value))


def to_bool_or_uncertain(s: Union[str, bool, None]) -> Union[bool, str]:
    """Normalize a model answer into one of True / False / "uncertain"."""
    if isinstance(s, bool):
        return s
    if s is None:
        return "uncertain"

    try:
        token = str(s).strip().lower()
    except Exception:
        return "uncertain"

    affirmative = ("true", "yes", "y", "correct")
    negative = ("false", "no", "n", "incorrect")
    if token in affirmative:
        return True
    if token in negative:
        return False
    # Anything unrecognized is treated as inconclusive.
    return "uncertain"

def map_bbox_224_to_original(
    bbox_224: List[int],
    crop_box: List[int],  # [top, bottom, left, right]
    cropped_size_hw: List[int],  # [H, W]
    target_size_wh: Tuple[int, int] = DEFAULT_TARGET_SIZE,
    original_size_hw: Tuple[int, int] = None,
) -> List[int]:
    """Map a bbox from the 224x224 crop frame back to original-image pixels.

    ``crop_box`` is [top, bottom, left, right]; sizes are [H, W]. When
    ``original_size_hw`` is given, the result is clamped to those bounds.
    """
    target_w, target_h = target_size_wh  # (224, 224)
    top, _bottom, left, _right = (int(v) for v in crop_box)
    crop_h, crop_w = cropped_size_hw  # [H, W]
    scale_x = crop_w / float(target_w)
    scale_y = crop_h / float(target_h)

    c1x, c1y, c2x, c2y = (float(v) for v in bbox_224)

    # Scale into crop pixels, then translate by the crop origin.
    xs = [left + c1x * scale_x, left + c2x * scale_x]
    ys = [top + c1y * scale_y, top + c2y * scale_y]

    if original_size_hw is not None:
        h0, w0 = original_size_hw
        xs = [min(max(v, 0), w0) for v in xs]
        ys = [min(max(v, 0), h0) for v in ys]

    return [int(round(xs[0])), int(round(ys[0])), int(round(xs[1])), int(round(ys[1]))]


def expand_and_crop(
    img: Image.Image,
    xyxy: List[int],
    expand_ratio: float = 0.15,
    min_side: int = 256,
    max_side: int = 2048,
    log_info: dict = None,
) -> Image.Image:
    """Crop ``xyxy`` (pixel coords) from ``img`` with a proportional margin on
    every side, then rescale so the short side is >= ``min_side`` and the long
    side is <= ``max_side``. On failure, print the full context and re-raise.

    Args:
        img: source PIL image.
        xyxy: [x1, y1, x2, y2] box in the coordinate frame of ``img``.
        expand_ratio: margin added per side as a fraction of the box w/h.
        min_side: lower bound for the crop's short side (triggers upscaling).
        max_side: upper bound for the crop's long side; when both bounds
            conflict, max_side wins and the short side may stay below min_side.
        log_info: arbitrary context object dumped to stdout on failure.

    Returns:
        The cropped (and possibly resized) PIL image.

    Raises:
        The original exception, after printing the diagnostic context.
    """
    try:
        w, h = img.size
        # Clamp the incoming box to the image bounds; force >= 1px extent.
        x1, y1, x2, y2 = [int(v) for v in xyxy]
        x1, y1 = max(0, x1), max(0, y1)
        x2, y2 = min(w, x2), min(h, y2)
        bw, bh = max(1, x2 - x1), max(1, y2 - y1)

        # Symmetric expansion, again clamped to the image bounds.
        ex, ey = int(bw * expand_ratio), int(bh * expand_ratio)
        nx1, ny1 = max(0, x1 - ex), max(0, y1 - ey)
        nx2, ny2 = min(w, x2 + ex), min(h, y2 + ey)

        crop = img.crop((nx1, ny1, nx2, ny2))
        cw, ch = crop.size

        if cw <= 0 or ch <= 0:
            raise ValueError("Invalid crop size (zero width/height)")

        # Upscale small crops toward min_side; if that would overshoot
        # max_side, recompute the scale from the long side (max_side wins).
        scale = 1.0
        if min(cw, ch) < min_side:
            scale = min_side / float(min(cw, ch))
        if max(cw * scale, ch * scale) > max_side:
            scale = max_side / float(max(cw, ch))
        if not math.isclose(scale, 1.0):
            crop = crop.resize((int(round(cw * scale)), int(round(ch * scale))), Image.BICUBIC)

        return crop

    except Exception:
        print("\n[ERROR] expand_and_crop failed!")
        try:
            log_txt = json.dumps(log_info, ensure_ascii=False, indent=2) if log_info else "N/A"
        except Exception:
            log_txt = str(log_info)
        print(f"log_info    : {log_txt}")
        print(f"Image size  : {img.size if isinstance(img, Image.Image) else 'N/A'}")
        print(f"Input bbox  : {xyxy}")
        print(f"Expand ratio: {expand_ratio}")
        print(f"Min/Max side: {min_side}/{max_side}")
        # FIX: bare ``raise`` preserves the original traceback;
        # ``raise e`` re-anchored it at this line.
        raise


def pick_json(text: str) -> Dict[str, Any]:
    """Leniently extract the verification fields from raw model output.

    Does not require well-formed JSON; each field is pulled out with its own
    regex so partially malformed responses still yield usable data.

    Returns:
        dict with keys:
          - "is_correct": True / False / "uncertain" / "failed" (field absent)
          - "confidence": float, or -1 when missing or unparsable
          - "region_description": str, or "failed" when missing
          - "rationale": str, or "failed: <raw text>" when missing

    Raises:
        ValueError: if ``text`` is not a str.
    """
    if not isinstance(text, str):
        raise ValueError("Expected str input")

    cleaned = text.strip()

    # -------- is_correct --------
    # FIX: also exclude '}' from the value token, otherwise compact JSON like
    # {"is_correct":true} captured "true}" and was misread as "uncertain".
    m = re.search(r'"?is_correct"?\s*:\s*([^\s,}]+)', cleaned, flags=re.I)
    if m:
        raw_ic = m.group(1).strip().lower().strip('"')
        if raw_ic in ("true", "yes", "y", "correct"):
            is_correct = True
        elif raw_ic in ("false", "no", "n", "incorrect"):
            is_correct = False
        else:
            is_correct = "uncertain"
    else:
        is_correct = "failed"

    # -------- confidence --------
    m = re.search(r'"?confidence"?\s*:\s*([\d\.]+)', cleaned, flags=re.I)
    if m:
        try:
            confidence = float(m.group(1))
        except ValueError:
            # FIX: the character class can match dot-only strings such as
            # "." or ".." which float() rejects; treat those as missing.
            confidence = -1
    else:
        confidence = -1

    # -------- region_description --------
    m = re.search(r'"?region_description"?\s*:\s*"([^"]*)"', cleaned, flags=re.I)
    region_description = m.group(1).strip() if m else "failed"

    # -------- rationale --------
    m = re.search(r'"?rationale"?\s*:\s*"([^"]*)"', cleaned, flags=re.I)
    rationale = m.group(1).strip() if m else "failed: "+text.strip()

    return {
        "is_correct": is_correct,
        "confidence": confidence,
        "region_description": region_description,
        "rationale": rationale,
    }

# ========== Prompt ==========
SYSTEM_PROMPT = (
    "You are an expert ophthalmic image grader. "
    "Evaluate ONLY the provided crop of a retinal fundus photo. "
    "If the crop is blurry or evidence is insufficient, respond 'uncertain'. "
    "Return a STRICT JSON object with the required fields and nothing else."
)

# NOTE: every literal JSON brace below is escaped as {{ }} so that
# .format(desc=...) does not raise KeyError on them.
USER_PROMPT_TEMPLATE = (
    "[Task]\n"
    "You will verify a hypothesis about a lesion within THIS CROP ONLY.\n\n"
    "[Hypothesis]\n"
    "\"{desc}\"\n\n"
    "[Output JSON Schema]\n"
    "{{\n"
    "  \"is_correct\": true|false|\"uncertain\",\n"
    "  \"confidence\": <float in [0,1]>,\n"
    "  \"region_description\": \"...\",\n"
    "  \"rationale\": \"...\"\n"
    "}}\n"
    "Return ONLY the JSON object."
)


# ========== Model class ==========
class MedGemmaVerifier:
    """Thin wrapper around a MedGemma image-text-to-text checkpoint that
    verifies one lesion hypothesis per image crop.

    Decoding is greedy (``do_sample=False``), so ``temperature`` is stored
    only for bookkeeping and ``top_p`` has no effect on the output.
    """

    def __init__(self, model_id=DEFAULT_MODEL_ID, device="auto", dtype=DEFAULT_DTYPE,
                 temperature=DEFAULT_TEMPERATURE, top_p=DEFAULT_TOP_P, max_new_tokens=DEFAULT_MAX_NEW_TOKENS):

        print(f"[INFO] Loading model: {model_id} | device={device} | dtype={dtype}")
        # FIX: honor the ``dtype`` and ``device`` arguments; they were
        # previously ignored (torch.bfloat16 / "auto" were hard-coded, so
        # passing e.g. dtype=torch.float16 silently loaded bf16 weights).
        # Defaults are unchanged, so default behavior is identical.
        self.model = AutoModelForImageTextToText.from_pretrained(
            model_id,
            torch_dtype=dtype,
            device_map=device,
        )
        self.processor = AutoProcessor.from_pretrained(model_id)
        self.dtype = dtype
        self.temperature = temperature  # unused while do_sample=False
        self.top_p = top_p
        self.max_new_tokens = max_new_tokens
        print(f"[INFO] Model {model_id} loaded successfully!")

    def infer_one(self, crop: Image.Image, description: str) -> Dict[str, Any]:
        """Run one crop + hypothesis through the model and return the verdict.

        ``description`` is substituted into USER_PROMPT_TEMPLATE via
        .format(desc=...); callers must have escaped literal braces
        (see ``_sanitize_desc`` in the pipeline).

        Returns:
            dict with "is_correct" (True/False/"uncertain"/"failed"),
            "confidence" (clamped to [0,1]), "region_description", "rationale".
        """
        user_text = USER_PROMPT_TEMPLATE.format(desc=str(description))
        messages = [
            {"role": "system", "content": [{"type": "text", "text": SYSTEM_PROMPT}]},
            {"role": "user", "content": [
                {"type": "text", "text": user_text},
                {"type": "image", "image": crop}
            ]}
        ]

        # NOTE(review): .to(device, dtype=...) on the processor output is
        # presumed to cast only floating-point tensors (leaving integer
        # input_ids intact) — confirm for the installed transformers version.
        inputs = self.processor.apply_chat_template(
            messages,
            add_generation_prompt=True,
            tokenize=True,
            return_dict=True,
            return_tensors="pt"
        ).to(self.model.device, dtype=self.dtype)

        input_len = inputs["input_ids"].shape[-1]

        with torch.inference_mode():
            # Greedy decoding; top_p is forwarded but inert with do_sample=False.
            generation = self.model.generate(
                **inputs,
                max_new_tokens=self.max_new_tokens,
                do_sample=False,
                top_p=self.top_p
            )

        # Decode only the newly generated tokens, then parse leniently.
        decoded = self.processor.decode(generation[0][input_len:], skip_special_tokens=True)
        obj = pick_json(decoded)
        try:
            raw_ic = obj.get("is_correct")
            # FIX: preserve pick_json's "failed" marker (field absent in the
            # model output). Previously it was funneled through
            # to_bool_or_uncertain, which collapsed it into "uncertain" and
            # left the caller's "failed" stats bucket permanently at zero.
            is_correct = "failed" if raw_ic == "failed" else to_bool_or_uncertain(raw_ic)
            confidence = clamp01(obj.get("confidence", 0.0))
            region_description = str(obj.get("region_description", "")).strip()
            rationale = str(obj.get("rationale", "")).strip()
        except Exception as e:
            print("[ERROR] Failed to parse model output into structured result")
            print("raw obj:", json.dumps(obj, ensure_ascii=False, indent=2))
            raise ValueError(f"Failed to parse model output: {e}")

        return {
            "is_correct": is_correct,
            "confidence": confidence,
            "region_description": region_description,
            "rationale": rationale,
        }


def process_file(
    input_json_path: str,
    output_json_path: str,
    crop_dump_dir: str,
    expand_ratio: float,
    min_crop_side: int,
    max_crop_side: int,
    device: str,
    temperature: float,
    top_p: float,
    max_new_tokens: int,
    model_id: str,
    skip_crop: bool = False,
) -> None:
    """
    Two stages: crop from the original image -> save (images/ subdir +
    crops_meta.json) -> run inference.

    - skip_crop=True: skip cropping and reuse the crop results recorded in
      dump_dir/crops_meta.json (crop_path, description, ...).
    - skip_crop=False: perform cropping; the images/ subdir and any stale
      crops_meta.json are always cleared first.

    The output JSON keeps the legacy structure (each bbox gains a
    "crop_path" and a "result" field).
    """
    # Paths derived from the crop dump directory.
    images_dir = os.path.join(crop_dump_dir, "images")
    meta_json_path = os.path.join(crop_dump_dir, "crops_meta.json")

    # Read the main input (needed even when skipping crops, to restore the
    # original grouping and bbox order).
    with open(input_json_path, "r", encoding="utf-8") as f:
        data = json.load(f)

    # Helper: make a description format-safe (literal { } would otherwise
    # break the later .format(desc=...) call in infer_one).
    def _sanitize_desc(s):
        s = "" if s is None else str(s)
        return s.replace("{", "{{").replace("}", "}}")

    # ---------------------------
    # Step 1: cropping (or skip)
    # ---------------------------
    os.makedirs(crop_dump_dir, exist_ok=True)
    os.makedirs(images_dir, exist_ok=True)

    jobs_meta: List[Dict[str, Any]] = []  # per-crop job metadata, written/read as crops_meta.json
    out_list: List[Dict[str, Any]] = []   # structure later written back to the output JSON
    total_boxes = sum(len(x.get("bboxes", [])) for x in data)

    if skip_crop:
        # Load the crop metadata produced by a previous (non-skip) run.
        if not os.path.exists(meta_json_path):
            raise FileNotFoundError(
                f"[ERROR] --skip_crop 指定，但未找到 {meta_json_path}。请先运行一次未跳过裁剪的流程。"
            )
        with open(meta_json_path, "r", encoding="utf-8") as f:
            jobs_meta = json.load(f)

        # Index jobs_meta as a (image_path, bbox_index) -> meta lookup table.
        from collections import defaultdict
        meta_map = defaultdict(dict)
        for jm in jobs_meta:
            ip = jm.get("image_path")
            bi = jm.get("bbox_index")
            if ip is None or bi is None:
                # Tolerate and skip incomplete entries.
                continue
            meta_map[ip][int(bi)] = jm

        # Rebuild out_list, preserving the grouping/order of the input JSON.
        with tqdm(total=total_boxes, desc="Load crops (skip)", unit="box") as pbar:
            for item in data:
                entry = {"image_path": item.get("image_path"), "bboxes": []}
                ipath = entry["image_path"]
                bboxes = item.get("bboxes", [])
                for bi, b in enumerate(bboxes):
                    jm = meta_map.get(ipath, {}).get(bi)
                    if jm is None:
                        # No matching crop: keep a placeholder entry.
                        # NOTE(review): Step 2 raises FileNotFoundError on a
                        # None crop_path instead of skipping it — the original
                        # comment claimed it would be skipped; confirm which
                        # behavior is intended.
                        entry["bboxes"].append({**b, "crop_path": None})
                    else:
                        entry["bboxes"].append({
                            **b,
                            "crop_path": jm.get("crop_path"),
                        })
                    pbar.update(1)
                out_list.append(entry)

        print(f"[INFO] Skip crop: loaded {len(jobs_meta)} crop jobs from {meta_json_path}")

    else:
        # Default path: wipe the old crop directory and meta file first.
        if os.path.exists(images_dir):
            shutil.rmtree(images_dir)
        os.makedirs(images_dir, exist_ok=True)

        if os.path.exists(meta_json_path):
            os.remove(meta_json_path)

        crop_sizes: List[Tuple[int, int]] = []
        with tqdm(total=total_boxes, desc="Cropping", unit="box") as pbar:
            for item in data:
                meta = item.get("meta", {})
                original_path = meta.get("original_path")
                crop_info = meta.get("crop_info", {})
                crop_box = crop_info.get("crop_box")           # NOTE: format is [top, bottom, left, right]
                cropped_size = crop_info.get("cropped_size")   # [H, W]
                original_size = crop_info.get("original_size") # [H, W], used to clamp to image bounds

                # Skip images with missing metadata (drops all boxes of that
                # image in one go).
                if not (original_path and os.path.exists(original_path) and crop_box and cropped_size):
                    pbar.update(len(item.get("bboxes", [])))
                    continue

                try:
                    orig_img = Image.open(original_path).convert("RGB")
                except Exception:
                    pbar.update(len(item.get("bboxes", [])))
                    continue

                entry = {"image_path": item.get("image_path"), "bboxes": []}
                ipath = entry["image_path"]

                for bi, b in enumerate(item.get("bboxes", [])):
                    xyxy_224 = b.get("xyxy")
                    if (not xyxy_224) or len(xyxy_224) != 4:
                        # Invalid box.
                        pbar.update(1)
                        continue

                    # 224-frame coords -> original-image coords.
                    xyxy_orig = map_bbox_224_to_original(
                        bbox_224=xyxy_224,
                        crop_box=crop_box,                   # [top, bottom, left, right]
                        cropped_size_hw=cropped_size,        # [H, W]
                        target_size_wh=DEFAULT_TARGET_SIZE,  # (224, 224)
                        original_size_hw=original_size,      # [H, W] or None
                    )

                    # High-resolution crop from the original image.
                    try:
                        crop_img = expand_and_crop(
                            orig_img,
                            xyxy_orig,
                            expand_ratio=expand_ratio,
                            min_side=min_crop_side,
                            max_side=max_crop_side,
                            log_info=item,  # context printed on failure
                        )
                    except Exception:
                        # Cropping this box failed; skip it.
                        pbar.update(1)
                        continue

                    crop_sizes.append(crop_img.size)

                    # Save the crop into the images/ subdirectory.
                    base = os.path.splitext(os.path.basename(ipath))[0]
                    crop_fname = f"{base}__box{bi:03d}.png"
                    crop_fpath = os.path.abspath(os.path.join(images_dir, crop_fname))
                    try:
                        crop_img.save(crop_fpath, quality=95)
                    except Exception:
                        pbar.update(1)
                        continue

                    # Extend the output structure.
                    entry["bboxes"].append({**b, "crop_path": crop_fpath})

                    # Record this crop's meta (reused by a later --skip_crop run).
                    jobs_meta.append({
                        "image_path": ipath,
                        "bbox_index": bi,
                        "description": b.get("description", ""),
                        "crop_path": crop_fpath,
                        "xyxy_224": list(map(int, xyxy_224)),
                        "xyxy_orig": xyxy_orig,
                    })

                    pbar.update(1)

                out_list.append(entry)

        # Persist this run's crop metadata.
        with open(meta_json_path, "w", encoding="utf-8") as f:
            json.dump(jobs_meta, f, ensure_ascii=False, indent=2)

        # Print cropping statistics.
        if crop_sizes:
            avg_w = sum(w for (w, h) in crop_sizes) / len(crop_sizes)
            avg_h = sum(h for (w, h) in crop_sizes) / len(crop_sizes)
            # Only count .png files directly under images/.
            total_bytes = 0
            for fname in os.listdir(images_dir):
                if fname.lower().endswith(".png"):
                    fp = os.path.join(images_dir, fname)
                    try:
                        total_bytes += os.path.getsize(fp)
                    except Exception:
                        pass
            print(f"[INFO] Crops: {len(crop_sizes)}, avg size: {avg_w:.1f}x{avg_h:.1f}, "
                  f"disk usage: {total_bytes/1024/1024:.2f} MB (in {images_dir})")
        else:
            print("[WARN] No crops were produced.")

    # ---------------------------
    # Step 2: inference
    # ---------------------------
    verifier = MedGemmaVerifier(
        model_id=model_id,
        device=device,
        temperature=temperature,
        top_p=top_p,
        max_new_tokens=max_new_tokens,
    )

    # Statistics, including a "failed" bucket for unparseable verdicts.
    stats = {"accept": 0, "reject": 0, "uncertain": 0, "failed": 0, "conf_sum": 0.0}

    # Total number of jobs (only bboxes with a usable crop_path).
    total_jobs = sum(
        sum(1 for b in entry["bboxes"] if "crop_path" in b and b["crop_path"]) 
        for entry in out_list
    )
    ## debug: cap the total job count to guard against accidental huge runs
    # MAX_COUNT=5000
    # if total_jobs > MAX_COUNT:
    #     print(f"[WARN] Limiting total jobs to {MAX_COUNT} (from {total_jobs}) for debug.")
    #     total_jobs = MAX_COUNT
    #     out_list = out_list[:MAX_COUNT]
    with tqdm(total=total_jobs, desc="Inferring", unit="crop") as pbar:
        for entry in out_list:
            for bbox in entry["bboxes"]:
                if "crop_path" not in bbox:
                    raise KeyError(f"bbox 缺少 crop_path 字段: {bbox}")

                # NOTE(review): placeholder entries with crop_path=None (from
                # --skip_crop with missing meta) abort the whole run here.
                cpath = bbox["crop_path"]
                if not cpath or not os.path.exists(cpath):
                    print(f"\n[ERROR] Crop file missing: {cpath}")
                    print(json.dumps(bbox, ensure_ascii=False, indent=2))
                    raise FileNotFoundError(f"Missing crop file: {cpath}")

                try:
                    crop_img = Image.open(cpath).convert("RGB")
                except Exception as e:
                    print(f"\n[ERROR] Failed to open crop image: {cpath}")
                    print(f"bbox info: {json.dumps(bbox, ensure_ascii=False, indent=2)}")
                    raise e

                if "description" not in bbox:
                    raise KeyError(f"bbox 缺少 description 字段: {bbox}")
                desc = _sanitize_desc(bbox["description"])

                try:
                    result = verifier.infer_one(crop_img, desc)
                except Exception as e:
                    print(f"\n[ERROR] Model inference failed on crop: {cpath}")
                    print(f"bbox info: {json.dumps(bbox, ensure_ascii=False, indent=2)}")
                    raise e

                # Ensure the result carries every required field.
                for k in ["is_correct", "confidence", "region_description", "rationale"]:
                    if k not in result:
                        raise KeyError(f"模型结果缺少 {k}: {result}")

                bbox["result"] = result

                ic = result["is_correct"]
                conf = float(result["confidence"])

                if ic is True:
                    stats["accept"] += 1
                elif ic is False:
                    stats["reject"] += 1
                elif ic == "failed":
                    stats["failed"] += 1
                else:  # "uncertain"
                    stats["uncertain"] += 1

                stats["conf_sum"] += conf
                pbar.update(1)

    # ---------------------------
    # Write the final results
    # ---------------------------
    os.makedirs(os.path.dirname(os.path.abspath(output_json_path)), exist_ok=True)
    with open(output_json_path, "w", encoding="utf-8") as f:
        json.dump(out_list, f, ensure_ascii=False, indent=2)

    total_done = stats["accept"] + stats["reject"] + stats["uncertain"] + stats["failed"]
    avg_conf = (stats["conf_sum"] / total_done) if total_done else 0.0
    print(f"[STATS] accept={stats['accept']} reject={stats['reject']} "
          f"uncertain={stats['uncertain']} failed={stats['failed']} "
          f"avg_conf={avg_conf:.3f}")
    print(f"[DONE] Results saved to {output_json_path}")



def parse_args():
    """Build and parse the CLI for the crop-then-verify pipeline."""
    p = argparse.ArgumentParser(description="High-res crop then verify with MedGemma")
    # I/O locations
    p.add_argument("--input_json", "-i", type=str, default="./experiments/merged_annotations.json")
    p.add_argument("--output_json", "-o", type=str, default="./experiments/output.json")
    p.add_argument("--crop_dump_dir", "-cdr", type=str, default="./experiments/crops_dump")
    # crop geometry
    p.add_argument("--expand_ratio", type=float, default=0.15)
    p.add_argument("--min_crop_side", type=int, default=512)
    p.add_argument("--max_crop_side", type=int, default=2048)
    # model / decoding
    p.add_argument("--device", type=str, default="auto")
    p.add_argument("--temperature", type=float, default=DEFAULT_TEMPERATURE)
    p.add_argument("--top_p", type=float, default=DEFAULT_TOP_P)
    p.add_argument("--max_new_tokens", type=int, default=DEFAULT_MAX_NEW_TOKENS)
    p.add_argument("--model_id", type=str, default=DEFAULT_MODEL_ID)
    # pipeline control
    p.add_argument("--skip_crop", action="store_true", help="跳过裁剪，直接使用已有 crop_path")
    return p.parse_args()


def main():
    """Entry point: parse the CLI and run the crop + verify pipeline."""
    args = parse_args()
    process_file(
        input_json_path=args.input_json,
        output_json_path=args.output_json,
        crop_dump_dir=args.crop_dump_dir,
        expand_ratio=args.expand_ratio,
        min_crop_side=args.min_crop_side,
        max_crop_side=args.max_crop_side,
        device=args.device,
        temperature=args.temperature,
        top_p=args.top_p,
        max_new_tokens=args.max_new_tokens,
        model_id=args.model_id,
        skip_crop=args.skip_crop,
    )
    print(f"[DONE] Results saved to {args.output_json}")


if __name__ == "__main__":
    main()
