#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
将 aier 的 annotations.json 转换为 check_zone 可用的 merged_annotations.json
并按比例生成“正确/错误”两类样本（错误样本使用 wrong_candidate.json 里的混淆病灶）

输出：
- merged_annotations.json：check_zone 直接可用
- sampling_manifest.json：记录每张图被分到哪一类，以及每个 bbox 的 gt/hypo 信息，便于核对

坐标系与 meta 说明（对齐 check_zone）：
- 所有 bbox 写入 224×224 坐标系
- meta.crop_info = [top, bottom, left, right] = [0, H, 0, W]（整幅原图映射到 224×224）
- meta.cropped_size = [H, W]
- meta.original_size = [H, W]
- meta.original_path 指向原图（用于后续高分辨率裁剪/推理）
"""

import os
import json
import argparse
import random
from typing import Dict, Any, List, Tuple
from PIL import Image


def _require_file(path: str, desc: str) -> None:
    if not os.path.exists(path):
        raise FileNotFoundError(f"{desc} 不存在: {path}")


def _load_json(path: str, desc: str):
    """Load and return the JSON content of *path*.

    Args:
        path: file path to read (UTF-8).
        desc: human-readable description used in error messages.

    Raises:
        FileNotFoundError: if the file does not exist (via _require_file).
        ValueError: if the file is not valid JSON / UTF-8; the original
            decode error is chained as the cause so tracebacks keep it.
    """
    _require_file(path, desc)
    with open(path, "r", encoding="utf-8") as f:
        try:
            return json.load(f)
        # Catch only the decode failures json.load can raise, instead of a
        # blanket Exception that would also swallow unrelated bugs.
        except (json.JSONDecodeError, UnicodeDecodeError) as e:
            raise ValueError(f"{desc} 解析失败: {path}, 错误: {e}") from e


def _to_224_xyxy(norm_xyxy: List[float]) -> List[int]:
    """
    将归一化坐标 [x1,y1,x2,y2] 映射到 224×224，并做边界与最小尺寸保护
    """
    if (not isinstance(norm_xyxy, (list, tuple))) or len(norm_xyxy) != 4:
        raise ValueError(f"非法 bbox（期望4个数）: {norm_xyxy}")

    x1, y1, x2, y2 = [float(v) for v in norm_xyxy]

    # 轻度容错：clamp 到 [0,1]
    x1 = max(0.0, min(1.0, x1))
    y1 = max(0.0, min(1.0, y1))
    x2 = max(0.0, min(1.0, x2))
    y2 = max(0.0, min(1.0, y2))

    # 保证顺序
    if x2 < x1:
        x1, x2 = x2, x1
    if y2 < y1:
        y1, y2 = y2, y1

    # 映射到 224
    X1 = int(round(x1 * 224))
    Y1 = int(round(y1 * 224))
    X2 = int(round(x2 * 224))
    Y2 = int(round(y2 * 224))

    # 限制边界在 [0,223] / [1,224]
    X1 = max(0, min(223, X1))
    Y1 = max(0, min(223, Y1))
    X2 = max(1, min(224, X2))
    Y2 = max(1, min(224, Y2))

    # 至少 1 像素宽/高
    if X2 <= X1:
        X2 = min(224, X1 + 1)
    if Y2 <= Y1:
        Y2 = min(224, Y1 + 1)

    return [X1, Y1, X2, Y2]


def _choose_wrong_label(gt: str, wrong_map: Dict[str, List[str]], lesion_desc: Dict[str, str], rng: random.Random) -> str:
    """
    从 wrong_candidate 表中为 gt 选择一个混淆病灶；
    若缺失或空，则从 lesion_desc 里随机挑一个 != gt 的病灶作为替代
    """
    candidates = wrong_map.get(gt, [])
    pool = [c for c in candidates if c in lesion_desc and c != gt]
    if not pool:
        # 回退：从全部病灶中挑
        pool = [k for k in lesion_desc.keys() if k != gt]
    if not pool:
        # 实在挑不出来就只能返回原标签（这会让该 bbox 仍然是“正确”）
        return gt
    return rng.choice(pool)


def convert_and_sample(
    ann_path: str,
    lesion_desc_path: str,
    wrong_candidate_path: str,
    orignal_dir: str,
    output_json: str,
    manifest_json: str,
    num_images: int = 1000,
    ratio_wrong: float = 0.5,
    seed: int = 42,
) -> None:
    """
    Convert aier annotations.json into a check_zone-compatible
    merged_annotations.json, splitting sampled images into "correct" and
    "wrong" groups ("wrong" images get a confusable lesion label drawn
    from the wrong-candidate table).

    Args:
        ann_path: path to aier annotations.json (top-level dict).
        lesion_desc_path: path to the lesion -> description dict.
        wrong_candidate_path: path to the lesion -> confusable-candidates dict.
        orignal_dir: root dir of original images; joined with image_rela_dir.
        output_json: output path for merged_annotations.json.
        manifest_json: output path for the sampling manifest (for auditing).
        num_images: number of images to sample (capped by availability).
        ratio_wrong: fraction of sampled images assigned to the "wrong" class.
        seed: RNG seed for shuffling and wrong-label selection.

    Raises:
        ValueError: when an input JSON has the wrong top-level type.
        KeyError: when a record or lesion lacks required fields.
        FileNotFoundError: when an input file or original image is missing.
        RuntimeError: when an image cannot be opened or nothing is sampleable.
    """
    # --- Load inputs ---
    anns = _load_json(ann_path, "annotations.json")
    lesion_desc = _load_json(lesion_desc_path, "lesion 描述字典")
    wrong_map = _load_json(wrong_candidate_path, "wrong_candidate.json")

    if not isinstance(anns, dict):
        raise ValueError("annotations.json 格式错误，顶层应为 dict")
    if not isinstance(lesion_desc, dict):
        raise ValueError("lesion 描述字典格式错误，顶层应为 dict")
    if not isinstance(wrong_map, dict):
        raise ValueError("wrong_candidate.json 格式错误，顶层应为 dict")

    # --- Collect usable images (must open successfully under orignal_dir) ---
    # Each item: (key_in_ann, meta, absolute_original_path, (W, H)).
    # The size is cached here so every image is opened exactly once;
    # the previous version re-opened each sampled image a second time.
    items: List[Tuple[str, Dict[str, Any], str, Tuple[int, int]]] = []
    for k, meta in anns.items():
        if "image_rela_dir" not in meta:
            raise KeyError(f"记录缺少 image_rela_dir: {k}")
        if "lesions" not in meta or not isinstance(meta["lesions"], list) or len(meta["lesions"]) == 0:
            continue  # no lesions -> skip; the image does not join sampling

        abs_path = os.path.join(orignal_dir, meta["image_rela_dir"])
        _require_file(abs_path, "原图")
        # Trial-open the image to ensure its size is readable, and cache it.
        try:
            with Image.open(abs_path) as im:
                W, H = im.size
                if W <= 0 or H <= 0:
                    raise ValueError(f"非法图像尺寸: {abs_path} -> ({W},{H})")
        except Exception as e:
            # Chain the cause so the underlying decoder error is preserved.
            raise RuntimeError(f"无法打开原图: {abs_path}, 错误: {e}") from e

        items.append((k, meta, abs_path, (W, H)))

    if len(items) == 0:
        raise RuntimeError("没有可用的样本（存在可读原图且含 lesions）")

    # --- Sampling ---
    rng = random.Random(seed)
    rng.shuffle(items)

    N = min(num_images, len(items))
    wrong_n = int(round(N * ratio_wrong))
    wrong_n = max(0, min(N, wrong_n))
    correct_n = N - wrong_n

    # The first wrong_n shuffled items become "wrong", the rest "correct".
    # Positional assignment (idx < wrong_n) replaces the previous id()-based
    # set, which depended on object identity and was fragile under refactor.
    sampled = items[:N]

    # --- Build outputs ---
    out_entries: List[Dict[str, Any]] = []
    manifest: List[Dict[str, Any]] = []

    for idx, (_, meta, abs_path, (W, H)) in enumerate(sampled):
        is_wrong_image = idx < wrong_n
        case_assign = "wrong" if is_wrong_image else "correct"

        entry = {
            "image_path": abs_path,  # the original image name can also drive downstream naming
            "bboxes": [],
            "meta": {
                "original_path": abs_path,
                "crop_info": {
                    # the whole original image is mapped onto 224×224
                    "crop_box": [0, H, 0, W],     # [top, bottom, left, right]
                    "original_size": [H, W],      # [H, W]
                    "cropped_size": [H, W],       # [H, W]
                }
            }
        }

        mani_item = {
            "image_path": abs_path,
            "assign": case_assign,
            "bboxes": []
        }

        for lesion in meta["lesions"]:
            if "name" not in lesion or "bbox" not in lesion:
                raise KeyError(f"lesion 缺少 name/bbox 字段: {lesion}")

            gt_name = str(lesion["name"])
            norm_box = lesion["bbox"]
            xyxy_224 = _to_224_xyxy(norm_box)

            if is_wrong_image:
                hypo_name = _choose_wrong_label(gt_name, wrong_map, lesion_desc, rng)
            else:
                hypo_name = gt_name

            # Description text for the hypothesized lesion (fallback if missing).
            desc_text = lesion_desc.get(hypo_name, f"病灶：{hypo_name}")

            entry["bboxes"].append({
                "xyxy": xyxy_224,
                "description": desc_text,
                # tracking fields (ignored by check_zone inference; kept for auditing)
                "gt_lesion": gt_name,
                "hypo_lesion": hypo_name,
                "hypo_is_correct": (hypo_name == gt_name),
            })

            mani_item["bboxes"].append({
                "xyxy_224": xyxy_224,
                "gt_lesion": gt_name,
                "hypo_lesion": hypo_name,
                "hypo_is_correct": (hypo_name == gt_name),
            })

        out_entries.append(entry)
        manifest.append(mani_item)

    # --- Write files ---
    os.makedirs(os.path.dirname(os.path.abspath(output_json)), exist_ok=True)
    os.makedirs(os.path.dirname(os.path.abspath(manifest_json)), exist_ok=True)

    with open(output_json, "w", encoding="utf-8") as f:
        json.dump(out_entries, f, ensure_ascii=False, indent=2)

    with open(manifest_json, "w", encoding="utf-8") as f:
        json.dump(manifest, f, ensure_ascii=False, indent=2)

    # --- Summary ---
    print(f"[INFO] 总样本请求: {num_images} | 实际采样: {N}")
    print(f"[INFO] 其中 正确: {correct_n} | 错误: {wrong_n}")
    print(f"[OK]  merged_annotations 写入: {output_json}")
    print(f"[OK]  sampling_manifest 写入: {manifest_json}")


def parse_args():
    """Build the CLI parser for this script and return the parsed namespace."""
    ap = argparse.ArgumentParser(
        description="将 aier 标注转换为 check_zone 输入并按比例生成正确/错误样本"
    )
    add = ap.add_argument
    add("--ann_path", type=str,
        default="/home/zhangpinglu/data0/gy/Dataset/aier_processed/annotations.json",
        help="aier 的 annotations.json 路径")
    add("--lesion_desc_path", type=str,
        default="/home/zhangpinglu/data0/gy/code/fundus-reasoner-adaptive/fundus_reasoner/data_preprocess/configs/med_lesion_description.json",
        help="病灶 -> 中文描述 字典路径")
    add("--wrong_candidate_path", type=str,
        default="/home/zhangpinglu/data0/gy/code/medgemma-infer/api/config/wrong_candidate.json",
        help="病灶 -> 混淆病灶候选 字典路径")
    add("--orignal_dir", type=str,
        default="/home/zhangpinglu/data0/gy/Dataset/aier_orignal",
        help="aier 原图根目录（用 image_rela_dir 拼接）")
    add("--output_json", type=str,
        default="./experiments/aier_data_1000_annotations.json",
        help="输出：check_zone 使用的 merged_annotations.json")
    add("--manifest_json", type=str,
        default="./experiments/sampling_manifest.json",
        help="输出：采样与错配清单（用于核对）")
    add("--num_images", type=int, default=1000, help="抽样图片数量")
    add("--ratio_wrong", type=float, default=0.5, help="错误样本占比（按图片）")
    add("--seed", type=int, default=42, help="随机种子")
    return ap.parse_args()


if __name__ == "__main__":
    args = parse_args()
    convert_and_sample(
        ann_path=args.ann_path,
        lesion_desc_path=args.lesion_desc_path,
        wrong_candidate_path=args.wrong_candidate_path,
        orignal_dir=args.orignal_dir,
        output_json=args.output_json,
        manifest_json=args.manifest_json,
        num_images=args.num_images,
        ratio_wrong=args.ratio_wrong,
        seed=args.seed,
    )
