# -*- coding: utf-8 -*-
import argparse
import os
from os.path import join, basename, splitext
from typing import List, Dict
import json, csv

import numpy as np
from PIL import Image
from tqdm import tqdm
import torch
from skimage import measure

# The class pasted earlier; make sure it is importable from your project.
from sam2.sam2_image_predictor import SAM2ImagePredictor
# If this path does not exist, save the pasted class as sam2/tools/image_predictor.py, or adjust the import to your actual path.


def parse_args() -> argparse.Namespace:
    """Build and parse the command-line arguments for SAM2 2D batch inference."""
    parser = argparse.ArgumentParser("SAM2ImagePredictor 2D 批量推理")
    # Model weights and config.
    parser.add_argument("--checkpoint", type=str, default="checkpoints/MedSAM2_latest.pt",
                        help="权重路径（本地）")
    parser.add_argument("--cfg", type=str, default="sam2/configs/sam2.1_hiera_t512.yaml",
                        help="模型配置 yaml")
    # Input list and output directory.
    parser.add_argument("--images_txt", type=str, required=True, help="每行一个图片路径")
    parser.add_argument("--out_dir", type=str, required=True, help="输出目录")
    # Prompt-box source.
    parser.add_argument("--box_mode", type=str, default="full",
                        choices=["full", "center", "file"],
                        help="提示框来源：整图(full) / 中心框(center) / 从文件(file)")
    parser.add_argument("--center_scale", type=float, default=0.6,
                        help="中心框边长比例 * min(H,W)")
    parser.add_argument("--box_file", type=str, default=None,
                        help="当 box_mode=file 时，CSV 或 JSON。CSV列: image_path,xmin,ymin,xmax,ymax；JSON: {path:[xmin,ymin,xmax,ymax]}")
    # Output options.
    parser.add_argument("--save_prob", action="store_true", help="另存概率图 .npy")
    parser.add_argument("--save_overlay", action="store_true", help="保存叠加可视化")
    parser.add_argument("--largest_cc", action="store_true", help="仅保留最大连通域")
    # Compute device.
    parser.add_argument("--device", type=str, default="cuda:0")
    return parser.parse_args()


def load_list(txt: str) -> List[str]:
    """Read one image path per line from *txt*, stripping whitespace and skipping blanks."""
    with open(txt, "r") as f:
        raw_lines = f.read().splitlines()
    return [line.strip() for line in raw_lines if line.strip()]


def ensure_dir(d: str) -> None:
    """Create directory *d* (including parents); a no-op if it already exists."""
    if not os.path.isdir(d):
        os.makedirs(d, exist_ok=True)


def load_box_dict(box_file: str) -> Dict[str, np.ndarray]:
    """Load per-image bounding boxes from a CSV or JSON file.

    CSV columns: ``image_path,xmin,ymin,xmax,ymax``; JSON maps
    ``path -> [xmin, ymin, xmax, ymax]``.

    Args:
        box_file: Path ending in .csv or .json (extension matched case-insensitively).

    Returns:
        Dict mapping image path -> int64 array ``[xmin, ymin, xmax, ymax]``.

    Raises:
        ValueError: if the file has any other extension.
    """
    boxes: Dict[str, np.ndarray] = {}
    # Case-insensitive extension check so ".CSV" / ".JSON" are accepted too.
    ext = splitext(box_file)[1].lower()
    if ext == ".csv":
        with open(box_file, newline="") as f:
            for row in csv.DictReader(f):
                # int(float(...)) tolerates values written as "12.0".
                coords = [int(float(row[k])) for k in ("xmin", "ymin", "xmax", "ymax")]
                boxes[row["image_path"].strip()] = np.array(coords, dtype=np.int64)
    elif ext == ".json":
        with open(box_file, "r") as f:
            data = json.load(f)
        for path, coords in data.items():
            boxes[path] = np.array(coords, dtype=np.int64)
    else:
        raise ValueError("box_file 必须是 .csv 或 .json")
    return boxes


def make_full_box(w: int, h: int) -> np.ndarray:
    """Return the whole-image box ``[0, 0, w-1, h-1]`` as an int64 array (inclusive pixel coords)."""
    x_max = w - 1
    y_max = h - 1
    return np.array([0, 0, x_max, y_max], dtype=np.int64)


def make_center_box(w: int, h: int, scale: float) -> np.ndarray:
    """Return a square box of side ``int(min(w, h) * scale)`` centered in a w×h image.

    The side is clamped to at least 1 pixel so a tiny/zero ``scale`` still
    yields a valid box (the original code produced x2 < x1 when side == 0).
    Coordinates are clipped to the image bounds.

    Args:
        w: Image width in pixels.
        h: Image height in pixels.
        scale: Fraction of ``min(w, h)`` used as the box side length.

    Returns:
        int64 array ``[x1, y1, x2, y2]`` in inclusive pixel coordinates.
    """
    side = max(1, int(min(w, h) * scale))  # clamp: side=0 would give a degenerate box
    cx, cy = w // 2, h // 2
    x1 = max(0, cx - side // 2)
    y1 = max(0, cy - side // 2)
    x2 = min(w - 1, x1 + side - 1)
    y2 = min(h - 1, y1 + side - 1)
    return np.array([x1, y1, x2, y2], dtype=np.int64)


def largest_cc(mask_bool: np.ndarray) -> np.ndarray:
    """Keep only the largest connected component of a boolean mask.

    Returns the input unchanged when the mask contains no foreground.
    """
    labels = measure.label(mask_bool.astype(np.uint8))
    if labels.max() == 0:
        # No components at all — nothing to select.
        return mask_bool
    # Per-label pixel counts; index 0 is background, so skip it.
    component_sizes = np.bincount(labels.ravel())[1:]
    biggest_label = int(np.argmax(component_sizes)) + 1
    return labels == biggest_label


def overlay_mask(img_np: np.ndarray, mask_bool: np.ndarray, alpha: float = 0.5) -> Image.Image:
    """Alpha-blend a yellow highlight over the masked pixels and return a PIL image."""
    highlight = np.array([251, 252, 30], dtype=np.uint8)  # yellow
    blended = img_np.copy()
    masked_pixels = blended[mask_bool]
    blended[mask_bool] = (alpha * highlight + (1 - alpha) * masked_pixels).astype(np.uint8)
    return Image.fromarray(blended)


def main():
    """Run SAM2 box-prompted 2D segmentation over every image listed in --images_txt.

    Writes binary masks (and optionally probability maps and overlays) to --out_dir.
    """
    args = parse_args()
    device = torch.device(args.device)

    # 1) Build the predictor.
    #    from_pretrained would pull from HF; here we use a local cfg + checkpoint:
    from sam2.build_sam import build_sam2
    sam_model = build_sam2(args.cfg, args.checkpoint, device=device)
    predictor = SAM2ImagePredictor(sam_model)  # can also pass mask_threshold / max_hole_area etc.

    # 2) IO setup.
    images = load_list(args.images_txt)
    ensure_dir(args.out_dir)
    mask_dir = join(args.out_dir, "masks")
    vis_dir  = join(args.out_dir, "overlay")
    ensure_dir(mask_dir)
    if args.save_overlay:
        ensure_dir(vis_dir)

    # 3) Optional externally supplied bboxes.
    if args.box_mode == "file":
        if not args.box_file:
            raise ValueError("box_mode=file 需要 --box_file")
        box_map = load_box_dict(args.box_file)
    else:
        box_map = {}

    # 4) Per-image inference.
    for p in tqdm(images, desc="SAM2 2D infer"):
        img = Image.open(p).convert("RGB")
        w, h = img.size

        # Boxes are in **pixel coordinates** (we pass normalize_coords=False below).
        if args.box_mode == "full":
            box = make_full_box(w, h)
        elif args.box_mode == "center":
            box = make_center_box(w, h, args.center_scale)
        else:
            if p not in box_map:
                raise KeyError(f"{p} 在 box_file 中没有匹配的 bbox")
            box = box_map[p]

        # a) set image → compute the embedding (transforms are applied internally).
        predictor.set_image(np.array(img))  # HWC uint8

        # b) Predict from the box. multimask_output=True; we keep the best-IoU mask.
        masks_np, iou_np, lowres_np = predictor.predict(
            box=box[None, :],                 # shape (1,4); a plain (4,) also works
            multimask_output=True,
            return_logits=True,               # return logits (more flexible)
            normalize_coords=False,           # we pass pixel coordinates
        )
        # masks_np: CxHxW (logits), iou_np: C — per predictor's predict() contract.
        best = int(np.argmax(iou_np))
        prob = 1 / (1 + np.exp(-masks_np[best]))  # sigmoid: logits -> probabilities
        mask_bool = (prob > 0.5)

        if args.largest_cc:
            mask_bool = largest_cc(mask_bool)

        # Save the binary mask as 0/255 PNG next to an optional float32 prob map.
        base = splitext(basename(p))[0]
        Image.fromarray((mask_bool.astype(np.uint8) * 255)).save(join(mask_dir, f"{base}.png"))
        if args.save_prob:
            np.save(join(mask_dir, f"{base}.npy"), prob.astype(np.float32))

        if args.save_overlay:
            overlay_mask(np.array(img), mask_bool).save(join(vis_dir, f"{base}_overlay.png"))

    print(f"[Done] masks → {mask_dir}")
    if args.save_overlay:
        print(f"[Done] overlay → {vis_dir}")


# Script entry point: run only when executed directly, not when imported.
if __name__ == "__main__":
    main()
