# zeroshoot_fundus_seg.py
# -*- coding: utf-8 -*-
"""
0-shot Fundus segmentation with MedSegX (SAM-based)
- 对每张图分别推理 OpticCup / OpticDisc
- 自动按 SAM encoder 的输入尺寸做 resize
- 整图 box 作为提示
- modal 固定为 6 (Fundus)
- 保存二值掩码（可选保存概率图）
"""

import argparse
import os
from os.path import join, dirname, basename, splitext
from typing import List

import numpy as np
from PIL import Image
from tqdm import tqdm

import torch
import torch.nn as nn
from torch.nn import functional as F
from torch.utils.data import DataLoader

from segment_anything import sam_model_registry, sam_model_checkpoint
from model import *  # MedSegX / MedSAM, etc.
from data.fundus_seg_dataset import fundusSeg


def load_image_list(txt_path: str) -> List[str]:
    """Read image paths from a text file, one path per line.

    Blank lines and surrounding whitespace are ignored.

    Args:
        txt_path: path to the list file.

    Returns:
        List of non-empty, stripped lines (image paths).
    """
    # Explicit UTF-8 avoids depending on the platform's default encoding
    # (paths may contain non-ASCII characters).
    with open(txt_path, "r", encoding="utf-8") as f:
        return [line.strip() for line in f if line.strip()]

def ensure_dir(d: str):
    """Create directory *d* (including parents) if it does not exist.

    A falsy path is a no-op: callers pass ``os.path.dirname(save_path)``,
    which is ``""`` for a bare filename, and ``os.makedirs("")`` would
    raise FileNotFoundError.
    """
    if d:
        os.makedirs(d, exist_ok=True)

def save_mask_png(mask_bool: np.ndarray, save_path: str):
    """Write a boolean mask to *save_path* as an 8-bit PNG (True -> 255)."""
    parent_dir = os.path.dirname(save_path)
    ensure_dir(parent_dir)
    pixels = mask_bool.astype(np.uint8) * 255
    Image.fromarray(pixels).save(save_path)

def save_prob_npy(prob: np.ndarray, save_path: str):
    """Persist a probability map to *save_path* as a NumPy .npy file."""
    target_dir = os.path.dirname(save_path)
    ensure_dir(target_dir)
    np.save(save_path, prob)

def infer_one_task(model, device, image_paths: List[str], organ_name: str,
                   out_dir_task: str, modal_id: int, batch_size: int, num_workers: int,
                   prob_thresh: float, save_prob: bool):
    """Run zero-shot segmentation for one organ over all images and save masks.

    Args:
        model: DataParallel-wrapped MedSegX/MedSAM model (accessed via ``.module``).
        device: torch device that input tensors are moved to.
        image_paths: image file paths to segment.
        organ_name: organ prompt name, e.g. "OpticCup" or "OpticDisc".
        out_dir_task: directory receiving one PNG mask per image (plus an
            optional ``.npy`` probability map when *save_prob* is True).
        modal_id: modality id forwarded to the dataset (6 = Fundus per the
            module header).
        batch_size: DataLoader batch size.
        num_workers: DataLoader worker count.
        prob_thresh: sigmoid-probability threshold used to binarize masks.
        save_prob: if True, also save per-pixel probabilities as ``.npy``.
    """

    # The resize target comes from the SAM image encoder's expected input size.
    img_size = model.module.sam.image_encoder.img_size
    ds = fundusSeg(image_paths, organ_name=organ_name, modal_id=modal_id, sam_input_size=img_size)
    dl = DataLoader(ds, batch_size=batch_size, shuffle=False,
                    num_workers=num_workers, pin_memory=True)

    model.eval()
    with torch.no_grad():
        for data, _ in tqdm(dl, desc=f"Infer {organ_name}"):
            # Keep the same dict keys as the original project.
            imgs = data["img"].to(device, non_blocking=True)         # [B,3,S,S]
            boxes = data["box"].to(device, non_blocking=True)        # [B,4]
            names = data["name"]                                      # list

            # Convert the modal id to a tensor (the dataset may yield a plain int).
            modal = data["modal"]
            if not torch.is_tensor(modal):
                modal = torch.tensor([modal] * imgs.shape[0], device=device, dtype=torch.long)
            else:
                modal = modal.to(device, non_blocking=True)

            # Convert the organ ids to a tuple of tensors.
            organ_tuple = tuple(
                (x if torch.is_tensor(x) else torch.tensor(x, device=device, dtype=torch.long))
                for x in data["organ"]
            )

            # Forward pass.
            mask_pred = model({"img": imgs, "box": boxes, "modal": modal, "organ": organ_tuple, "name": names})

            # Normalize the output: for multi-mask output apply sigmoid then take
            # the max over the K mask channels; otherwise squeeze the single channel.
            if mask_pred.dim() == 4:
                # [B, K, H, W]
                prob = torch.sigmoid(mask_pred)
                probs, _ = torch.max(prob, dim=1)  # [B,H,W]
            else:
                probs = torch.sigmoid(mask_pred).squeeze(1)  # [B,H,W]

            probs_np = probs.cpu().numpy()
            mask_bool = probs_np > prob_thresh

            # Save one PNG mask (and optional .npy probability map) per image,
            # named after the input file's basename.
            for i, p in enumerate(names):
                base = splitext(basename(p))[0]
                save_png = join(out_dir_task, f"{base}.png")
                save_mask_png(mask_bool[i], save_png)
                if save_prob:
                    save_prob_npy(probs_np[i], join(out_dir_task, f"{base}.npy"))


def main(args):
    """Build the model, load weights, then run OpticCup and OpticDisc inference.

    Raises:
        NotImplementedError: if ``args.method`` is not "medsam" or "medsegx".
        FileNotFoundError: if ``args.model_weight`` does not exist.
    """
    device = torch.device(args.device)

    # --- Build SAM + MedSegX ---
    ckpt_sam = join(args.checkpoint, sam_model_checkpoint[args.model_type])
    sam_model = sam_model_registry[args.model_type](image_size=256, keep_resolution=True, checkpoint=ckpt_sam)

    if args.method == "medsam":
        model = MedSAM(sam_model).to(device)
    elif args.method == "medsegx":
        model = MedSegX(sam_model, args.bottleneck_dim, args.embedding_dim, args.expert_num).to(device)
    else:
        raise NotImplementedError(f"Method {args.method} not implemented!")

    model = nn.DataParallel(model, device_ids=args.device_ids)

    # --- Load weights ---
    if os.path.isfile(args.model_weight):
        print(f"[Info] load model from {args.model_weight}")
        checkpoint = torch.load(args.model_weight, map_location=device)
        model.module.load_parameters(checkpoint["model"])
    else:
        raise FileNotFoundError(f"model weight {args.model_weight} not found!")

    # --- Prepare input / output ---
    image_paths = load_image_list(args.images_txt)
    print(f"[Info] {len(image_paths)} images loaded from {args.images_txt}")
    out_dir_cup  = join(args.out_dir, "optic_cup")
    out_dir_disc = join(args.out_dir, "optic_disc")
    ensure_dir(out_dir_cup); ensure_dir(out_dir_disc)

    # --- Run both segmentation tasks ---
    infer_one_task(
        model, device, image_paths, organ_name="OpticCup",
        out_dir_task=out_dir_cup, modal_id=args.fundus_modal_id,
        batch_size=args.batch_size, num_workers=args.num_workers,
        prob_thresh=args.prob_thresh, save_prob=args.save_prob
    )
    infer_one_task(
        model, device, image_paths, organ_name="OpticDisc",
        out_dir_task=out_dir_disc, modal_id=args.fundus_modal_id,
        batch_size=args.batch_size, num_workers=args.num_workers,
        prob_thresh=args.prob_thresh, save_prob=args.save_prob
    )
    print(f"[Done] Masks saved to: {args.out_dir}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser("0-shot Fundus segmentation with MedSegX", add_help=False)
    # model
    parser.add_argument("--checkpoint",    type=str, default="./playground/SAM",
                        help="path to SAM checkpoint folder")
    parser.add_argument("--model_type",    type=str, default="vit_b",
                        help="SAM model scale (e.g vit_b, vit_l, vit_h)")
    parser.add_argument("--model_weight",  type=str, default="./playground/MedSegX/medsegx_vit_b.pth",
                        help="path to MedSegX model weight")
    parser.add_argument("--method",        type=str, default="medsegx")
    parser.add_argument("--bottleneck_dim",type=int, default=16)
    parser.add_argument("--embedding_dim", type=int, default=16)
    parser.add_argument("--expert_num",    type=int, default=4)
    # io
    parser.add_argument("--images_txt",    type=str, required=True,
                        help="txt file of image paths (one path per line)")
    parser.add_argument("--out_dir",       type=str, required=True,
                        help="directory to save masks")
    # runtime
    parser.add_argument("--device",        type=str, default="cuda:0")
    parser.add_argument("--device_ids",    type=int, nargs="+", default=[0],
                        help="device ids for DataParallel")
    parser.add_argument("--batch_size",    type=int, default=16)
    parser.add_argument("--num_workers",   type=int, default=8)
    # options
    parser.add_argument("--fundus_modal_id", type=int, default=6,
                        help="Fundus modal id (default 6)")
    parser.add_argument("--prob_thresh",     type=float, default=0.5,
                        help="probability threshold to binarize masks")
    parser.add_argument("--save_prob",       action="store_true",
                        help="also save probability map as .npy")

    args = parser.parse_args()
    main(args)
