# -*- coding: utf-8 -*-
"""
RIM-ONE_DL 数据集预处理脚本
功能：
    1. 生成标准化结构的分类+分割标注。
    2. 每张图片都包含诊断分类标签（normal/glaucoma）。
    3. 每张图片都带有 optic disc 与 optic cup 的分割掩码。
    4. 输出:
        - images/            （裁剪后的图片）
        - optic_disc/        （对应disc分割mask）
        - optic_cup/         （对应cup分割mask）
        - split.json         （train/test划分）
        - annotations.json   （完整标注）
        - experiments/stat/rimone.json 统计信息
"""

import os
import sys
import json
import argparse
from tqdm import tqdm
from PIL import Image
from data_preprocess.utils.crop import crop_resize_save, crop_resize_save_with_info
from data_preprocess.utils.stat import save_classification_stats

def build_seg_map(seg_root):
    """Index the reference segmentation mask files on disk.

    Walks ``seg_root``/{normal,glaucoma} and maps each image id to the
    paths of its cup and disc masks, e.g.::

        "r1_Im001-1" -> {"optic_cup":  ".../r1_Im001-1-Cup-T.png",
                         "optic_disc": ".../r1_Im001-1-Disc-T.png"}

    Args:
        seg_root: Root directory of the RIM-ONE_DL reference segmentations,
            expected to contain ``normal/`` and ``glaucoma/`` subdirectories.

    Returns:
        dict: image id -> {"optic_cup": path, "optic_disc": path}.

    Raises:
        ValueError: if an expected class subdirectory is missing.
    """
    seg_map = {}
    for cls in ["normal", "glaucoma"]:
        cls_dir = os.path.join(seg_root, cls)
        if not os.path.exists(cls_dir):
            raise ValueError(f"Segmentation directory not found: {cls_dir}")
        for fname in os.listdir(cls_dir):
            # Case-insensitive extension check, consistent with gather_data.
            if not fname.lower().endswith((".png", ".jpg", ".jpeg")):
                continue
            # Example file name: r1_Im001-1-Cup-T.png
            # Cut the id at the category marker instead of slicing a fixed
            # number of trailing characters, so a ".jpeg" extension (one
            # character longer) cannot corrupt the id.
            # NOTE: keys use underscores ("optic_cup"/"optic_disc") so they
            # match the comparison and output directory names in gather_data;
            # the old space-separated keys never matched and sent cup masks
            # into the disc directory.
            if "-Cup-" in fname:
                img_id = fname[:fname.index("-Cup-")]
                seg_map.setdefault(img_id, {})["optic_cup"] = os.path.join(cls_dir, fname)
            elif "-Disc-" in fname:
                img_id = fname[:fname.index("-Disc-")]
                seg_map.setdefault(img_id, {})["optic_disc"] = os.path.join(cls_dir, fname)
    return seg_map


def gather_data(data_path, tar_path, prefix="rimone", resize=(512, 512)):
    """
    RIM-ONE_DL 数据集预处理
    """
    # 目录准备
    os.makedirs(tar_path, exist_ok=True)
    image_dir = os.path.join(tar_path, "images")
    cup_dir = os.path.join(tar_path, "optic_cup")
    disc_dir = os.path.join(tar_path, "optic_disc")
    os.makedirs(image_dir, exist_ok=True)
    os.makedirs(cup_dir, exist_ok=True)
    os.makedirs(disc_dir, exist_ok=True)

    split_dict = {"train": [], "test": []}
    data_dict = {}

    seg_root = os.path.join(data_path, "RIM-ONE_DL_reference_segmentations")
    seg_map = build_seg_map(seg_root)

    part_root = os.path.join(data_path, "RIM-ONE_DL_images","partitioned_by_hospital")
    train_root = os.path.join(part_root, "training_set")
    test_root = os.path.join(part_root, "test_set")

    for split, split_root in [("train", train_root), ("test", test_root)]:
        if not os.path.exists(split_root):
            print(f"⚠️ 跳过 {split}, 路径不存在: {split_root}")
            continue

        for cls in ["normal", "glaucoma"]:
            cls_dir = os.path.join(split_root, cls)
            if not os.path.exists(cls_dir):
                continue
            image_list = sorted(os.listdir(cls_dir))
            for image_name in tqdm(image_list, desc=f"{split}-{cls}", unit="img"):
                if not image_name.lower().endswith((".png", ".jpg", ".jpeg")):
                    continue

                img_id = os.path.splitext(image_name)[0]
                src_path = os.path.join(cls_dir, image_name)
                new_name = f"{prefix}_{img_id}.png"
                dst_path = os.path.join(image_dir, new_name)

                try:
                    crop_info = crop_resize_save(
                        image_path=src_path,
                        save_path=dst_path,
                        resize=resize,
                        crop_threshold=25
                    )
                except Exception as e:
                    raise RuntimeError(f"❌ 图像处理失败 {src_path}: {e}")

                # 构造标注
                entry = {
                    "image_name": new_name,
                    "image_path": dst_path,
                    "original_path": src_path,
                    "crop_info": crop_info,
                    "diagnosis": {
                        "classification": {"text": cls},
                        "segmentation": {}
                    }
                }
                if img_id not in seg_map:
                    # 这个文件的命名非常混乱，有可能会多一个-l
                    img_id+='-1'
                # 分割文件
                if img_id in seg_map:
                    seg_info = seg_map[img_id]
                    for cat, seg_path in seg_info.items():
                        save_dir = cup_dir if cat == "optic_cup" else disc_dir
                        os.makedirs(save_dir, exist_ok=True)
                        mask_save_path = os.path.join(save_dir, new_name)
                        try:
                            crop_resize_save_with_info(
                                image_path=seg_path,
                                save_path=mask_save_path,
                                info=crop_info,
                                resize=resize
                            )
                            entry["diagnosis"]["segmentation"][cat] = mask_save_path
                        except Exception as e:
                            raise RuntimeError(f"❌ 分割掩码处理失败 {seg_path}: {e}")
                else:
                    raise ValueError(f"❌ 找不到 |{img_id}| 的分割文件")

                data_dict[new_name] = entry
                split_dict[split].append(new_name)

    # 保存标注
    with open(os.path.join(tar_path, "split.json"), "w", encoding="utf-8") as f:
        json.dump(split_dict, f, indent=4)
    with open(os.path.join(tar_path, "annotations.json"), "w", encoding="utf-8") as f:
        json.dump(data_dict, f, indent=4)

    os.makedirs("./experiments/stat", exist_ok=True)
    save_classification_stats(data_dict, "./experiments/stat/rimone.json")

    print(f"✅ 完成 RIM-ONE_DL 数据集预处理，共 {len(data_dict)} 张图像。")
    return data_dict


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="RIM-ONE_DL 数据集预处理")
    parser.add_argument("--data_path", type=str,
                        default="/home/zhangpinglu/data0/gy/Dataset/public_dataset/RIM-ONE_DL",
                        help="原始 RIM-ONE_DL 数据集路径")
    parser.add_argument("--tar_path", type=str,
                        default="/home/zhangpinglu/data0/gy/Dataset/public_processed/RIM-ONE_DL",
                        help="预处理结果保存路径")
    parser.add_argument("--prefix", type=str, default="rimone",
                        help="输出文件名前缀")
    parser.add_argument("--resize", type=int, nargs=2, default=[512, 512],
                        help="输出尺寸 (宽, 高)")
    args = parser.parse_args()

    gather_data(args.data_path, args.tar_path, prefix=args.prefix, resize=tuple(args.resize))
