# -*- coding: utf-8 -*-
"""
File: gamma_preprocess.py

Purpose:
  - Preprocess the GAMMA dataset (task2 Fovea localization + task3 Disc/Cup segmentation)
  - Align the shared fundus images and produce unified annotations:
      diagnosis.localization.fovea -> [norm_x, norm_y]
      diagnosis.segmentation.optic_disc / optic_cup -> mask paths
"""

import os
import json
import argparse
import numpy as np
import pandas as pd
from tqdm import tqdm
from PIL import Image
from data_preprocess.utils.crop import crop_resize_save,crop_resize_save_with_info
from data_preprocess.utils.stat import save_classification_stats


def scan_images(root):
    """Recursively collect image files under *root*.

    Returns a dict mapping the file stem (name without extension) to the
    full path; when the same stem appears more than once, the one seen
    last during the walk wins.
    """
    image_exts = (".jpg", ".jpeg", ".png")
    found = {}
    for dirpath, _dirs, filenames in os.walk(root):
        for filename in filenames:
            if not filename.lower().endswith(image_exts):
                continue
            stem, _ext = os.path.splitext(filename)
            found[stem] = os.path.join(dirpath, filename)
    return found

def load_fovea_xlsx(xlsx_path):
    """Read the fovea ground-truth spreadsheet.

    Returns a dict mapping a zero-padded 4-digit image id (e.g. "0001")
    to its ``(Fovea_X, Fovea_Y)`` pixel coordinates as floats.
    """
    frame = pd.read_excel(xlsx_path, engine="openpyxl")
    labels = {}
    for record in frame.to_dict("records"):
        raw_id = record["data"]
        # Guard against numeric cells like 1.0 -> "0001"; string ids such as
        # "0001" are kept, stripped and left-padded to four digits.
        try:
            image_id = f"{int(float(raw_id)):04d}"
        except Exception:
            image_id = str(raw_id).strip().zfill(4)

        labels[image_id] = (float(record["Fovea_X"]), float(record["Fovea_Y"]))
    return labels


def normalize_fovea_coords(img_path, fovea_xy):
    """Convert pixel fovea coordinates to fractions of the image size.

    NOTE(review): normalization uses the size of the image at *img_path*
    (the original, pre-crop file) — confirm downstream consumers remap
    via the stored crop info if they work on the cropped/resized output.
    """
    with Image.open(img_path) as handle:
        width, height = handle.size
    return [float(fovea_xy[0]) / width, float(fovea_xy[1]) / height]


def split_cup_disc(mask_path):
    """Split a combined disc/cup mask into two binary PIL images.

    Pixel convention in the source mask:
      0 (black) -> cup; 128 (gray) together with 0 -> disc.
    Each returned mask is white (255) on its region and black elsewhere.
    Returns ``(cup_image, disc_image)``.
    """
    raw = np.array(Image.open(mask_path))
    cup_mask = np.where(raw == 0, 255, 0).astype(np.uint8)
    disc_mask = np.where((raw == 0) | (raw == 128), 255, 0).astype(np.uint8)
    return Image.fromarray(cup_mask), Image.fromarray(disc_mask)

def gather_data(data_path, tar_path, prefix="gamma", resize=(512, 512)):
    """Preprocess the GAMMA dataset into a unified layout under *tar_path*.

    Aligns task2 (fovea localization) with task3 (disc/cup segmentation)
    over the shared fundus images, crops/resizes every image, decouples
    the combined disc/cup masks, and writes split.json / annotations.json
    plus classification stats.

    Args:
        data_path: Root of the raw GAMMA dataset (contains the task dirs).
        tar_path: Output root; created if missing.
        prefix: Prefix for renamed output files, ``{prefix}_{id}.png``.
        resize: Target (width, height) passed to the crop/resize helpers.

    Returns:
        The ``annotations`` dict keyed by the new image file name.

    Raises:
        ValueError: If a training image lacks a segmentation counterpart
            or a fovea label.
        FileNotFoundError: If a training segmentation mask is missing.
    """
    os.makedirs(tar_path, exist_ok=True)
    img_dir = os.path.join(tar_path, "images")
    os.makedirs(img_dir, exist_ok=True)
    disc_dir = os.path.join(tar_path, "optic_disc")
    cup_dir = os.path.join(tar_path, "optic_cup")
    os.makedirs(disc_dir, exist_ok=True)
    os.makedirs(cup_dir, exist_ok=True)

    split = {"train": [], "test": []}
    annotations = {}

    # ----------------------------
    # Path preparation
    # ----------------------------
    fovea_root = os.path.join(data_path, "task2_Fovea_localization")
    seg_root = os.path.join(data_path, "task3_disc_cup_segmentation")

    fovea_train_imgs = scan_images(os.path.join(fovea_root, "training", "fundus color images"))
    fovea_test_imgs = scan_images(os.path.join(fovea_root, "testing", "fundus color images"))
    seg_train_imgs = scan_images(os.path.join(seg_root, "training", "fundus color images"))
    seg_test_imgs = scan_images(os.path.join(seg_root, "testing", "fundus color images")) # scanned but unused: test images are driven by the fovea set below

    fovea_xlsx = os.path.join(fovea_root, "training", "fovea_localization_training_GT.xlsx")
    fovea_labels = load_fovea_xlsx(fovea_xlsx)

    mask_dir = os.path.join(seg_root, "training", "Disc_Cup_Mask")

    # ----------------------------
    # Process train split (100 images)
    # ----------------------------
    print("🔹 处理训练集")
    for image_id, img_path in tqdm(fovea_train_imgs.items(), desc="GAMMA train", unit="img"):
        # Every training image must exist in both task2 and task3.
        if image_id not in seg_train_imgs:
            raise ValueError(f"{image_id} 在 Fovea 中存在但未在 Segmentation 中找到。")

        new_name = f"{prefix}_{image_id}.png"
        dst_path = os.path.join(img_dir, new_name)

        crop_info = crop_resize_save(img_path, dst_path, resize=resize, crop_threshold=25)

        # Decouple the combined disc/cup segmentation mask.
        mask_path = os.path.join(mask_dir, f"{image_id}.png")
        if not os.path.exists(mask_path):
            raise FileNotFoundError(f"Missing mask for {image_id}")

        disc_save = os.path.join(disc_dir, f"{new_name}")
        cup_save = os.path.join(cup_dir, f"{new_name}")
        cup_image,disc_image=split_cup_disc(mask_path)
        # Re-apply the fundus image's crop/resize so the masks stay aligned.
        crop_resize_save_with_info(cup_image, cup_save, resize=resize, info=crop_info)
        crop_resize_save_with_info(disc_image, disc_save, resize=resize,info=crop_info)
        # Fovea annotation.
        if image_id not in fovea_labels:
            raise ValueError(f"No fovea label for {image_id}")
        fovea_xy = fovea_labels[image_id]
        # NOTE(review): coordinates are normalized against the ORIGINAL image
        # size, not the cropped/resized output; crop_info is stored so
        # downstream code can remap — confirm consumers expect this.
        norm_xy = normalize_fovea_coords(img_path, fovea_xy)

        annotations[new_name] = {
            "image_name": new_name,
            "image_path": dst_path,
            "original_path": img_path,
            "crop_info": crop_info,
            "diagnosis": {
                "localization": {"fovea": norm_xy},
                "segmentation": {
                    "optic disc": disc_save,
                    "optic cup": cup_save
                }
            }
        }
        split["train"].append(new_name)

    # ----------------------------
    # Process test split (100 images)
    # ----------------------------
    print("🔹 处理测试集")
    for image_id, img_path in tqdm(fovea_test_imgs.items(), desc="GAMMA test", unit="img"):
        new_name = f"{prefix}_{image_id}.png"
        dst_path = os.path.join(img_dir, new_name)
        crop_info = crop_resize_save(img_path, dst_path, resize=resize, crop_threshold=25)

        annotations[new_name] = {
            "image_name": new_name,
            "image_path": dst_path,
            "original_path": img_path,
            "crop_info": crop_info,
            "diagnosis": {}  # the test split has no labels
        }
        split["test"].append(new_name)

    # ----------------------------
    # Save outputs
    # ----------------------------
    split_path = os.path.join(tar_path, "split.json")
    with open(split_path, "w", encoding="utf-8") as f:
        json.dump(split, f, indent=4)

    anno_path = os.path.join(tar_path, "annotations.json")
    with open(anno_path, "w", encoding="utf-8") as f:
        json.dump(annotations, f, indent=4)

    save_classification_stats(annotations, "./experiments/stat/gamma.json")

    print(f"✅ GAMMA 预处理完成: train={len(split['train'])}, test={len(split['test'])}, total={len(annotations)}")
    return annotations


if __name__ == "__main__":
    # CLI entry point: parse input/output paths and run the full pipeline.
    cli = argparse.ArgumentParser(description="GAMMA 数据集预处理 (Fovea + Disc/Cup)")
    cli.add_argument("--data_path", type=str,
                     default="/home/zhangpinglu/data0/gy/Dataset/public_dataset/GAMMA")
    cli.add_argument("--tar_path", type=str,
                     default="/home/zhangpinglu/data0/gy/Dataset/public_processed/GAMMA")
    cli.add_argument("--prefix", type=str, default="gamma")
    cli.add_argument("--resize", type=int, nargs=2, default=[512, 512])
    opts = cli.parse_args()

    gather_data(opts.data_path, opts.tar_path, prefix=opts.prefix, resize=tuple(opts.resize))
