"""
MuReD 数据集预处理脚本（完整版标签映射版）
-------------------------------------------------------
功能:
  - 支持从 train_data.csv 和 val_data.csv 读取多标签。
  - 使用论文中提供的 20 类疾病映射，生成完整英文标签。
  - 确保多标签按字典序排序，避免顺序导致的标签混乱。
  - 对图像进行裁剪 + resize，输出统一的 JSON 标注结构。
  - 验证所有图片尺寸与路径是否正确，不自动跳过异常。

输出:
  processed_root/
      images/
      raw_data.json      (原始 MuReD 标注)
      annotations.json   (与 STARE 合并后的标注)
      split.json

作者: GPT-5（根据用户项目风格编写）
"""

import os
import sys
import csv
import json
import argparse
from tqdm import tqdm
from data_preprocess.utils.crop import crop_resize_save
from data_preprocess.utils.stat import save_classification_stats

# -------------------------------
# 1. Label map (defined in the MuReD paper)
#    Maps the 20 abbreviated disease codes used as CSV column headers
#    to their full English disease names.
# -------------------------------
LABEL_MAP = {
    "DR": "Diabetic Retinopathy",
    "NORMAL": "Normal Retina",
    "MH": "Media Haze",
    "ODC": "Optic Disc Cupping",
    "TSLN": "Tessellation",
    "ARMD": "Age Related Macular Degeneration",
    "DN": "Drusen",
    "MYA": "Myopia",
    "BRVO": "Branch Retinal Vein Occlusion",
    "ODP": "Optic Disc Pallor",
    "CRVO": "Central Retinal Vein Occlusion",
    "CNV": "Choroidal Neovascularization",
    "RS": "Retinitis",
    "ODE": "Optic Disc Edema",
    "LS": "Laser Scars",
    "CSR": "Central Serous Retinopathy",
    "HTR": "Hypertensive Retinopathy",
    "ASR": "Arteriosclerotic Retinopathy",
    "CRS": "Chorioretinitis",
    "OTHER": "Other Diseases",
}


def parse_labels(csv_path):
    """
    Parse a MuReD label CSV (one row per image, one 0/1 column per disease).

    Args:
        csv_path (str): path to train_data.csv or val_data.csv. The file must
            have an "ID" column; every other column is treated as a label flag.

    Returns:
        dict: image id -> sorted list of full English label names. Sorting
            guarantees the same label set always serializes in the same order.

    Raises:
        ValueError: if a flag column's name is not present in LABEL_MAP.
    """
    label_dict = {}
    with open(csv_path, "r", encoding="utf-8") as f:
        reader = csv.DictReader(f)
        for row in reader:
            img_id = row["ID"].strip()
            labels = []
            for k, v in row.items():
                if k == "ID":
                    continue
                # A short CSV row yields None for missing columns; treat
                # those as 0 instead of crashing on None.strip().
                if v is not None and v.strip() == "1":
                    full_label = LABEL_MAP.get(k.strip().upper())
                    if full_label is None:
                        raise ValueError(f"未知标签 {k} 出现在 {csv_path}")
                    labels.append(full_label)
            if not labels:
                # No positive flag: fall back to the canonical normal label
                # from LABEL_MAP instead of a hard-coded duplicate string.
                labels = [LABEL_MAP["NORMAL"]]
            label_dict[img_id] = sorted(labels)
    return label_dict


def gather_data(data_path, tar_path, prefix="mured", resize=(512, 512)):
    """
    Preprocess the MuReD dataset: crop/resize every image and write
    raw_data.json (per-image annotations) plus split.json (train/test lists).

    Args:
        data_path (str): root of the raw MuReD dataset; must contain
            train_data.csv, val_data.csv and an images/ directory.
        tar_path (str): output root; processed images go to <tar_path>/images.
        prefix (str): prefix prepended to every output image file name.
        resize (tuple): target size passed to crop_resize_save.

    Returns:
        dict: image_name -> annotation entry, identical to raw_data.json.

    Raises:
        FileNotFoundError: if a CSV or an image is missing — the script
            deliberately fails fast instead of silently skipping samples.
    """
    os.makedirs(tar_path, exist_ok=True)
    img_out_dir = os.path.join(tar_path, "images")
    os.makedirs(img_out_dir, exist_ok=True)

    csv_train = os.path.join(data_path, "train_data.csv")
    csv_val = os.path.join(data_path, "val_data.csv")
    img_dir = os.path.join(data_path, "images")

    if not os.path.exists(csv_train):
        raise FileNotFoundError(f"未找到 {csv_train}")
    if not os.path.exists(csv_val):
        raise FileNotFoundError(f"未找到 {csv_val}")

    label_train = parse_labels(csv_train)
    label_val = parse_labels(csv_val)

    split_dict = {"train": [], "test": []}
    data_dict = {}

    # MuReD's "val" split is used as the test split downstream.
    for split_name, label_dict in [("train", label_train), ("test", label_val)]:
        for img_id, labels in tqdm(label_dict.items(), desc=f"处理 {split_name}", unit="img"):
            # The CSVs store ids without extensions; probe the common ones.
            src_path = None
            for ext in [".jpg", ".jpeg", ".png", ".tif", ".bmp"]:
                candidate = os.path.join(img_dir, img_id + ext)
                if os.path.exists(candidate):
                    src_path = candidate
                    break
            if src_path is None:
                raise FileNotFoundError(f"找不到图像 {img_id}（尝试过多种扩展名）")

            new_name = f"{prefix}_{img_id}.png"
            dest_path = os.path.join(img_out_dir, new_name)

            # Crop away the black border and resize to a uniform resolution.
            crop_info = crop_resize_save(
                image_path=src_path,
                save_path=dest_path,
                resize=resize,
                crop_threshold=25
            )

            entry = {
                "image_name": new_name,
                "image_path": dest_path,
                "original_path": src_path,
                "crop_info": crop_info,
                "diagnosis": {
                    "classification": {
                        "text": ", ".join(labels)
                    }
                }
            }

            data_dict[new_name] = entry
            split_dict[split_name].append(new_name)

    # Persist annotations and the train/test split. ensure_ascii=False on
    # both dumps so non-ASCII text is stored identically in both files
    # (raw_data.json previously escaped it while split.json did not).
    raw_json_path = os.path.join(tar_path, "raw_data.json")
    with open(raw_json_path, "w", encoding="utf-8") as f:
        json.dump(data_dict, f, indent=4, ensure_ascii=False)
    with open(os.path.join(tar_path, "split.json"), "w", encoding="utf-8") as f:
        json.dump(split_dict, f, indent=4, ensure_ascii=False)

    # The __main__ block writes label statistics into this directory;
    # create it here so the later save cannot fail on a missing path.
    os.makedirs("./experiments/stat", exist_ok=True)

    print(f"✅ 完成 MuReD 预处理，共 {len(data_dict)} 张图。")
    return data_dict

def merge_with_stare(tar_path, stare_ann_path):
    """
    Merge MuReD's raw_data.json with STARE's annotations.json.

    For every MuReD entry whose key contains "im" and whose
    diagnosis.classification.text is empty or contains "other", the STARE
    annotation for the matching key ("mured_*" -> "stare_*") is used when it
    exists; otherwise the MuReD annotation is kept. The result is written to
    <tar_path>/annotations.json; raw_data.json is left untouched.

    Args:
        tar_path (str): directory containing MuReD's raw_data.json.
        stare_ann_path (str): path to STARE's annotations.json.

    Returns:
        dict: the merged annotations, identical to annotations.json.

    Raises:
        FileNotFoundError: if either input JSON file is missing.
    """
    raw_json_path = os.path.join(tar_path, "raw_data.json")
    merged_json_path = os.path.join(tar_path, "annotations.json")

    if not os.path.exists(raw_json_path):
        raise FileNotFoundError(f"❌ 未找到 MuReD 原始标注文件: {raw_json_path}")
    if not os.path.exists(stare_ann_path):
        raise FileNotFoundError(f"❌ 未找到 STARE 标注文件: {stare_ann_path}")

    # Load both annotation files.
    with open(raw_json_path, "r", encoding="utf-8") as f:
        mured_data = json.load(f)
    with open(stare_ann_path, "r", encoding="utf-8") as f:
        stare_data = json.load(f)

    merged_data = {}

    # Bookkeeping for the summary printed at the end.
    total = len(mured_data)
    use_stare_count = 0
    use_mured_count = 0
    no_stare_found = 0

    for key, val in mured_data.items():
        diag = (
            val.get("diagnosis", {})
               .get("classification", {})
               .get("text", "")
               .strip()
               .lower()
        )

        # Only replace uninformative MuReD labels (empty or "other ...").
        if "im" in key and (diag == "" or "other" in diag):
            stare_key = key.replace("mured_", "stare_")
            if stare_key in stare_data:
                merged_data[key] = stare_data[stare_key]
                use_stare_count += 1
            else:
                # Keep the MuReD annotation when STARE has no match.
                # Bug fix: this counter was declared and printed but never
                # incremented, so the summary always reported 0 here.
                merged_data[key] = val
                no_stare_found += 1
        else:
            merged_data[key] = val
            use_mured_count += 1

    # Save the merged result.
    with open(merged_json_path, "w", encoding="utf-8") as f:
        json.dump(merged_data, f, indent=4, ensure_ascii=False)

    print("✅ 合并完成：")
    print(f"  总计图片数: {total}")
    print(f"  使用 STARE 标注替换: {use_stare_count}")
    print(f"  保留 MuReD 标注: {use_mured_count}")
    print(f"  STARE 未找到匹配项: {no_stare_found}")
    print(f"  ✅ 保存合并结果到 {merged_json_path}")
    return merged_data

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="MuReD 数据集预处理（多标签版）")
    parser.add_argument("--data_path", type=str,
                        default="/home/zhangpinglu/data0/gy/Dataset/public_dataset/MuReD",
                        help="MuReD 原始数据路径")
    parser.add_argument("--tar_path", type=str,
                        default="/home/zhangpinglu/data0/gy/Dataset/public_processed/MuReD",
                        help="输出路径")
    parser.add_argument("--stare_path", type=str,
                        default="/home/zhangpinglu/data0/gy/Dataset/public_processed/STARE/annotations.json",
                        help="STARE处理后的path")
    parser.add_argument("--prefix", type=str, default="mured")
    parser.add_argument("--resize", type=int, nargs=2, default=[512, 512])
    args = parser.parse_args()

    data_dict=gather_data(args.data_path, args.tar_path, prefix=args.prefix, resize=tuple(args.resize))
    merged_data=merge_with_stare(args.tar_path, args.stare_path)
    save_classification_stats(data_dict, os.path.join("./experiments/stat", "mured.json"))