# -*- coding: utf-8 -*-
"""
文件名: deepdrid_preprocess.py

功能概述:
    将 DeepDRiD 数据集（regular fundus 与 ultra-widefield 两种模态）的
    训练/验证/线上测试数据统一预处理为项目标准结构，包含:
      - images/: 归一化后的图像（regular: 裁剪+resize；UWF: 不裁剪、不缩放）
      - split.json: 训练/测试划分（train=训练+验证；test=线上挑战）
      - annotations.json: 每张图的标注，采用统一语义化格式:
          diagnosis.classification.text   -> DR 的完整自然语言描述（DR=5 则不写）
          diagnosis.classification.quality-> 由四个质量项拼句后的自然语言（任何一个缺失则不写）
      - experiments/stat/deepdrid.json: 分类统计

关键实现要点(与项目规范对齐):
    1) 先收集所有图片，构建 image_id -> abs_path 映射(regular & UWF 分开)；
    2) 读取所有 CSV / XLSX 标签并整合到同一套语义文本；
    3) 严格异常：映射缺失、非法代码、双眼同时标注等 -> 直接 raise；
    4) 不写空字段：缺失 quality 就不要写 "quality"，DR=5 就不写 "text"；
    5) UWF 不裁剪不缩放，仅复制，crop_info 填原图尺寸；Regular 使用 crop_resize_save。

使用样例:
    python deepdrid_preprocess.py \
        --data_path /home/zhangpinglu/data0/gy/Dataset/public_dataset/DeepDRiD-master \
        --tar_path   /home/zhangpinglu/data0/gy/Dataset/public_processed/DeepDRiD \
        --prefix drid

作者: zym1105
"""

import os
import json
import argparse
from typing import Dict, Tuple, Optional, List
import pandas as pd
from tqdm import tqdm
from PIL import Image

from data_preprocess.utils.crop import crop_resize_save
from data_preprocess.utils.stat import save_classification_stats


# -----------------------------
# 语义映射（严格，不给默认）
# -----------------------------

# Map DeepDRiD DR grade codes to the natural-language "text" value written
# into annotations.json. Code 5 maps to None: the image is too poor to
# diagnose or grade, so no "text" field is written for it.
DR_LEVEL_MAP: Dict[int, Optional[str]] = {
    0: "normal",
    1: "Mild non-proliferative diabetic retinopathy",
    2: "Moderate non-proliferative diabetic retinopathy",
    3: "Severe non-proliferative diabetic retinopathy",
    4: "Proliferative diabetic retinopathy",
    5: None,  # quality too poor to diagnose/grade -> omit "text"
}

# The four tables below are combined into a single sentence by
# compose_quality_sentence(); a code missing from its table raises there.
# Key sets are sparse (e.g. {0, 1, 4, 6, 8, 10}); presumably these are the
# dataset's discrete score values -- confirm against the DeepDRiD docs.

# Overall gradability of the image (binary).
QUALITY_OVERALL = {
    0: "Quality is not good enough for the diagnosis of retinal diseases.",
    1: "Quality is good enough for the diagnosis of retinal diseases.",
}

# Extent/location of imaging artifacts.
QUALITY_ARTIFACT = {
    0: "Do not contain artifacts.",
    1: "Outside the aortic arch with range less than one fourth of the image.",
    4: "Do not affect the macular area with scope less than one fourth.",
    6: "Cover more than one fourth but less than one half of the image.",
    8: "Cover more than one half without fully covering the posterior pole.",
    10: "Cover the entire posterior pole.",
}

# Clarity: which vascular arch levels / lesions can be identified.
QUALITY_CLARITY = {
    1: "Only level 1 vascular arch can be identified.",
    4: "Level 2 vascular arch and a small number of lesions can be identified.",
    6: "Level 3 vascular arch and some lesions can be identified.",
    8: "Level 3 vascular arch and most lesions can be identified.",
    10: "Level 3 vascular arch and all lesions can be identified.",
}

# Field definition: visibility/centering of the optic disc and macula
# (PD presumably = papillary/disc diameter -- confirm against dataset docs).
QUALITY_FIELD_DEF = {
    1: "Does not include the optic disc and macula.",
    4: "Includes either the optic disc or the macula.",
    6: "Includes both the optic disc and the macula.",
    8: "The optic disc and macula are within two PDs of the center.",
    10: "The optic disc and macula are within one PD of the center.",
}


def compose_quality_sentence(
    overall: int, artifact: int, clarity: int, field_def: int
) -> str:
    """Join the four image-quality sub-scores into one English sentence.

    Raises:
        ValueError: if any score is not a key of its lookup table.
    """
    checks = (
        (overall, QUALITY_OVERALL, "Overall quality"),
        (clarity, QUALITY_CLARITY, "Clarity"),
        (field_def, QUALITY_FIELD_DEF, "Field definition"),
        (artifact, QUALITY_ARTIFACT, "Artifact"),
    )
    for value, table, label in checks:
        if value not in table:
            raise ValueError(f"Invalid {label} value: {value}")

    parts = [
        QUALITY_OVERALL[overall],
        f"For artifact, {QUALITY_ARTIFACT[artifact]}",
        f"For clarity, {QUALITY_CLARITY[clarity]}",
        f"For field, {QUALITY_FIELD_DEF[field_def]}",
    ]
    return " ".join(parts)


# -----------------------------
# 工具函数
# -----------------------------

def scan_images(root: str) -> Dict[str, str]:
    """Recursively scan ``root`` for image files.

    DeepDRiD stores images in several nested ``Images`` directories, so the
    whole tree is walked.

    Returns:
        {image_id (file name without extension, stripped): absolute path}

    Raises:
        ValueError: if two files map to the same image_id — ids must be
        unique within one scan root.
    """
    image_exts = (".jpg", ".jpeg", ".png", ".bmp", ".tif", ".tiff")
    id2path: Dict[str, str] = {}
    for dirpath, _, files in os.walk(root):
        for fname in files:
            if not fname.lower().endswith(image_exts):
                continue
            image_id = os.path.splitext(fname)[0].strip()
            abs_path = os.path.join(dirpath, fname)
            if image_id in id2path:
                raise ValueError(f"Duplicate image_id found while scanning: {image_id}\n"
                                 f"{id2path[image_id]}\n{abs_path}")
            id2path[image_id] = abs_path
    # Fixed message: original read "Founding image number:N" (typo, no space).
    print(f"Found {len(id2path)} images")
    return id2path


def make_crop_info_from_image(img_path: str) -> Dict:
    """Build a crop_info record for an image that is not cropped.

    The crop box spans the whole image and both recorded sizes equal the
    original dimensions (used for the UWF modality).
    """
    with Image.open(img_path) as im:
        width, height = im.size
    return {
        "crop_box": [0, width, 0, height],
        "original_size": [width, height],
        "cropped_size": [width, height],
    }


def safe_int(x) -> Optional[int]:
    """Safely convert a numeric value/string to int.

    - None / '' / whitespace-only -> None
    - '0.0' / '1.0' / 0.0 (float with zero fraction) -> int
    - 2.5 or 'abc' -> ValueError

    Bug fix: in the original, the specific "non-integer float" error was
    raised inside the try block, caught by the surrounding
    ``except ValueError`` and replaced with the generic "not a valid
    integer" message. The error paths are now separated so each raises its
    intended message.
    """
    if x is None:
        return None
    s = str(x).strip()
    if s == "":
        return None

    # Pure integer (optionally negative).
    if s.isdigit() or (s.startswith('-') and s[1:].isdigit()):
        return int(s)

    try:
        f = float(s)
    except ValueError:
        # Not numeric at all; suppress the float() context for a clean error.
        raise ValueError(f"Value '{x}' is not a valid integer.") from None

    if f.is_integer():
        return int(f)
    raise ValueError(f"Value '{x}' is a non-integer float.")


# -----------------------------
# 标签加载（regular）
# -----------------------------

import warnings

def load_regular_csv_labels(csv_path: str) -> Dict[str, Dict]:
    """Parse a regular-fundus-(training|validation).csv label file.

    Each CSV row describes exactly one image, belonging to either the left
    or the right eye.

    Returns:
        {image_id: {'dr_level': int or None,
                    'overall': int or None, 'artifact': int or None,
                    'clarity': int or None, 'field': int or None}}

    Raises:
        FileNotFoundError: if ``csv_path`` does not exist.
        ValueError: if a row carries both left- and right-eye DR levels.

    A RuntimeWarning is emitted when the four quality columns are only
    partially filled for a row.
    """
    if not os.path.exists(csv_path):
        raise FileNotFoundError(f"CSV not found: {csv_path}")

    frame = pd.read_csv(csv_path).fillna("")
    labels: Dict[str, Dict] = {}
    for _, record in frame.iterrows():
        image_id = str(record["image_id"]).strip()

        left = safe_int(record.get("left_eye_DR_Level", ""))
        right = safe_int(record.get("right_eye_DR_Level", ""))
        patient = safe_int(record.get("patient_DR_Level", ""))

        # A row belongs to one eye only; both filled means a labeling error.
        if left is not None and right is not None:
            raise ValueError(f"Both left and right DR levels present for image_id={image_id} in {csv_path}")

        # Prefer the per-eye level; fall back to the patient-level label.
        eye_level = left if left is not None else right
        dr_level = eye_level if eye_level is not None else patient

        overall = safe_int(record.get("Overall quality", ""))
        clarity = safe_int(record.get("Clarity", ""))
        field_def = safe_int(record.get("Field definition", ""))
        artifact = safe_int(record.get("Artifact", ""))

        known = [v for v in (overall, artifact, clarity, field_def) if v is not None]
        # Some (but not all) quality columns missing -> warn, keep the row.
        if 0 < len(known) < 4:
            warnings.warn(
                f"[Warning] Partial quality info missing for {image_id} in {os.path.basename(csv_path)} "
                f"(overall={overall}, artifact={artifact}, clarity={clarity}, field={field_def})",
                RuntimeWarning
            )

        labels[image_id] = {
            "dr_level": dr_level,
            "overall": overall,
            "artifact": artifact,
            "clarity": clarity,
            "field": field_def,
        }

    return labels



def load_regular_challenge_labels(ch1_xlsx: str, ch2_xlsx: str) -> Tuple[Dict[str, int], Dict[str, Dict[str, int]]]:
    """Parse the Online-Challenge1&2 evaluation spreadsheets.

    Returns:
        (ch1_map, ch2_map) where
          ch1_map: {image_id: DR level} (a missing level raises ValueError)
          ch2_map: {image_id: {'overall', 'artifact', 'clarity', 'field'}}

    Raises:
        FileNotFoundError: if either XLSX file is absent.
        ValueError: if a Challenge1 row has no DR_Levels value.
    """
    for path, label in ((ch1_xlsx, "Challenge1"), (ch2_xlsx, "Challenge2")):
        if not os.path.exists(path):
            raise FileNotFoundError(f"{label} xlsx not found: {path}")

    dr_frame = pd.read_excel(ch1_xlsx, engine="openpyxl").fillna("")
    quality_frame = pd.read_excel(ch2_xlsx, engine="openpyxl").fillna("")

    ch1_map: Dict[str, int] = {}
    for _, rec in dr_frame.iterrows():
        img = str(rec["image_id"]).strip()
        level = safe_int(rec["DR_Levels"])
        if level is None:
            raise ValueError(f"Missing DR_Levels for image_id={img} in {ch1_xlsx}")
        ch1_map[img] = level

    ch2_map: Dict[str, Dict[str, int]] = {}
    for _, rec in quality_frame.iterrows():
        img = str(rec["image_id"]).strip()
        ch2_map[img] = {
            "overall": safe_int(rec["Overall quality"]),
            "artifact": safe_int(rec["Artifact"]),
            "clarity": safe_int(rec["Clarity"]),
            "field": safe_int(rec["Field definition"]),
        }

    return ch1_map, ch2_map


# -----------------------------
# 标签加载（UWF）
# -----------------------------

def load_uwf_csv_labels(csv_path: str) -> Dict[str, Dict]:
    """Parse an ultra-widefield-(training|validation).csv label file.

    Returns:
        {image_id: {'dr_level': int or None}}

    Raises:
        FileNotFoundError: if ``csv_path`` does not exist.
    """
    if not os.path.exists(csv_path):
        raise FileNotFoundError(f"CSV not found: {csv_path}")
    frame = pd.read_csv(csv_path).fillna("")
    return {
        str(rec["image_id"]).strip(): {"dr_level": safe_int(rec.get("DR_level", ""))}
        for _, rec in frame.iterrows()
    }


def load_uwf_challenge_labels(ch3_xlsx: str) -> Dict[str, int]:
    """Parse the Online-Challenge3 evaluation XLSX.

    Returns:
        {image_id: UWF DR level}

    Raises:
        FileNotFoundError: if the XLSX file is absent.
        ValueError: if a row has no UWF_DR_Levels value.
    """
    if not os.path.exists(ch3_xlsx):
        raise FileNotFoundError(f"Challenge3 xlsx not found: {ch3_xlsx}")
    frame = pd.read_excel(ch3_xlsx, engine="openpyxl").fillna("")
    result: Dict[str, int] = {}
    for _, rec in frame.iterrows():
        img = str(rec["image_id"]).strip()
        level = safe_int(rec["UWF_DR_Levels"])
        if level is None:
            raise ValueError(f"Missing UWF_DR_Levels for image_id={img} in {ch3_xlsx}")
        result[img] = level
    return result


# -----------------------------
# 主流程
# -----------------------------

def _dr_text(dr: int, image_id: str, source: str) -> Optional[str]:
    """Map a DR level code to its description text.

    Returns None for level 5 (ungradable image -> the "text" field is
    omitted); raises ValueError for codes outside DR_LEVEL_MAP.
    """
    if dr not in DR_LEVEL_MAP:
        raise ValueError(f"Unknown DR level {dr} for {image_id} ({source})")
    return DR_LEVEL_MAP[dr]


def _make_entry(new_name: str, dst_path: str, img_path: str,
                crop_info: Dict, classification: Dict) -> Dict:
    """Assemble one annotation record in the project's unified format."""
    return {
        "image_name": new_name,
        "image_path": dst_path,
        "original_path": img_path,
        "crop_info": crop_info,
        "diagnosis": {
            "classification": classification
        }
    }


def _save_uwf_image(img_path: str, dst_path: str, resize: Tuple[int, int]) -> Dict:
    """Save one UWF image resized to ``resize`` and return its crop_info.

    NOTE(review): the module header claims UWF images are neither cropped nor
    resized, but the code has always resized them; that behavior is kept
    here, except the target size now honors the ``resize`` argument instead
    of a hard-coded (512, 512) (identical for the default). crop_info still
    records the ORIGINAL dimensions, exactly as before -- confirm downstream
    consumers expect that rather than the resized size.
    """
    with Image.open(img_path) as im:
        im.resize(size=resize).save(dst_path)
    return make_crop_info_from_image(img_path)


def gather_data(data_path: str, tar_path: str, prefix: str = "drid", resize: Tuple[int, int] = (512, 512)):
    """Preprocess the DeepDRiD dataset into the project's standard structure.

    Written under ``tar_path``:
      - images/           normalized images (regular: crop+resize via
                          crop_resize_save; UWF: plain resize, see
                          _save_uwf_image)
      - split.json        {"train": [...], "test": [...]} where train =
                          training + validation sets and test = the online
                          challenge evaluation sets
      - annotations.json  one entry per image (see _make_entry)
    Also writes ./experiments/stat/deepdrid.json via save_classification_stats.

    Args:
        data_path: root directory of the raw DeepDRiD dataset.
        tar_path: output directory.
        prefix: prefix for generated image file names.
        resize: output size (W, H), applied to both modalities.

    Returns:
        The annotations dict ({image_name: entry}).

    Raises:
        FileNotFoundError / ValueError: propagated from the label loaders and
        the DR/quality mappings on missing files or malformed labels.
    """
    os.makedirs(tar_path, exist_ok=True)
    img_dir = os.path.join(tar_path, "images")
    os.makedirs(img_dir, exist_ok=True)

    split = {"train": [], "test": []}
    annotations: Dict[str, Dict] = {}

    # ---------- Regular fundus: scan images, build id -> path maps ----------
    reg_root = os.path.join(data_path, "regular_fundus_images")

    reg_train_ids = scan_images(os.path.join(reg_root, "regular-fundus-training", "Images"))
    reg_val_ids = scan_images(os.path.join(reg_root, "regular-fundus-validation", "Images"))
    reg_eval_ids = scan_images(os.path.join(reg_root, "Online-Challenge1&2-Evaluation", "Images"))

    # Regular labels: train/val CSVs + challenge XLSX files.
    reg_train_map = load_regular_csv_labels(
        os.path.join(reg_root, "regular-fundus-training", "regular-fundus-training.csv"))
    reg_val_map = load_regular_csv_labels(
        os.path.join(reg_root, "regular-fundus-validation", "regular-fundus-validation.csv"))

    reg_ch1_map, reg_ch2_map = load_regular_challenge_labels(
        os.path.join(reg_root, "Online-Challenge1&2-Evaluation", "Challenge1_labels.xlsx"),
        os.path.join(reg_root, "Online-Challenge1&2-Evaluation", "Challenge2_labels.xlsx"))

    # ---------- UWF: scan images, build id -> path maps ----------
    uwf_root = os.path.join(data_path, "ultra-widefield_images")
    uwf_train_ids = scan_images(os.path.join(uwf_root, "ultra-widefield-training", "Images"))
    uwf_val_ids = scan_images(os.path.join(uwf_root, "ultra-widefield-validation", "Images"))
    uwf_eval_ids = scan_images(os.path.join(uwf_root, "Online-Challenge3-Evaluation", "Images"))

    uwf_train_map = load_uwf_csv_labels(
        os.path.join(uwf_root, "ultra-widefield-training", "ultra-widefield-training.csv"))
    uwf_val_map = load_uwf_csv_labels(
        os.path.join(uwf_root, "ultra-widefield-validation", "ultra-widefield-validation.csv"))

    uwf_ch3_map = load_uwf_challenge_labels(
        os.path.join(uwf_root, "Online-Challenge3-Evaluation", "Challenge3_labels.xlsx"))

    # -----------------------------------------
    # Regular: train split (training + validation)
    # -----------------------------------------
    print("🔹 Regular Fundus -> train (training + validation)")
    for image_id, img_path in tqdm({**reg_train_ids, **reg_val_ids}.items(), desc="Regular train/val", unit="img"):
        new_name = f"{prefix}_regular_{image_id}.png"
        dst_path = os.path.join(img_dir, new_name)

        # Regular modality: crop then resize (project convention).
        crop_info = crop_resize_save(
            image_path=img_path,
            save_path=dst_path,
            resize=resize,
            crop_threshold=25
        )

        # Labels: the image comes from either the training or validation CSV.
        m = reg_train_map.get(image_id)
        if m is None:
            m = reg_val_map.get(image_id)

        classification = {}
        if m is not None:
            if m.get("dr_level") is not None:
                dr_text = _dr_text(m["dr_level"], image_id, "regular train/val csv")
                if dr_text:  # DR=5 -> None, omit "text"
                    classification["text"] = dr_text
            # Write "quality" only when all four components are present.
            quality = (m["overall"], m["artifact"], m["clarity"], m["field"])
            if all(v is not None for v in quality):
                classification["quality"] = compose_quality_sentence(*quality)

        annotations[new_name] = _make_entry(new_name, dst_path, img_path, crop_info, classification)
        split["train"].append(new_name)

    # -----------------------------------------
    # Regular: test split (Online-Challenge1&2)
    # -----------------------------------------
    print("🔹 Regular Fundus -> test (Online-Challenge1&2-Evaluation)")
    for image_id, img_path in tqdm(reg_eval_ids.items(), desc="Regular test", unit="img"):
        new_name = f"{prefix}_regular_{image_id}.png"
        dst_path = os.path.join(img_dir, new_name)

        crop_info = crop_resize_save(
            image_path=img_path,
            save_path=dst_path,
            resize=resize,
            crop_threshold=25
        )

        classification = {}

        # DR level from Challenge1.
        if image_id in reg_ch1_map:
            dr_text = _dr_text(reg_ch1_map[image_id], image_id, "Challenge1")
            if dr_text:
                classification["text"] = dr_text

        # Quality from Challenge2 (all four components required).
        if image_id in reg_ch2_map:
            q = reg_ch2_map[image_id]
            quality = (q.get("overall"), q.get("artifact"), q.get("clarity"), q.get("field"))
            if None not in quality:
                classification["quality"] = compose_quality_sentence(*quality)

        annotations[new_name] = _make_entry(new_name, dst_path, img_path, crop_info, classification)
        split["test"].append(new_name)

    # -----------------------------------------
    # UWF: train split (training + validation)
    # -----------------------------------------
    print("🔹 Ultra-widefield -> train (training + validation)")
    for image_id, img_path in tqdm({**uwf_train_ids, **uwf_val_ids}.items(), desc="UWF train/val", unit="img"):
        new_name = f"{prefix}_uwf_{image_id}.png"
        dst_path = os.path.join(img_dir, new_name)

        crop_info = _save_uwf_image(img_path, dst_path, resize)

        # Labels: training CSV first, then validation CSV.
        m = uwf_train_map.get(image_id)
        if m is None:
            m = uwf_val_map.get(image_id)

        classification = {}
        if m is not None and m.get("dr_level") is not None:
            dr_text = _dr_text(m["dr_level"], image_id, "UWF train/val csv")
            if dr_text:
                classification["text"] = dr_text
        # UWF has no quality labels.

        annotations[new_name] = _make_entry(new_name, dst_path, img_path, crop_info, classification)
        split["train"].append(new_name)

    # -----------------------------------------
    # UWF: test split (Online-Challenge3)
    # -----------------------------------------
    print("🔹 Ultra-widefield -> test (Online-Challenge3-Evaluation)")
    for image_id, img_path in tqdm(uwf_eval_ids.items(), desc="UWF test", unit="img"):
        new_name = f"{prefix}_uwf_{image_id}.png"
        dst_path = os.path.join(img_dir, new_name)

        crop_info = _save_uwf_image(img_path, dst_path, resize)

        classification = {}
        if image_id in uwf_ch3_map:
            dr_text = _dr_text(uwf_ch3_map[image_id], image_id, "Challenge3")
            if dr_text:
                classification["text"] = dr_text

        annotations[new_name] = _make_entry(new_name, dst_path, img_path, crop_info, classification)
        split["test"].append(new_name)

    # -----------------------------------------
    # Persist split / annotations / stats
    # -----------------------------------------
    os.makedirs(os.path.join(".", "experiments", "stat"), exist_ok=True)

    with open(os.path.join(tar_path, "split.json"), "w", encoding="utf-8") as f:
        json.dump(split, f, indent=4)

    with open(os.path.join(tar_path, "annotations.json"), "w", encoding="utf-8") as f:
        json.dump(annotations, f, indent=4)

    save_classification_stats(annotations, "./experiments/stat/deepdrid.json")

    print(f"✅ DeepDRiD 预处理完成：train {len(split['train'])}，test {len(split['test'])}，共 {len(annotations)} 张。")
    return annotations


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="DeepDRiD 数据集预处理（Regular + UWF）")
    parser.add_argument("--data_path", type=str,
                        default="/home/zhangpinglu/data0/gy/Dataset/public_dataset/DeepDRiD-master",
                        help="DeepDRiD 原始数据根目录")
    parser.add_argument("--tar_path", type=str,
                        default="/home/zhangpinglu/data0/gy/Dataset/public_processed/DeepDRiD",
                        help="预处理结果输出目录")
    parser.add_argument("--prefix", type=str, default="drid",
                        help="输出图片名称前缀")
    parser.add_argument("--resize", type=int, nargs=2, default=[512, 512],
                        help="常规眼底（regular）输出尺寸 (W H)")
    args = parser.parse_args()

    try:
        gather_data(args.data_path, args.tar_path, prefix=args.prefix, resize=tuple(args.resize))
    except Exception as e:
        print(f"❌ 程序中断: {e}")
        raise
