# -*- coding: utf-8 -*-
"""
功能:
    对 BIDR 糖尿病视网膜病变数据集进行预处理。
    数据集结构如下：
        BIDR/
        ├── train/
        │   ├── DR/
        │   └── No_DR/
        ├── valid/
        │   ├── DR/
        │   └── No_DR/
        └── test/
            ├── DR/
            └── No_DR/

    处理流程：
        1. 将 train/ 与 valid/ 合并为统一的训练集；
        2. 读取每个类别文件夹中的 JPG 图片；
        3. 对图像执行 crop + resize 操作（512x512）；
        4. 命名为 {prefix}_{split}_{序号}.png 并保存；
        5. 构建 annotations.json 和 split.json；
        6. 输出统计结果到 ./experiments/stat/bidr.json。

使用示例:
    python bidr_preprocess.py \
        --data_path /home/zhangpinglu/data0/gy/Dataset/public_dataset/BIDR \
        --tar_path /home/zhangpinglu/data0/gy/Dataset/public_processed/BIDR \
        --prefix bidr
"""

import os
import sys
import json
import argparse
from tqdm import tqdm
from data_preprocess.utils.crop import crop_resize_save
from data_preprocess.utils.stat import save_classification_stats


def _class_image_dir(data_path, split, cls_name):
    """Return (class_dir, sorted image names) for one class folder.

    Args:
        data_path (str): Dataset root directory.
        split (str): Split folder name ("train" / "valid" / "test").
        cls_name (str): Class folder name ("DR" / "No_DR").

    Returns:
        tuple[str, list[str]]: Absolute class directory and sorted file names.

    Raises:
        FileNotFoundError: If the class folder does not exist.
    """
    cls_dir = os.path.join(data_path, split, cls_name)
    if not os.path.exists(cls_dir):
        raise FileNotFoundError(f"路径不存在: {cls_dir}")
    # Sorting makes the sequential train numbering reproducible across runs.
    return cls_dir, sorted(os.listdir(cls_dir))


def _convert_image(src_path, tar_path, new_image_name, cls_text):
    """Crop + resize (512x512) one image, save it, and build its annotation.

    Args:
        src_path (str): Path to the original image.
        tar_path (str): Output root; the image is saved under tar_path/images.
        new_image_name (str): Target file name ({prefix}_{split}_{id}.png).
        cls_text (str): Diagnosis text for the classification annotation.

    Returns:
        dict: Annotation record for the converted image.

    Raises:
        RuntimeError: If crop_resize_save fails for this image.
    """
    dest_abs_path = os.path.join(tar_path, "images", new_image_name)
    try:
        crop_info = crop_resize_save(
            image_path=src_path,
            save_path=dest_abs_path,
            resize=(512, 512),
            crop_threshold=25
        )
    except Exception as e:
        # Chain the cause so the underlying error keeps its traceback.
        raise RuntimeError(f"处理图像出错: {src_path}, 错误信息: {str(e)}") from e
    return {
        "image_name": new_image_name,
        "image_path": dest_abs_path,
        "original_path": src_path,
        "crop_info": crop_info,
        "diagnosis": {
            "classification": {"text": cls_text}
        }
    }


def gather_data(data_path, tar_path, prefix="bidr"):
    """Preprocess the BIDR diabetic-retinopathy dataset.

    Merges train/ and valid/ into a single training split, crops and
    resizes every image to 512x512, saves images under tar_path/images,
    and writes split.json, annotations.json plus a per-class statistics
    file under ./experiments/stat/.

    Args:
        data_path (str): Raw dataset root (contains train/ valid/ test/).
        tar_path (str): Directory where processed data is written.
        prefix (str): Prefix for output image names and the stat file.

    Returns:
        dict: Annotation records keyed by new image name.

    Raises:
        FileNotFoundError: If an expected class directory is missing.
        RuntimeError: If an image fails to crop/resize.
    """
    os.makedirs(tar_path, exist_ok=True)
    os.makedirs(os.path.join(tar_path, "images"), exist_ok=True)

    split_dict = {"train": [], "test": []}
    data_dict = {}

    # Class folder name -> diagnosis text stored in the annotations.
    label_map = {
        "No_DR": "normal",
        "DR": "Diabetic Retinopathy"
    }

    # ============ train + valid merged into one "train" split ============
    print("\n🔹 开始处理训练集(train + valid)")
    idx = 0
    for split in ["train", "valid"]:
        for cls_name, cls_text in label_map.items():
            cls_dir, image_list = _class_image_dir(data_path, split, cls_name)
            for image_name in tqdm(image_list, desc=f"{split}-{cls_name}", unit="images"):
                src_path = os.path.join(cls_dir, image_name)
                # Sequential numbering avoids name clashes between the
                # merged train/ and valid/ folders.
                new_image_name = f"{prefix}_train_{idx:05d}.png"
                data_dict[new_image_name] = _convert_image(
                    src_path, tar_path, new_image_name, cls_text)
                split_dict["train"].append(new_image_name)
                idx += 1

    # ============ test ============
    print("\n🔹 开始处理测试集(test)")
    for cls_name, cls_text in label_map.items():
        cls_dir, image_list = _class_image_dir(data_path, "test", cls_name)
        for image_name in tqdm(image_list, desc=f"test-{cls_name}", unit="images"):
            src_path = os.path.join(cls_dir, image_name)
            image_id = os.path.splitext(image_name)[0]
            # NOTE(review): test names reuse the original file stem; stems
            # are assumed unique across DR/No_DR — a clash would silently
            # overwrite an earlier record. Verify against the dataset.
            new_image_name = f"{prefix}_test_{image_id}.png"
            data_dict[new_image_name] = _convert_image(
                src_path, tar_path, new_image_name, cls_text)
            split_dict["test"].append(new_image_name)

    # ============ save JSON ============
    print("\n💾 保存 split.json 与 annotations.json")
    with open(os.path.join(tar_path, "split.json"), "w", encoding="utf-8") as f:
        json.dump(split_dict, f, indent=4)
    with open(os.path.join(tar_path, "annotations.json"), "w", encoding="utf-8") as f:
        json.dump(data_dict, f, indent=4)

    # ============ save statistics ============
    os.makedirs("./experiments/stat", exist_ok=True)
    # Fix: the stat file follows the prefix argument instead of a
    # hard-coded "bidr.json" (identical output for the default prefix).
    save_classification_stats(
        data_dict, os.path.join("./experiments/stat", f"{prefix}.json"))

    print("\n✅ 数据预处理完成！共计:")
    print(f"  训练集: {len(split_dict['train'])} 张")
    print(f"  测试集: {len(split_dict['test'])} 张")

    return data_dict


if __name__ == "__main__":
    # CLI entry point: collect paths + prefix, then run the full pipeline.
    cli = argparse.ArgumentParser(description="BIDR 数据集预处理")
    cli.add_argument(
        "--data_path",
        type=str,
        default="/home/zhangpinglu/data0/gy/Dataset/public_dataset/BIDR/Diagnosis of Diabetic Retinopathy",
        help="原始数据集根目录，包含 train/ valid/ test 子目录",
    )
    cli.add_argument(
        "--tar_path",
        type=str,
        default="/home/zhangpinglu/data0/gy/Dataset/public_processed/BIDR",
        help="预处理后数据存放目录",
    )
    cli.add_argument(
        "--prefix",
        type=str,
        default="bidr",
        help="输出图片名前缀，默认为 'bidr'",
    )
    opts = cli.parse_args()

    annotations = gather_data(opts.data_path, opts.tar_path, prefix=opts.prefix)