import sys, os
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..')))
import hashlib
import json
import csv
import argparse
from preprossData.utils.crop_padding import crop_resize_save, crop_resize_save_with_info
from PIL import Image

def compute_md5(image_path):
    """
    Compute the MD5 digest of an image's pixel content.

    The image is opened and converted to RGB first, so files with
    identical pixels but different modes or metadata produce the same
    hash; the digest is taken over the raw RGB byte stream.
    """
    with Image.open(image_path) as handle:
        raw_bytes = handle.convert("RGB").tobytes()
    return hashlib.md5(raw_bytes).hexdigest()

def parse_csv(csv_path):
    """
    Generic CSV parser.

    Returns a dict mapping the stripped value of the "Image No" (or
    "Image name") column of each row to the full row as a dict. Rows
    whose key is missing or blank are skipped.
    """
    rows_by_key = {}
    with open(csv_path, newline='', encoding='utf-8') as handle:
        for record in csv.DictReader(handle):
            identifier = (record.get("Image No") or record.get("Image name") or "").strip()
            if identifier:
                rows_by_key[identifier] = record
    return rows_by_key

def gather_data(data_path, tar_path, resize=(224,224), prefiex='idrid_'):
    """
    Preprocess the IDRiD dataset, covering three tasks:
      1. Segmentation: process original images and their lesion masks.
      2. Grading: extract disease-grade labels from CSVs and merge
         labels for images that appear in more than one task.
      3. Localization: read the localization CSVs and remap coordinates
         into the cropped/resized frame using recorded crop info.

    Outputs are written under tar_path:
      - images/: cropped + resized images.
      - segmentation mask folders (microaneurysms, haemorrhages,
        hard_exudates, soft_exudates, optic_disc).
      - split/*.json: official train/test splits for each task.
      - annotations.json: all annotations (segmentation, grading,
        localization) keyed by prefixed image name.

    Args:
        data_path (str): root directory of the raw IDRiD dataset.
        tar_path (str): output directory for processed data.
        resize (tuple): target (width, height) after cropping.
        prefiex (str): prefix prepended to output file names.
            NOTE(review): parameter name is a typo for "prefix"; kept
            as-is for caller compatibility.

    Returns:
        data_dict (dict): aggregated annotation dictionary.
    """
    # Target root directory
    os.makedirs(tar_path, exist_ok=True)
    
    # Create the shared images directory
    images_dir = os.path.join(tar_path, "images")
    os.makedirs(images_dir, exist_ok=True)
    
    # Create target directories for the segmentation masks
    seg_dirs = {
        'segmentation:microaneurysms': os.path.join(tar_path, "microaneurysms"),
        'segmentation:haemorrhages': os.path.join(tar_path, "haemorrhages"),
        'segmentation:hard exudates': os.path.join(tar_path, "hard_exudates"),
        'segmentation:soft exudates': os.path.join(tar_path, "soft_exudates"),
        'segmentation:optic disc': os.path.join(tar_path, "optic_disc")
    }
    for d in seg_dirs.values():
        os.makedirs(d, exist_ok=True)
    
    # Train/test split lists for each of the three tasks
    official_split_seg = {}    # segmentation split
    official_split_cls = {}    # grading split
    official_split_loc = {}    # localization split
    # md5_dict detects images shared across tasks so each is saved once
    md5_dict = {}  # md5 -> target_image_name
    # Crop info (returned by crop_resize_save) per saved image; used later
    # to remap localization coordinates into the resized frame
    crop_info_dict = {}
    # Final annotation dictionary, keyed by prefixed image name
    data_dict = {}
    
    #############################
    # 1. Segmentation task
    #############################
    split_map = {
        'a. Training Set': 'train',
        'b. Testing Set': 'test'
    }
    segmentation_label_dict = {
        '1. Microaneurysms': 'segmentation:microaneurysms',
        '2. Haemorrhages':  'segmentation:haemorrhages',
        '3. Hard Exudates':  'segmentation:hard exudates',
        '4. Soft Exudates':  'segmentation:soft exudates',
        '5. Optic Disc':     'segmentation:optic disc'
    }
    # Mask filename suffix per groundtruth subfolder
    mask_prefix = {
        '1. Microaneurysms': '_MA.tif',
        '2. Haemorrhages':  '_HE.tif',
        '3. Hard Exudates':  '_EX.tif',
        '4. Soft Exudates':  '_SE.tif',
        '5. Optic Disc':     '_OD.tif'
    }
    seg_root = os.path.join(data_path, 'A. Segmentation')
    for split_folder, split_key in split_map.items():
        image_dir = os.path.join(seg_root, '1. Original Images', split_folder)
        image_name_list = sorted(os.listdir(image_dir))
        official_split_seg.setdefault(split_key, [])
        
        for image_name in image_name_list:
            src_image_path = os.path.join(image_dir, image_name)
            img_md5 = compute_md5(src_image_path)
            # Duplicate check — the segmentation set is processed first,
            # so a hit here means the pipeline itself is broken.
            if img_md5 in md5_dict:
                target_image_name = md5_dict[img_md5]
                raise ValueError(f"这里不可能出现重复，如果出现重复，一定是代码哪里写错了")
            else:
                # Prepend the dataset prefix to the target file name
                target_image_name = prefiex + image_name
                md5_dict[img_md5] = target_image_name
                dest_image_path = os.path.join(images_dir, target_image_name)
                # Save cropped/resized image and keep the crop info so the
                # masks below are cropped identically.
                info = crop_resize_save(src_image_path, dest_image_path, resize=resize, crop_threshold=25)
                crop_info_dict[target_image_name] = info
            official_split_seg[split_key].append(target_image_name)
            
            # Process each lesion/structure mask for this image
            seg_info = {}
            for seg_label, diagnosis_key in segmentation_label_dict.items():
                # Mask name = image stem + task-specific suffix
                mask_file = image_name[:-4] + mask_prefix[seg_label]
                mask_path = os.path.join(
                    seg_root,
                    '2. All Segmentation Groundtruths',
                    split_folder,
                    seg_label,
                    mask_file
                )
                # Not every image has every mask type; skip missing ones.
                if not os.path.exists(mask_path):
                    print(f"Warning: Mask file does not exist: {mask_path}")
                    continue
                dest_mask_path = os.path.join(seg_dirs[diagnosis_key], target_image_name)
                # Apply the same crop box as the source image
                crop_resize_save_with_info(mask_path, dest_mask_path, info=info, resize=resize)
                seg_info[diagnosis_key] = os.path.relpath(dest_mask_path, tar_path)
            
            # Record segmentation annotations (paths are relative to tar_path)
            data_dict[target_image_name] = {
                'image_name': target_image_name,
                'image_path': os.path.relpath(os.path.join(images_dir, target_image_name), tar_path),
                'original_path': os.path.relpath(src_image_path, data_path),
                'crop_info':info,
                'diagnosis': seg_info
            }
    
    #############################
    # 2. Grading task
    #############################
    grading_root = os.path.join(data_path, 'B. Disease Grading')
    # Maps raw CSV grade digits to human-readable diagnosis text
    label_parser = {
        'Retinopathy grade': {
            '0': "normal",
            '1': "mild Diabetic Retinopathy",
            '2': "moderate Diabetic Retinopathy",
            '3': "severe Diabetic Retinopathy",
            '4': "proliferative Diabetic Retinopathy"
        },
        'Risk of macular edema': {
            '0': "normal",
            '1': "Mild/Moderate Diabetic Macular Edema",
            '2': "Severe Diabetic Macular Edema"
        }
    }
    grading_csv = {
        'train': os.path.join(grading_root, '2. Groundtruths', 'a. IDRiD_Disease Grading_Training Labels.csv'),
        'test': os.path.join(grading_root, '2. Groundtruths', 'b. IDRiD_Disease Grading_Testing Labels.csv')
    }
    grading_labels = {}
    for split_key in grading_csv:
        grading_labels[split_key] = parse_csv(grading_csv[split_key])
    official_split_cls = {'train': [], 'test': []}

    for split_folder, split_key in split_map.items():
        image_dir = os.path.join(grading_root, '1. Original Images', split_folder)
        image_name_list = sorted(os.listdir(image_dir))
        for image_name in image_name_list:
            image_id = os.path.splitext(image_name)[0]
            src_image_path = os.path.join(image_dir, image_name)
            img_md5 = compute_md5(src_image_path)
            if img_md5 not in md5_dict:
                # First time we see this image: save it with the prefix
                target_image_name = prefiex + image_name
                md5_dict[img_md5] = target_image_name
                dest_image_path = os.path.join(images_dir, target_image_name)
                # NOTE(review): crop_threshold=15 here, but 25 everywhere
                # else in this function — confirm the difference is intended.
                info=crop_resize_save(src_image_path, dest_image_path, resize=resize, crop_threshold=15)
                crop_info_dict[target_image_name]=info
            else:
                # Image already processed by the segmentation task
                target_image_name = md5_dict[img_md5]
            grading_info = grading_labels[split_key].get(image_id)
            if grading_info is None:
                raise ValueError(f"Grading info for image id '{image_id}' not found in CSV for split '{split_key}'.")
            # NOTE(review): the second column key carries a trailing space —
            # presumably matching the raw CSV header; verify against the file.
            # Also, .get(..., None).strip() would raise AttributeError if the
            # column were missing rather than empty.
            grade1 = grading_info.get('Retinopathy grade', None).strip()
            grade2 = grading_info.get('Risk of macular edema ', None).strip()
            if (not grade1) or (not grade2):
                raise ValueError(f"{image_name} has a empty grade reti {grade1}, ema {grade2}")
                
            def combine_labels(l1, l2):
                # Merge retinopathy grade and macular-edema risk into one
                # text label; "normal" only when both are grade 0.
                if l1 == "0" and l2 == "0":
                    return label_parser['Retinopathy grade']['0']
                elif l1 != "0" and l2 == "0":
                    return label_parser['Retinopathy grade'][l1]
                elif l1 == "0" and l2 != "0":
                    return label_parser['Risk of macular edema'][l2]
                else:
                    return label_parser['Retinopathy grade'][l1] + ", " + label_parser['Risk of macular edema'][l2]
            combined_label = combine_labels(grade1, grade2)
            if combined_label == "" or combined_label.lower() == "normal":
                combined_label = "normal"
            if target_image_name in data_dict:
                # Image already has an annotation entry (from segmentation):
                # merge grading text without duplicating existing labels.
                existing = data_dict[target_image_name].get('diagnosis', {}).get('text', "")
                if not existing or existing.lower() == "normal":
                    data_dict[target_image_name]['diagnosis']['text'] = combined_label
                else:
                    if combined_label.lower() != "normal" and combined_label not in existing:
                        data_dict[target_image_name]['diagnosis']['text'] = existing + ", " + combined_label
            else:
                # NOTE(review): this re-crops with threshold 25, overwriting
                # the threshold-15 image saved above for the same file, while
                # crop_info_dict still holds the threshold-15 info — the two
                # may disagree; confirm which crop is authoritative.
                dest_image_path = os.path.join(images_dir, target_image_name)
                crop_info=crop_resize_save(src_image_path, dest_image_path, resize=resize, crop_threshold=25)
                data_dict[target_image_name] = {
                    'image_name': target_image_name,
                    'image_path': os.path.relpath(dest_image_path, tar_path),
                    'original_path': os.path.relpath(src_image_path, data_path),
                    'crop_info':crop_info,
                    'diagnosis': {'text': combined_label}
                }
            official_split_cls[split_key].append(target_image_name)
    
    #############################
    # 3. Localization task
    #############################
    # Parse the localization CSVs first, then walk the images.
    loc_root = os.path.join(data_path, 'C. Localization')
    official_split_loc = {'train': [], 'test': []}


    # Localization sub-tasks and their diagnosis keys
    label_dict_path = {
        '1. Optic Disc Center Location': 'location:optic disc',
        '2. Fovea Center Location': 'location:fovea'
    }

    # Parse the train/test CSVs for each sub-task
    loc_csv_files = {}
    for task, diagnosis_key in label_dict_path.items():
        if task == '1. Optic Disc Center Location':
            csv_train = os.path.join(loc_root, '2. Groundtruths', task, 'a. IDRiD_OD_Center_Training Set_Markups.csv')
            csv_test = os.path.join(loc_root, '2. Groundtruths', task, 'b. IDRiD_OD_Center_Testing Set_Markups.csv')
        elif task == '2. Fovea Center Location':
            csv_train = os.path.join(loc_root, '2. Groundtruths', task, 'IDRiD_Fovea_Center_Training Set_Markups.csv')
            csv_test = os.path.join(loc_root, '2. Groundtruths', task, 'IDRiD_Fovea_Center_Testing Set_Markups.csv')
        else:
            continue
        loc_csv_files[task] = {
            'train': parse_csv(csv_train),
            'test': parse_csv(csv_test)
        }

    # Walk all images under '1. Original Images'
    image_dir = os.path.join(loc_root, '1. Original Images')
    
    split_folder_dict={
        'a. Training Set':'train',
        'b. Testing Set':'test'
    }
    # Process images for the localization task
    for split_folder in split_folder_dict:
        split_name=split_folder_dict[split_folder]
        image_name_list = sorted(os.listdir(os.path.join(image_dir,split_folder)))
        for image_name in image_name_list:
            
            # Compute the image MD5 to detect images seen in earlier tasks
            src_image_path = os.path.join(image_dir, split_folder,image_name)
            img_md5 = compute_md5(src_image_path)

            # Already processed by a previous task: reuse the saved image
            if img_md5 in md5_dict:
                target_image_name = md5_dict[img_md5]
            else:
                # Not seen before: save it with the prefixed target name
                target_image_name = prefiex + image_name
                md5_dict[img_md5] = target_image_name
                dest_image_path = os.path.join(images_dir, target_image_name)
                info = crop_resize_save(src_image_path, dest_image_path, resize=resize, crop_threshold=25)
                crop_info_dict[target_image_name] = info

            official_split_loc[split_name].append(target_image_name)
            # Handle both sub-tasks (Optic Disc and Fovea) for this image
            for task, diagnosis_key in label_dict_path.items():
                # NOTE(review): image_name includes the file extension while
                # parse_csv keys do not, so this membership test likely never
                # matches and split_key is always 'test' — verify; the
                # intended check may be `image_id in ...`.
                split_key = 'train' if image_name in loc_csv_files[task]['train'] else 'test'
                csv_data = loc_csv_files[task][split_key]
                image_id = os.path.splitext(image_name)[0]  # image id without extension
                loc_info = csv_data.get(image_id)

                # If localization info exists, remap the raw coordinates
                # into the cropped/resized frame.
                if loc_info:
                    # NOTE(review): column names with odd spacing presumably
                    # match the raw CSV headers — confirm against the files.
                    x = float(loc_info.get('X- Coordinate', 0))
                    y = float(loc_info.get('Y - Coordinate', 0))

                    # Remap via the recorded crop box, then scale to resize
                    if target_image_name in crop_info_dict:
                        top, bottom, left, right = crop_info_dict[target_image_name].get('crop_box', (0, 0, 0, 0))
                        cropped_width = right - left
                        cropped_height = bottom - top
                        adjusted_x = (x - left) * (resize[0] / cropped_width)
                        adjusted_y = (y - top) * (resize[1] / cropped_height)
                    else:
                        # NOTE(review): the assignment below is dead — the
                        # raise on the next line always fires in this branch.
                        adjusted_x, adjusted_y = x, y
                        raise ValueError(f"没有找到裁剪信息：{target_image_name}")

                    # Update the existing annotation, or create a new entry
                    if target_image_name in data_dict:
                        data_dict[target_image_name]['diagnosis'][diagnosis_key] = {'x': adjusted_x, 'y': adjusted_y}
                    else:
                        # NOTE(review): dest_image_path may be stale here — it
                        # is only set in the md5-miss branch above, so a cache
                        # hit reuses the path from a previous iteration; verify.
                        data_dict[target_image_name] = {
                            'image_name': target_image_name,
                            'image_path': os.path.relpath(dest_image_path, tar_path),
                            'original_path': os.path.relpath(src_image_path, data_path),
                            'crop_info':crop_info_dict[target_image_name],
                            'diagnosis': {diagnosis_key: {'x': adjusted_x, 'y': adjusted_y}}
                        }


    # Persist the per-task splits and the aggregated annotations
    split_dir = os.path.join(tar_path, "split")
    os.makedirs(split_dir, exist_ok=True)
    segmentation_split_json_path = os.path.join(split_dir, "segmentation_official.json")
    with open(segmentation_split_json_path, "w", encoding="utf-8") as f:
        json.dump(official_split_seg, f, indent=4)
    grading_split_json_path = os.path.join(split_dir, "grading_official.json")
    with open(grading_split_json_path, "w", encoding="utf-8") as f:
        json.dump(official_split_cls, f, indent=4)
    localization_split_json_path = os.path.join(split_dir, "localization_official.json")
    with open(localization_split_json_path, "w", encoding="utf-8") as f:
        json.dump(official_split_loc, f, indent=4)
    annotations_json_path = os.path.join(tar_path, "annotations.json")
    with open(annotations_json_path, "w", encoding="utf-8") as f:
        json.dump(data_dict, f, indent=4)
    
    return data_dict

if __name__ == "__main__":
    # CLI entry point: parse source/target paths and run preprocessing.
    arg_parser = argparse.ArgumentParser(description="IDRiD 数据集预处理程序")
    arg_parser.add_argument(
        "--data_path",
        type=str,
        default="/mnt/e/Document_Workspace/Dataset/public_dataset/IDRiD/",
        help="新数据集根目录，要求其下包含类别文件夹",
    )
    arg_parser.add_argument(
        "--tar_path",
        type=str,
        default="/mnt/e/Document_Workspace/Dataset/processed224_new/IDRiD",
        help="预处理后数据存放目录",
    )
    cli_args = arg_parser.parse_args()

    annotations = gather_data(cli_args.data_path, cli_args.tar_path)
    print("Preprocessing completed.")
