import sys
import os

# The project root must be on sys.path BEFORE importing project-local modules;
# in the original ordering this insert ran after the preprossData import and
# therefore could not help resolve it.
sys.path.insert(0, os.path.abspath(
    os.path.join(os.path.dirname(__file__), '..', '..')))

from preprossData.utils.crop_padding import crop_resize_save, crop_resize_save_with_info  # noqa: E402
from PIL import Image  # noqa: E402
from tqdm import tqdm  # noqa: E402
import numpy as np  # noqa: E402
import json  # noqa: E402
import hashlib  # noqa: E402


class ImageCnter:
    """Assign stable integer ids to images, deduplicated by pixel-content MD5.

    Two visually identical files (same RGB pixel data) hash to the same MD5
    and therefore share one image_id, so duplicates across source folders
    collapse onto a single output image.
    """

    def __init__(self):
        self.image_md5_dict = {}  # md5 hex digest -> assigned image_id
        self.image_cnt = 1        # next id to hand out; ids start at 1

    def get_md5(self, image_path):
        """Return the MD5 of the image's full pixel data.

        The image is converted to RGB first so that the hash is independent
        of the stored channel layout (e.g. RGBA vs. RGB vs. palette), and the
        array shape is mixed in so differently sized images with identical
        raw bytes cannot collide.
        """
        with Image.open(image_path) as img:
            rgb = img.convert("RGB")
            pixels = np.array(rgb)
        digest = hashlib.md5()
        digest.update(str(pixels.shape).encode("utf-8"))
        digest.update(pixels.tobytes())
        return digest.hexdigest()

    def check_exist(self, md5):
        """Return True if this md5 has already been assigned an image_id."""
        return md5 in self.image_md5_dict

    def insert(self, md5):
        """Register a new md5 and return the freshly assigned image_id."""
        image_id = self.image_cnt
        self.image_md5_dict[md5] = image_id
        self.image_cnt += 1
        return image_id

    def get_image_id(self, md5):
        """Return the image_id for md5, or None if it was never inserted."""
        return self.image_md5_dict.get(md5)


def gather_data(data_path, tar_path, prefix_name='hvdrop', resize=(224, 224)):
    """Preprocess the HVDROP dataset into the target layout.

    Processing happens in two passes:
      1. Text annotations: classification images are cropped/resized into
         ``tar_path/images`` and a text diagnosis label is recorded.
      2. Segmentation annotations: segmentation images and their masks are
         cropped/resized (the mask re-uses the image's crop info so both stay
         aligned), and the mask's path relative to ``tar_path`` is recorded
         under the matching diagnosis key.

    Args:
        data_path: Root directory of the raw public dataset.
        tar_path: Output directory; created if missing.
        prefix_name: Filename prefix for renamed output images.
        resize: Target (width, height) for all saved images and masks.

    Returns:
        Dict keyed by new image filename, each value holding the new/original
        paths, the crop info, and the accumulated diagnosis annotations.
    """
    # Initialize target directory layout.
    os.makedirs(tar_path, exist_ok=True)
    tar_image_dir = os.path.join(tar_path, 'images')
    os.makedirs(tar_image_dir, exist_ok=True)

    tar_vascular_dir = os.path.join(tar_path, 'vascular')
    tar_optic_disc_dir = os.path.join(tar_path, 'optic_disc')
    tar_rop_ridge_dir = os.path.join(tar_path, 'rop_ridge')
    os.makedirs(tar_vascular_dir, exist_ok=True)
    os.makedirs(tar_optic_disc_dir, exist_ok=True)
    os.makedirs(tar_rop_ridge_dir, exist_ok=True)

    # Source folders: text (classification) annotations and segmentation sets.
    src_related_path_list_text = [
        "HVDROPDB_RetCam_Neo_Classification/Neo_Normal",
        "HVDROPDB_RetCam_Neo_Classification/Neo_ROP",
        "HVDROPDB_RetCam_Neo_Classification/RetCam_Normal",
        "HVDROPDB_RetCam_Neo_Classification/RetCam_ROP",
    ]
    src_related_path_list_path = [
        "HVDROPDB_RetCam_Neo_Segmentation/HVDROPDB-BV/Neo_Vessels_images",
        "HVDROPDB_RetCam_Neo_Segmentation/HVDROPDB-BV/RetCam_Vessels_images",
        "HVDROPDB_RetCam_Neo_Segmentation/HVDROPDB-OD/Neo_OpticDisc_images",
        "HVDROPDB_RetCam_Neo_Segmentation/HVDROPDB-OD/Retcam_OpticDisc_images",
        "HVDROPDB_RetCam_Neo_Segmentation/HVDROPDB-RIDGE/Neo_Ridge_images",
        "HVDROPDB_RetCam_Neo_Segmentation/HVDROPDB-RIDGE/RetCam_Ridge_images",
    ]
    label_list_text = [
        "normal",
        "Retinopathy of Prematurity positive",
        "normal",
        "Retinopathy of Prematurity positive"
    ]
    label_list_path = [
        "segmentation:vascular",
        "segmentation:vascular",
        "segmentation:optic disc",
        "segmentation:optic disc",
        "segmentation:retinopathy of prematurity ridge",
        "segmentation:retinopathy of prematurity ridge"
    ]

    image_cnter = ImageCnter()
    data_dict = {}

    def _resolve_image_id(image_path):
        """Return the stable image_id for image_path, allocating one if new."""
        md5 = image_cnter.get_md5(image_path)
        if image_cnter.check_exist(md5):
            return image_cnter.get_image_id(md5)
        return image_cnter.insert(md5)

    # --- Pass 1: text (classification) annotations ---
    for src_related_path, group_label in zip(src_related_path_list_text, label_list_text):
        src_abs_path = os.path.join(data_path, src_related_path)
        image_name_list = sorted(os.listdir(src_abs_path))
        for image_ori_name in image_name_list:
            image_ori_path = os.path.join(src_abs_path, image_ori_name)
            image_id = _resolve_image_id(image_ori_path)

            new_image_name = f"{prefix_name}_{image_id}.png"
            new_image_path = os.path.join('images', new_image_name)

            # Crop and resize into tar_path/images; info records the crop box
            # so masks can later be cropped identically.
            info = crop_resize_save(
                image_path=image_ori_path,
                save_path=os.path.join(tar_path, new_image_path),
                resize=resize,
                crop_threshold=25
            )

            data_dict[new_image_name] = {
                'image_name': new_image_name,
                'image_path': new_image_path,
                'original_path': os.path.relpath(image_ori_path, data_path),
                'crop_info': info,
                'diagnosis': {
                    'text': group_label
                }
            }

    # --- Pass 2: segmentation annotations (masks) ---
    # Each "<x>_images" folder has a sibling "<x>_masks" folder with
    # same-named mask files.
    for src_related_path, group_label in zip(src_related_path_list_path, label_list_path):
        src_abs_path = os.path.join(data_path, src_related_path)
        masks_abs_path = src_abs_path.replace("_images", "_masks")
        image_name_list = sorted(os.listdir(src_abs_path))
        for image_ori_name in image_name_list:
            image_ori_path = os.path.join(src_abs_path, image_ori_name)
            image_id = _resolve_image_id(image_ori_path)
            new_image_name = f"{prefix_name}_{image_id}.png"
            new_image_path = os.path.join('images', new_image_name)

            # Save the segmentation image and keep its crop info for the mask.
            info = crop_resize_save(
                image_path=image_ori_path,
                save_path=os.path.join(tar_path, new_image_path),
                resize=resize,
                crop_threshold=25
            )
            if new_image_name not in data_dict:
                data_dict[new_image_name] = {
                    'image_name': new_image_name,
                    'image_path': new_image_path,
                    'original_path': os.path.relpath(image_ori_path, data_path),
                    'crop_info': info,
                    'diagnosis': {}
                }
            # Provisionally point the segmentation label at the image itself;
            # overwritten below with the mask path when the mask exists.
            data_dict[new_image_name]['diagnosis'][group_label] = new_image_path

            # The mask is expected under the sibling masks folder with the
            # same filename as the image.
            mask_ori_path = os.path.join(masks_abs_path, image_ori_name)
            if os.path.exists(mask_ori_path):
                # Pick the target folder from the segmentation label.
                if group_label == "segmentation:vascular":
                    mask_save_dir = tar_vascular_dir
                elif group_label == "segmentation:optic disc":
                    mask_save_dir = tar_optic_disc_dir
                elif "ridge" in group_label.lower():
                    mask_save_dir = tar_rop_ridge_dir
                else:
                    raise ValueError(f"Unexpect group label {group_label}")
                os.makedirs(mask_save_dir, exist_ok=True)
                mask_save_path = os.path.join(mask_save_dir, new_image_name)
                # Crop/resize the mask with the image's crop info so the two
                # stay pixel-aligned.
                crop_resize_save_with_info(
                    image_path=mask_ori_path,
                    save_path=mask_save_path,
                    info=info,
                    resize=resize
                )
                # Record the mask path relative to tar_path.
                data_dict[new_image_name]['diagnosis'][group_label] = os.path.relpath(
                    mask_save_path, tar_path)
            else:
                print(f"Mask file not found for image: {image_ori_path}")

    return data_dict


if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser(description="数据集预处理，并生成标注信息")
    parser.add_argument("--data_path", type=str, default="../Dataset/public_dataset",
                        help="原始数据集路径，默认为 '../Dataset/public_dataset'")
    parser.add_argument("--tar_path", type=str, default="../Dataset/processsed224_public/hvdROP",
                        help="处理后数据存放路径，默认为 '../Dataset/processsed224_public/hvdROP'")
    parser.add_argument("--prefix_name", type=str, default="hvdrop",
                        help="处理后图片名称的前缀，默认为 'hvdrop'")
    parser.add_argument("--resize", type=int, nargs=2, default=[224, 224],
                        help="目标尺寸（宽 高），默认为 224 224")
    args = parser.parse_args()

    processed_data_dict = gather_data(
        data_path=args.data_path,
        tar_path=args.tar_path,
        prefix_name=args.prefix_name,
        resize=tuple(args.resize)
    )
    json_save_path = os.path.join(args.tar_path, "annotations.json")
    # Explicit UTF-8 avoids the locale-dependent default encoding (e.g. cp1252
    # on Windows); ensure_ascii=False writes any non-ASCII label text verbatim
    # and is byte-identical for the current all-ASCII annotation values.
    with open(json_save_path, "w", encoding="utf-8") as f:
        json.dump(processed_data_dict, f, indent=4, ensure_ascii=False)
    print(f"Preprocessing completed. Annotations saved to {json_save_path}")
