import json
import os
import random
import shutil
from collections import defaultdict

import numpy as np
from PIL import Image
from tqdm import tqdm

from data_preprocess.utils.crop import crop_resize_save


# ========== Step 1: patient-level train/test split ==========
def generate_split(data_path, test_ratio=0.2, seed=42):
    """Split images into train/test sets grouped by patient ID.

    The patient ID is the part of the filename before the first ``'_'``,
    so every image of one patient lands in the same split (no
    patient-level leakage between train and test).

    Args:
        data_path: Root directory; each subdirectory is one disease class
            containing ``.jpg``/``.png`` images named ``<patientID>_*``.
        test_ratio: Fraction of patient IDs assigned to the test split.
        seed: Random seed for a reproducible shuffle.

    Returns:
        dict with keys ``"train"`` and ``"test"``, each a list of image
        filenames.
    """
    random.seed(seed)

    # Directories excluded from the dataset (unusable / other fundus
    # disease / needs further examination).
    ignore_dirs = {"不可用", "其他眼底病", "进一步检查"}
    id_to_images = defaultdict(list)

    # Sort the listings: os.listdir order is filesystem-dependent, which
    # would make the "seeded" split non-reproducible across machines.
    for disease_dir in sorted(os.listdir(data_path)):
        disease_path = os.path.join(data_path, disease_dir)
        if not os.path.isdir(disease_path) or disease_dir in ignore_dirs:
            continue

        for img_name in sorted(os.listdir(disease_path)):
            if not img_name.lower().endswith(('.jpg', '.png')):
                continue
            img_id = img_name.split('_')[0]  # patient ID prefix
            id_to_images[img_id].append(img_name)

    all_ids = sorted(id_to_images)  # deterministic order before shuffling
    random.shuffle(all_ids)

    test_size = int(len(all_ids) * test_ratio)
    test_ids = set(all_ids[:test_size])

    split_result = {"train": [], "test": []}
    for pid, imgs in id_to_images.items():
        split_result["test" if pid in test_ids else "train"].extend(imgs)

    print(f"✅ Train: {len(split_result['train'])} images | Test: {len(split_result['test'])} images")

    return split_result


# ========== bbox distance and merging logic ==========
def bbox_distance(b1, b2):
    """Return the minimum gap between two axis-aligned boxes.

    Boxes are (x_min, y_min, x_max, y_max) in normalized [0, 1]
    coordinates. Overlapping (or touching) boxes have distance 0.
    """
    ax0, ay0, ax1, ay1 = b1
    bx0, by0, bx1, by1 = b2

    # The boxes overlap iff their projections overlap on both axes.
    overlap_x = not (ax1 < bx0 or bx1 < ax0)
    overlap_y = not (ay1 < by0 or by1 < ay0)
    if overlap_x and overlap_y:
        return 0.0

    # Closest-edge separation per axis (0 when the projections overlap).
    gap_x = max(bx0 - ax1, ax0 - bx1, 0)
    gap_y = max(by0 - ay1, ay0 - by1, 0)
    return np.sqrt(gap_x ** 2 + gap_y ** 2)


def merge_close_bboxes(bboxes, threshold=0.003):
    """Greedily merge boxes that lie within `threshold` of each other.

    Single forward pass: each not-yet-consumed box absorbs every later
    box whose distance to the current (possibly already grown) box is
    at most ``threshold``. Returns the list of surviving boxes.
    """
    result = []
    consumed = set()
    for anchor_idx, anchor in enumerate(bboxes):
        if anchor_idx in consumed:
            continue
        current = anchor
        for other_idx in range(anchor_idx + 1, len(bboxes)):
            if other_idx in consumed:
                continue
            other = bboxes[other_idx]
            if bbox_distance(current, other) <= threshold:
                # Absorb the other box into the running union.
                current = [
                    min(current[0], other[0]),
                    min(current[1], other[1]),
                    max(current[2], other[2]),
                    max(current[3], other[3]),
                ]
                consumed.add(other_idx)
        result.append(current)
    return result


# ========== Step 2: image processing and annotation consolidation ==========
def preprocess_dataset(data_root, tar_dir, split=None, crop_size=(512, 512)):
    """Crop/resize every annotated image and build a unified annotation file.

    For each image listed in ``final_sign_result.json``, the image is
    cropped and resized via ``crop_resize_save``; lesion shapes are mapped
    into the cropped frame as normalized [0, 1] bboxes, nearby same-class
    boxes are merged, and everything is written to
    ``<tar_dir>/annotations.json``.

    Args:
        data_root: Root of the original dataset (one subdirectory per
            disease class plus ``final_sign_result.json``).
        tar_dir: Output directory; any existing content is removed.
        split: Currently unused; kept for interface compatibility (the
            train/test split is saved separately as ``split.json``).
        crop_size: Target (width, height) passed to ``crop_resize_save``.
    """
    # Start from a clean output directory. shutil.rmtree replaces the old
    # shell "rm -rf" so paths with spaces/special characters are safe and
    # the code is portable.
    if os.path.isdir(tar_dir):
        shutil.rmtree(tar_dir)
    os.makedirs(os.path.join(tar_dir, 'images'), exist_ok=True)

    json_path = os.path.join(data_root, 'final_sign_result.json')
    with open(json_path, 'r', encoding='utf-8') as f:
        json_data_ori = json.load(f)

    # Normalize keys: annotation keys may contain spaces while the files
    # on disk use underscores.
    json_data = {k.replace(' ', '_'): v for k, v in json_data_ori.items()}

    # Directories excluded from the dataset (unusable / other fundus
    # disease / needs further examination).
    ignore_dirs = {"不可用", "其他眼底病", "进一步检查"}
    image_label_folder = [f for f in sorted(os.listdir(data_root))
                          if os.path.isdir(os.path.join(data_root, f)) and f not in ignore_dirs]

    annotations = {}

    for folder in image_label_folder:
        folder_path = os.path.join(data_root, folder)
        for image_name in tqdm(sorted(os.listdir(folder_path)), desc=f'Processing {folder}'):
            src_image_path = os.path.join(folder_path, image_name)
            if image_name not in json_data:
                continue  # keep only images that carry sign annotations

            dest_image_path = os.path.join(tar_dir, 'images', image_name)
            crop_info = crop_resize_save(
                src_image_path,
                dest_image_path,
                resize=crop_size,
                crop_threshold=25
            )
            top, bottom, left, right = crop_info['crop_box']

            lesions = defaultdict(list)
            for item in json_data[image_name]:
                try:
                    lesion_name = item['props']['name']
                    shape = item['shape']

                    # Map original-image coordinates into the cropped frame
                    # and normalize to [0, 1].
                    if 'width' in shape and 'height' in shape:
                        # Rectangle annotation.
                        x1 = (shape['x'] - left) / (right - left)
                        y1 = (shape['y'] - top) / (bottom - top)
                        w = shape['width'] / (right - left)
                        h = shape['height'] / (bottom - top)
                        bbox = [x1, y1, x1 + w, y1 + h]
                    elif 'sr' in shape:
                        # Circle annotation: center (x, y) with radius 'sr',
                        # converted to its bounding square.
                        x1 = (shape['x'] - shape['sr'] - left) / (right - left)
                        y1 = (shape['y'] - shape['sr'] - top) / (bottom - top)
                        w = 2 * shape['sr'] / (right - left)
                        h = 2 * shape['sr'] / (bottom - top)
                        bbox = [x1, y1, x1 + w, y1 + h]
                    else:
                        continue  # unknown shape type, skip

                    bbox = [max(0, min(1, v)) for v in bbox]  # clip to [0,1]
                    lesions[lesion_name].append(bbox)

                except KeyError as e:
                    print(f"⚠️ KeyError in {image_name}: {e}")
                    continue

            # Merge same-class boxes that are very close to each other.
            merged_lesions = {ln: merge_close_bboxes(bboxes, threshold=0.003)
                              for ln, bboxes in lesions.items()}

            annotations[image_name] = {
                'image_name': image_name,
                'image_path': dest_image_path,
                'original_path': src_image_path,
                'crop_info': crop_info,
                'diagnosis': {
                    # A list so multiple diseases per image can be
                    # supported later.
                    'classification': {'text': [folder]},
                    'detection': merged_lesions
                }
            }

    out_path = os.path.join(tar_dir, 'annotations.json')
    with open(out_path, 'w', encoding='utf-8') as f:
        json.dump(annotations, f, indent=4, ensure_ascii=False)
    print(f"✅ Saved processed annotations to {out_path}")


# ========== Entry point ==========
if __name__ == "__main__":
    import argparse

    arg_parser = argparse.ArgumentParser(description="Process Aier dataset with patient-level split and preprocessing.")
    arg_parser.add_argument("--data_root", type=str, default='/home/zhangpinglu/data0/gy/Dataset/aier_orignal',
                            help="Path to the root directory of the original dataset (e.g. /home/.../aier_orignal)")
    arg_parser.add_argument("--tar_dir", type=str, default='/home/zhangpinglu/data0/gy/Dataset/aier_processed',
                            help="Path to save the processed dataset (e.g. /home/.../aier_processed)")
    arg_parser.add_argument("--test_ratio", type=float, default=0.2,
                            help="Proportion of data used for testing (default: 0.2)")
    arg_parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility")
    cli = arg_parser.parse_args()

    # Build the patient-level split first, then persist it alongside the
    # processed data.
    split_dict = generate_split(cli.data_root, test_ratio=cli.test_ratio, seed=cli.seed)

    os.makedirs(cli.tar_dir, exist_ok=True)
    split_file = os.path.join(cli.tar_dir, 'split.json')
    with open(split_file, "w", encoding="utf-8") as f:
        json.dump(split_dict, f, ensure_ascii=False, indent=2)

    # Crop/resize images and write the consolidated annotations.
    preprocess_dataset(cli.data_root, cli.tar_dir, split=split_dict)
