from concurrent.futures import ThreadPoolExecutor
from itertools import combinations
from typing import Dict, List, Tuple
import albumentations as A
import cv2
import os
import random
import logging
import numpy as np
import gc
from tqdm import tqdm
from params import Config
from utils.voc import load_voc_annotation, save_voc_annotation
from model import BBox
from log import logger


def apply_transform_and_save(combo: List[Tuple[str, A.BasicTransform]], image: np.ndarray, bboxes: List[BBox], config: Config) -> bool:
    """Apply one combination of albumentations transforms to `image` and save the result.

    Writes the augmented image to `<config.dest>/images` and a matching Pascal-VOC
    XML annotation to `<config.dest>/annotations`. The output file name is
    `<config.image_name>_<name1>_<name2>...` built from the combo's transform names.

    Args:
        combo: list of (name, transform) pairs applied in order.
        image: source image as loaded by cv2 (BGR ndarray).
        bboxes: annotated boxes for the source image.
        config: provides image_name, image_type and dest directory.

    Returns:
        True on success, False on any validation or processing failure.
    """
    # Pre-seed `suffix` so the outer error log never hits a NameError when the
    # failure happens before the combo name is assembled.
    suffix = "<unbuilt>"
    try:
        # Validate the combo shape before unpacking it.
        if not isinstance(combo, list):
            logger.error(f"Combo is not a list: {combo}")
            return False
        if not all(isinstance(item, tuple) and len(item) == 2 for item in combo):
            logger.error(f"Invalid combo structure: {combo}")
            return False

        suffix = "_".join(name for name, _ in combo)
        transforms = [t for _, t in combo]
        logger.info(f"Processing combo: {[(name, type(t).__name__) for name, t in combo]}")

        bbox_coords = [[b.xmin, b.ymin, b.xmax, b.ymax] for b in bboxes]
        bbox_labels = [b.label for b in bboxes]
        logger.info(f"Applying transform {suffix} with {len(bbox_coords)} bboxes")

        # Always declare bbox_params: Compose is invoked below with `bboxes` and
        # `category_ids`, and albumentations rejects bbox data when bbox_params is
        # None. (The previous conditional bbox_params made every purely
        # photometric combo fail, which then left `augmented` undefined.)
        # Spatial transforms update the boxes; photometric ones pass them through.
        transform = A.Compose(
            transforms,
            bbox_params=A.BboxParams(format="pascal_voc", label_fields=["category_ids"]),
        )

        try:
            augmented = transform(image=image, bboxes=bbox_coords, category_ids=bbox_labels)
        except Exception:
            # Bug fix: previously execution fell through and dereferenced the
            # undefined `augmented`; now we log the traceback and bail out.
            logger.exception(f"应用变换时出错: transform {suffix} failed")
            return False

        aug_img = augmented["image"]
        aug_bboxes = augmented["bboxes"]
        aug_labels = augmented["category_ids"]

        save_name = f"{config.image_name}_{suffix}"
        dest_images_dir = os.path.join(config.dest, "images")
        dest_annotations_dir = os.path.join(config.dest, "annotations")
        os.makedirs(dest_images_dir, exist_ok=True)
        os.makedirs(dest_annotations_dir, exist_ok=True)

        img_save_path = os.path.join(dest_images_dir, f"{save_name}.{config.image_type}")
        xml_save_path = os.path.join(dest_annotations_dir, f"{save_name}.xml")

        cv2.imwrite(img_save_path, aug_img)
        save_voc_annotation(xml_save_path, f"{save_name}.{config.image_type}", aug_img.shape, aug_bboxes, aug_labels)

        # Release large buffers early; the augmented image can be big.
        del aug_img, augmented
        return True
    except Exception as e:
        logger.error(f"Error processing combo {suffix}: {str(e)}")
        return False


def generate_transform_combinations(
    distortions: Dict[str, A.BasicTransform],
    image: np.ndarray,
    bboxes: List[BBox],
    config: Config,
    max_combinations: int = 50,
    max_transforms_per_combo: int = 3,
    max_workers: int = 4,
) -> None:
    """Build single/double/triple transform combos and apply them concurrently.

    Generates all singles, a random sample of pairs/triples (each capped at
    max_combinations // 3), plus a few hand-picked pairings, then caps the total
    at `max_combinations` and fans the work out over a thread pool.

    Args:
        distortions: mapping of transform name -> albumentations transform.
        image: source image (BGR ndarray, as loaded by cv2).
        bboxes: annotated boxes for the image.
        config: destination paths and naming info.
        max_combinations: hard cap on combos processed per image.
        max_transforms_per_combo: triples are generated only when >= 3.
        max_workers: thread-pool size (I/O-bound work: disk writes).
    """
    images_dir = os.path.join(config.dest, "images")
    annotations_dir = os.path.join(config.dest, "annotations")
    os.makedirs(images_dir, exist_ok=True)
    os.makedirs(annotations_dir, exist_ok=True)

    all_transforms = list(distortions.items())  # list of (name, transform) tuples

    # Every combo is a list of (name, transform) tuples; start with all singles.
    combos: List[List[Tuple[str, A.BasicTransform]]] = [[item] for item in all_transforms]

    # Random sample of double combos (slicing naturally caps at list length).
    if len(all_transforms) >= 2:
        double_combos = list(combinations(all_transforms, 2))
        random.shuffle(double_combos)
        combos.extend(list(combo) for combo in double_combos[: max_combinations // 3])

    # Random sample of triple combos, only when allowed.
    if len(all_transforms) >= 3 and max_transforms_per_combo >= 3:
        triple_combos = list(combinations(all_transforms, 3))
        random.shuffle(triple_combos)
        combos.extend(list(combo) for combo in triple_combos[: max_combinations // 3])

    # Hand-picked pairings. Guard each against missing keys so this function
    # works with any subset of transforms (the original indexed the dict
    # unconditionally and raised KeyError when a name was absent).
    special_pairs = [
        ("distortions_elastic", "distortions_perspective"),
        ("color_random_brightness_contrast", "blur_noise_gaussian_noise"),
        ("blur_noise_motion_blur", "distortions_rotate"),
    ]
    for pair in special_pairs:
        if all(name in distortions for name in pair):
            combos.append([(name, distortions[name]) for name in pair])

    # Enforce the overall cap with an unbiased sample.
    if len(combos) > max_combinations:
        combos = random.sample(combos, max_combinations)

    logger.info(f"为 {config.image_name} 生成 {len(combos)} 个变换组合")

    # Fan the combos out over worker threads; collect results in submit order.
    with ThreadPoolExecutor(max_workers=max_workers) as executor:
        futures = [
            executor.submit(apply_transform_and_save, combo, image, bboxes, config)
            for combo in combos
        ]
        results = [future.result() for future in tqdm(futures, desc=f"处理 {config.image_name}")]

    success_count = sum(1 for r in results if r)
    logger.info(f"成功应用 {success_count}/{len(combos)} 个组合")

    # Force garbage collection to release augmented-image buffers promptly.
    gc.collect()


def save_original_image(image: np.ndarray, bboxes: List[BBox], config: Config):
    """Persist the untransformed image and its Pascal-VOC annotation under config.dest."""
    base_name = f"{config.image_name}_original"
    images_dir = os.path.join(config.dest, "images")
    annotations_dir = os.path.join(config.dest, "annotations")

    # Make sure both destination folders exist before writing.
    for directory in (images_dir, annotations_dir):
        os.makedirs(directory, exist_ok=True)

    image_filename = f"{base_name}.{config.image_type}"
    image_path = os.path.join(images_dir, image_filename)
    annotation_path = os.path.join(annotations_dir, f"{base_name}.xml")

    # Flatten the BBox objects into the coordinate/label lists the VOC writer expects.
    coords = [[box.xmin, box.ymin, box.xmax, box.ymax] for box in bboxes]
    labels = [box.label for box in bboxes]

    cv2.imwrite(image_path, image)
    save_voc_annotation(annotation_path, image_filename, image.shape, coords, labels)


if __name__ == "__main__":
    cfg = Config()
    # cfg.output = "./result"
    dir_path = os.path.join(cfg.base, cfg.dataset, "images")

    std_min = 3.16 / 255  # ≈ 0.0124
    std_max = 7.07 / 255  # ≈ 0.0277
    distortions = {
        "distortions_elastic": A.ElasticTransform(alpha=120, sigma=120 * 0.05, p=1.0),
        "distortions_grid": A.GridDistortion(num_steps=10, distort_limit=1, p=1.0),
        "distortions_optical": A.OpticalDistortion(distort_limit=0.5, p=1.0),
        "distortions_perspective": A.Perspective(scale=(0.05, 0.1), p=1.0),
        "distortions_rotate": A.Rotate(limit=45, p=1.0),
        "distortions_shift_scale_rotate": A.Affine(
            scale=(0.9, 1.1), translate_percent={"x": (-0.1, 0.1), "y": (-0.1, 0.1)}, rotate=(-45, 45), p=1.0
        ),
        "color_hue_saturation": A.HueSaturationValue(hue_shift_limit=20, sat_shift_limit=30, val_shift_limit=20, p=1.0),
        "color_rgb_shift": A.RGBShift(r_shift_limit=20, g_shift_limit=20, b_shift_limit=20, p=1.0),
        "color_random_brightness_contrast": A.RandomBrightnessContrast(brightness_limit=0.2, contrast_limit=0.2, p=1.0),
        "color_clahe": A.CLAHE(clip_limit=4.0, tile_grid_size=(8, 8), p=1.0),
        "color_solarize": A.Solarize(threshold_range=(0.3, 0.7), p=1.0),  # checked
        "blur_noise_gaussian_blur": A.GaussianBlur(blur_limit=(3, 7), p=1.0),
        "blur_noise_motion_blur": A.MotionBlur(blur_limit=7, p=1.0),
        "blur_noise_median_blur": A.MedianBlur(blur_limit=7, p=1.0),
        "blur_noise_gaussian_noise": A.GaussNoise(std_range=(std_min, std_max), p=1.0),
        "blur_noise_iso_noise": A.ISONoise(color_shift=(0.01, 0.05), intensity=(0.1, 0.5), p=1.0),
        "quality_image_compression": A.ImageCompression(quality_range=(50, 100), p=1.0),
        "quality_downscale": A.Downscale(scale_range=(0.25, 0.5), p=1.0),
        "special_coarse_dropout": A.CoarseDropout(
            # num_holes_range 数量， hole_height_range 随机高度，hole_width_range随机宽度，p 权值
            num_holes_range=(16, 32),
            hole_height_range=(64, 128),
            hole_width_range=(64, 128),
            p=1.0,
        ),  # checked
        "special_channel_shuffle": A.ChannelShuffle(p=1.0),
    }

    # file_list = [r"Base_100001.png"]
    for filename in os.listdir(dir_path):
    # for filename in file_list:
        name, ext = os.path.splitext(filename)
        if ext.lower() not in [".jpg", ".jpeg", ".png", ".bmp"]:
            continue

        cfg.image_name = name
        cfg.image_type = ext.replace(".", "")

        try:
            image = cv2.imread(cfg.image_path())
            if image is None:
                raise FileNotFoundError(f"无法加载图像: {cfg.image_path()}")

            bbox_data, label_data = load_voc_annotation(cfg.xml_path())
            if not bbox_data or not label_data:
                logger.warning(f"No valid annotations for {cfg.image_name}. Skipping transformations.")
                continue

            bboxes = [BBox(xmin=b[0], ymin=b[1], xmax=b[2], ymax=b[3], label=l) for b, l in zip(bbox_data, label_data)]
            save_original_image(image, bboxes, cfg)

            try:
                # 主要脚本逻辑
                generate_transform_combinations(distortions, image, bboxes, cfg, max_combinations=25, max_transforms_per_combo=4)
            except Exception as e:
                logging.exception(f"主逻辑出错: {str(e)}")
        except Exception as e:
            logger.error(f"处理 {name}{ext} 时出错: {str(e)}")
            continue
