import os
from enum import Enum

import numpy as np
import PIL
import torch
from torchvision import transforms
from skimage import filters
import cv2
import click

IMAGENET_MEAN = [0.485, 0.456, 0.406]
IMAGENET_STD = [0.229, 0.224, 0.225]


class DatasetSplit(Enum):
    """Dataset partitions; values match the on-disk split directory names."""

    TRAIN = "train"
    VAL = "val"
    TEST = "test"


class DroneInspectionDataset(torch.utils.data.Dataset):
    """
    PyTorch Dataset for drone blade inspection with paired RGB and depth images.

    Expected directory layout:
        source/
            <split>/                    # "train" / "val" / "test"
                <anomaly_type>/         # e.g. "good", "crack", ...
                    rgb/    image_001.jpg ...
                    depth/  image_001.png ...
            ground_truth/
                <anomaly_type>/ image_001.png ...   # test-set anomaly masks

    Each item provides the RGB image, the depth map, a foreground mask
    derived from the depth map, and (for anomalous test samples) the
    ground-truth anomaly mask.
    """

    def __init__(
        self,
        source,
        classname,
        resize=256,
        imagesize=224,
        split=DatasetSplit.TEST,
        train_val_split=1.0,
        depth_threshold=0.3,
        **kwargs,
    ):
        """
        Args:
            source: [str] Path to the data folder containing RGB and depth images.
            classname: [str] Name of class/category.
            resize: [int] Size the loaded image initially gets resized to.
            imagesize: [int] Size the resized loaded image gets cropped to.
            split: [enum] Indicates if training or test split should be used.
            train_val_split: [float] Fraction of the data kept for training;
                the remainder forms the validation split.
            depth_threshold: [float] Relative threshold (fraction of the depth
                map's maximum value) used to binarize depth maps into
                foreground masks. A negative value selects Otsu's automatic
                threshold instead.
        """
        super().__init__()
        self.source = source
        self.split = split
        self.classname = classname
        self.train_val_split = train_val_split
        self.depth_threshold = depth_threshold
        self.name = classname

        self.image_paths, self.depth_paths, self.mask_paths, self.anomaly_types = self.get_image_data()

        self.transform_img = transforms.Compose([
            transforms.Resize(resize),
            transforms.CenterCrop(imagesize),
            transforms.ToTensor(),
            transforms.Normalize(mean=IMAGENET_MEAN, std=IMAGENET_STD),
        ])

        self.transform_depth = transforms.Compose([
            transforms.Resize(resize),
            transforms.CenterCrop(imagesize),
            transforms.ToTensor(),
        ])

        self.transform_mask = transforms.Compose([
            transforms.Resize(resize),
            transforms.CenterCrop(imagesize),
            transforms.ToTensor(),
        ])

        self.imagesize = (3, imagesize, imagesize)
        self.transform_std = IMAGENET_STD
        self.transform_mean = IMAGENET_MEAN

    def __getitem__(self, idx):
        """Return a dict with image/depth/mask tensors and metadata for sample ``idx``."""
        image_path = self.image_paths[idx]
        depth_path = self.depth_paths[idx]
        anomaly = self.anomaly_types[idx]

        # Load the RGB image.
        image = PIL.Image.open(image_path).convert("RGB")
        image_tensor = self.transform_img(image)

        # Load the depth map as a single channel.
        depth = PIL.Image.open(depth_path).convert("L")
        depth_tensor = self.transform_depth(depth)

        # Derive a foreground mask by thresholding the depth map.
        foreground_mask = self._compute_foreground_mask(np.array(depth))
        foreground_mask_pil = PIL.Image.fromarray(foreground_mask)
        foreground_mask_tensor = self.transform_mask(foreground_mask_pil)

        # Load the anomaly ground-truth mask when one exists (test split only).
        if self.split == DatasetSplit.TEST and self.mask_paths[idx] is not None:
            # Force single channel so the tensor shape matches the
            # 1-channel zero-mask fallback below, regardless of how the
            # mask file was saved.
            mask = PIL.Image.open(self.mask_paths[idx]).convert("L")
            mask_tensor = self.transform_mask(mask)
        else:
            mask_tensor = torch.zeros([1, *image_tensor.size()[1:]])

        return {
            "image": image_tensor,
            "depth": depth_tensor,
            "foreground_mask": foreground_mask_tensor,
            "mask": mask_tensor,
            "anomaly": anomaly,
            "is_anomaly": int(anomaly != "good"),
            "image_path": image_path,
            "depth_path": depth_path,
        }

    def _compute_foreground_mask(self, depth_np):
        """Binarize a depth array into a cleaned-up 0/255 uint8 foreground mask.

        A negative ``depth_threshold`` selects Otsu's automatic threshold;
        otherwise the threshold is ``depth_threshold`` times the maximum
        depth value in the map.
        """
        if self.depth_threshold < 0:
            threshold = filters.threshold_otsu(depth_np)
        else:
            threshold = self.depth_threshold * depth_np.max()

        foreground_mask = (depth_np > threshold).astype(np.uint8) * 255

        # Morphological closing then opening fills small holes and removes
        # speckle noise from the binarized mask.
        kernel = np.ones((5, 5), np.uint8)
        foreground_mask = cv2.morphologyEx(foreground_mask, cv2.MORPH_CLOSE, kernel)
        foreground_mask = cv2.morphologyEx(foreground_mask, cv2.MORPH_OPEN, kernel)
        return foreground_mask

    def __len__(self):
        return len(self.image_paths)

    def get_image_data(self):
        """Scan the data directory and collect aligned path lists.

        Returns:
            Tuple of four equal-length lists: RGB image paths, depth map
            paths, anomaly-mask paths (None where no mask exists), and
            anomaly type names.
        """
        image_paths = []
        depth_paths = []
        mask_paths = []
        anomaly_types = []

        split_path = os.path.join(self.source, self.split.value)
        mask_path = os.path.join(self.source, "ground_truth")

        if os.path.exists(split_path):
            # Sort for a deterministic ordering: the train/val split below
            # slices by index, so filesystem-dependent listdir order would
            # make the split non-reproducible across machines.
            anomaly_categories = sorted(os.listdir(split_path))

            for anomaly in anomaly_categories:
                anomaly_rgb_path = os.path.join(split_path, anomaly, "rgb")
                anomaly_depth_path = os.path.join(split_path, anomaly, "depth")

                if os.path.exists(anomaly_rgb_path) and os.path.exists(anomaly_depth_path):
                    rgb_files = sorted(os.listdir(anomaly_rgb_path))

                    for img_file in rgb_files:
                        img_name = os.path.splitext(img_file)[0]
                        # Depth maps are assumed to be stored as PNG files
                        # with the same basename as the RGB image.
                        depth_file = img_name + ".png"

                        if os.path.exists(os.path.join(anomaly_depth_path, depth_file)):
                            image_paths.append(os.path.join(anomaly_rgb_path, img_file))
                            depth_paths.append(os.path.join(anomaly_depth_path, depth_file))
                            anomaly_types.append(anomaly)

                            # For anomalous test samples, look up the
                            # ground-truth mask; None when no mask exists.
                            mask_file_path = None
                            if self.split == DatasetSplit.TEST and anomaly != "good":
                                candidate = os.path.join(mask_path, anomaly, img_name + ".png")
                                if os.path.exists(candidate):
                                    mask_file_path = candidate
                            mask_paths.append(mask_file_path)

        # Optionally carve the collected samples into train/validation
        # subsets by index.
        if self.train_val_split < 1.0 and len(image_paths) > 0:
            n_samples = len(image_paths)
            indices = list(range(n_samples))
            split_idx = int(n_samples * self.train_val_split)

            if self.split == DatasetSplit.TRAIN:
                indices = indices[:split_idx]
            elif self.split == DatasetSplit.VAL:
                indices = indices[split_idx:]

            image_paths = [image_paths[i] for i in indices]
            depth_paths = [depth_paths[i] for i in indices]
            mask_paths = [mask_paths[i] for i in indices]
            anomaly_types = [anomaly_types[i] for i in indices]

        return image_paths, depth_paths, mask_paths, anomaly_types

@click.command("drone_dataset")
@click.argument("data_path", type=click.Path(exists=True, file_okay=False))
@click.option("--category", "-c", type=str, required=True, help="风机叶片类别")
@click.option("--depth_threshold", type=float, default=0.3, help="深度阈值，用于生成前景掩膜")
@click.option("--resize", type=int, default=256, help="Size the loaded image initially gets resized to")
@click.option("--imagesize", type=int, default=224, help="Size the resized loaded image gets cropped to")
@click.option("--split", type=click.Choice(["train", "val", "test"]), default="test", help="Indicates if training or test split should be used")
@click.option("--train_val_split", type=float, default=1.0, help="Percentage of training data to use for training (vs validation)")
def drone_dataset(data_path, category, depth_threshold, resize, imagesize, split, train_val_split):
    """
    Command to create a drone dataset.

    Args:
        data_path: [str] Path to the data folder containing RGB and depth images.
        category: [str] Name of class/category.
        depth_threshold: [float] Threshold to binarize depth maps for foreground masks.
        resize: [int] Size the loaded image initially gets resized to.
        imagesize: [int] Size the resized loaded image gets cropped to.
        split: [str] Indicates if training or test split should be used.
        train_val_split: [float] Percentage of training data to use for training (vs validation).
    """
    # The CLI receives `split` as a plain string, but DroneInspectionDataset
    # expects a DatasetSplit enum (it accesses `split.value` and compares
    # against enum members), so convert before constructing the dataset.
    dataset = DroneInspectionDataset(
        data_path,
        category,
        resize=resize,
        imagesize=imagesize,
        split=DatasetSplit(split),
        train_val_split=train_val_split,
        depth_threshold=depth_threshold,
    )
    print(f"Dataset created with {len(dataset)} samples")

if __name__ == "__main__":
    # Click parses command-line arguments itself when invoked.
    drone_dataset()