import os
from enum import Enum

import PIL
import torch
import numpy as np
from torchvision import transforms
import cv2

# Channel-wise mean/std of the ImageNet training set, used to normalize RGB
# inputs so pretrained backbones see the statistics they were trained on.
IMAGENET_MEAN = [0.485, 0.456, 0.406]
IMAGENET_STD = [0.229, 0.224, 0.225]

class DatasetSplit(Enum):
    """The three canonical dataset partitions.

    Each member's value doubles as the on-disk split directory name
    (``<source>/<classname>/<split>/``), so the strings must match the
    dataset layout exactly.
    """

    TRAIN = "train"
    VAL = "val"
    TEST = "test"

class RGBDDataset(torch.utils.data.Dataset):
    """RGB-D dataset for anomaly detection.

    Expects an MVTec-3D-AD-style directory layout::

        <source>/<classname>/<split>/<anomaly>/rgb/<file>
        <source>/<classname>/<split>/<anomaly>/depth/<file>
        <source>/<classname>/ground_truth/<anomaly>/<mask>   (TEST only)

    NOTE(review): rgb/ and depth/ are assumed to hold identically named,
    pixel-aligned files, and ground-truth masks are matched to test images
    by sorted index — verify against the dataset on disk.
    """

    def __init__(
        self,
        source,
        classname,
        resize=256,
        imagesize=224,
        split=DatasetSplit.TRAIN,
        train_val_split=1.0,
        seed=0,
        augment=False,
    ):
        """
        Args:
            source: Root directory containing one sub-directory per class.
            classname: Name of the object class to load.
            resize: Shorter-side length images are resized to.
            imagesize: Side length of the center crop returned to the caller.
            split: DatasetSplit member selecting which partition to load.
            train_val_split: Fraction of the sorted train images assigned to
                TRAIN; the tail becomes VAL. 1.0 disables the split.
            seed: Stored for reproducibility bookkeeping (the loading
                pipeline itself is fully deterministic).
            augment: Stored flag; no augmentation is applied in this class.
        """
        self.source = source
        self.classname = classname
        self.resize = resize
        self.imagesize = imagesize
        self.split = split
        self.train_val_split = train_val_split
        # Bug fix: `seed` used to be accepted but silently discarded.
        self.seed = seed
        self.augment = augment

        # RGB pipeline: resize/crop plus ImageNet normalization so pretrained
        # backbones receive inputs with the distribution they were trained on.
        self.transform_img = transforms.Compose(
            [
                transforms.Resize(resize),
                transforms.CenterCrop(imagesize),
                transforms.ToTensor(),
                transforms.Normalize(mean=IMAGENET_MEAN, std=IMAGENET_STD),
            ]
        )

        # Depth pipeline: geometry only, no normalization.
        # NOTE(review): the default bilinear Resize interpolates depth values
        # and mask edges — confirm downstream consumers tolerate that.
        self.transform_depth = transforms.Compose(
            [
                transforms.Resize(resize),
                transforms.CenterCrop(imagesize),
                transforms.ToTensor(),
            ]
        )

        # Ground-truth mask pipeline mirrors the depth pipeline.
        self.transform_mask = transforms.Compose(
            [
                transforms.Resize(resize),
                transforms.CenterCrop(imagesize),
                transforms.ToTensor(),
            ]
        )

        self.imgpaths_per_class, self.data_to_iterate = self.get_image_data()

    def __getitem__(self, idx):
        """Return one sample as a dict of tensors and metadata.

        Keys: ``image`` (normalized RGB), ``depth``, ``mask`` (pixel ground
        truth; all-zero outside anomalous TEST samples), ``depth_mask``
        (foreground estimate from depth), ``classname``, ``anomaly``,
        ``is_anomaly`` (0/1), ``image_name``, ``image_path``.
        """
        classname, anomaly, rgb_path, depth_path, mask_path = self.data_to_iterate[idx]

        # RGB image -> normalized float tensor [3, imagesize, imagesize].
        rgb_image = PIL.Image.open(rgb_path).convert("RGB")
        rgb_image = self.transform_img(rgb_image)

        # Depth image -> un-normalized tensor (kept in its native scale).
        depth_image = PIL.Image.open(depth_path)
        depth_image = self.transform_depth(depth_image)

        # Foreground estimate derived from depth alone.
        depth_mask = self.generate_depth_mask(depth_image)

        # Pixel-level ground truth exists only for anomalous TEST images;
        # everything else gets an all-zero mask of matching spatial size.
        if self.split == DatasetSplit.TEST and mask_path is not None:
            mask = PIL.Image.open(mask_path)
            mask = self.transform_mask(mask)
        else:
            mask = torch.zeros([1, *rgb_image.size()[1:]])

        return {
            "image": rgb_image,
            "depth": depth_image,
            "mask": mask,
            "depth_mask": depth_mask,
            "classname": classname,
            "anomaly": anomaly,
            "is_anomaly": int(anomaly != "good"),
            "image_name": "/".join(rgb_path.split("/")[-4:]),
            "image_path": rgb_path,
        }

    def generate_depth_mask(self, depth_image):
        """Estimate a binary foreground mask from a depth tensor.

        The depth map is min-max scaled to [0, 255], thresholded at its mean
        value, and cleaned up with morphological opening and closing.

        Args:
            depth_image: Depth tensor; squeezed to 2-D before processing.

        Returns:
            uint8 tensor of shape [1, H, W] with values in {0, 1}.
        """
        depth_np = depth_image.squeeze().numpy()

        # Bug fix: a constant depth map used to divide by zero here and push
        # NaNs through the uint8 cast. Fall back to an all-zero map instead.
        depth_min = depth_np.min()
        depth_range = depth_np.max() - depth_min
        if depth_range > 0:
            depth_np = ((depth_np - depth_min) * (255.0 / depth_range)).astype(np.uint8)
        else:
            depth_np = np.zeros(depth_np.shape, dtype=np.uint8)

        # Simple mean threshold — a cheap stand-in for Otsu's method
        # (cv2.threshold with cv2.THRESH_OTSU would be the drop-in upgrade).
        threshold = np.mean(depth_np)
        mask = (depth_np > threshold).astype(np.uint8)

        # Opening removes speckle noise; closing fills small holes.
        kernel = np.ones((5, 5), np.uint8)
        mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)
        mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel)

        return torch.from_numpy(mask).unsqueeze(0)

    def __len__(self):
        """Number of samples in the selected split."""
        return len(self.data_to_iterate)

    def get_image_data(self):
        """Scan the dataset directory and build the sample index.

        Returns:
            Tuple ``(imgpaths_per_class, data_to_iterate)`` where
            ``data_to_iterate`` is a flat list of
            ``[classname, anomaly, rgb_path, depth_path, mask_path_or_None]``.
        """
        imgpaths_per_class = {}
        maskpaths_per_class = {}

        for classname in [self.classname]:
            classpath = os.path.join(self.source, classname, self.split.value)
            maskpath = os.path.join(self.source, classname, "ground_truth")
            anomaly_types = os.listdir(classpath)

            imgpaths_per_class[classname] = {}
            maskpaths_per_class[classname] = {}

            for anomaly in anomaly_types:
                anomaly_path = os.path.join(classpath, anomaly)
                # rgb/ defines the file list; depth/ is assumed to mirror it
                # filename-for-filename.
                anomaly_files = sorted(os.listdir(os.path.join(anomaly_path, "rgb")))
                imgpaths_per_class[classname][anomaly] = {
                    "rgb": [os.path.join(anomaly_path, "rgb", x) for x in anomaly_files],
                    "depth": [os.path.join(anomaly_path, "depth", x) for x in anomaly_files],
                }

                # Deterministic head/tail split of the sorted file list so
                # TRAIN and VAL partitions never overlap.
                if self.train_val_split < 1.0:
                    paths = imgpaths_per_class[classname][anomaly]
                    split_idx = int(len(paths["rgb"]) * self.train_val_split)
                    if self.split == DatasetSplit.TRAIN:
                        paths["rgb"] = paths["rgb"][:split_idx]
                        paths["depth"] = paths["depth"][:split_idx]
                    elif self.split == DatasetSplit.VAL:
                        paths["rgb"] = paths["rgb"][split_idx:]
                        paths["depth"] = paths["depth"][split_idx:]

                # Pixel masks exist only for anomalous TEST data; "good"
                # samples (and all TRAIN/VAL data) carry no mask.
                if self.split == DatasetSplit.TEST and anomaly != "good":
                    anomaly_mask_path = os.path.join(maskpath, anomaly)
                    maskpaths_per_class[classname][anomaly] = [
                        os.path.join(anomaly_mask_path, x)
                        for x in sorted(os.listdir(anomaly_mask_path))
                    ]
                else:
                    maskpaths_per_class[classname]["good"] = None

        # Unroll the nested dict into a flat list with a deterministic
        # (sorted) iteration order.
        data_to_iterate = []
        for classname in sorted(imgpaths_per_class.keys()):
            for anomaly in sorted(imgpaths_per_class[classname].keys()):
                for i in range(len(imgpaths_per_class[classname][anomaly]["rgb"])):
                    data_tuple = [
                        classname,
                        anomaly,
                        imgpaths_per_class[classname][anomaly]["rgb"][i],
                        imgpaths_per_class[classname][anomaly]["depth"][i],
                    ]
                    if self.split == DatasetSplit.TEST and anomaly != "good":
                        # NOTE(review): masks are matched by sorted index; a
                        # count mismatch between rgb/ and ground_truth/ would
                        # raise IndexError here — confirm the dataset is clean.
                        data_tuple.append(maskpaths_per_class[classname][anomaly][i])
                    else:
                        data_tuple.append(None)
                    data_to_iterate.append(data_tuple)

        return imgpaths_per_class, data_to_iterate