import os
import PIL
import torch
import numpy as np
import cv2
from torchvision import transforms
from enum import Enum

# Channel-wise normalization statistics of the ImageNet training set,
# consumed by torchvision.transforms.Normalize below.
IMAGENET_MEAN = [0.485, 0.456, 0.406]
IMAGENET_STD = [0.229, 0.224, 0.225]

# The fifteen object/texture categories of the MVTec AD benchmark,
# used when no explicit classname is given.
_CLASSNAMES = [
    "bottle", "cable", "capsule", "carpet", "grid",
    "hazelnut", "leather", "metal_nut", "pill", "screw",
    "tile", "toothbrush", "transistor", "wood", "zipper",
]

class ThresholdMethod(Enum):
    """Strategies for binarizing a depth map into a foreground mask."""

    STATIC = "static"          # fixed, manually chosen cut-off value
    OTSU = "otsu"              # Otsu's adaptive global threshold
    ADAPTIVE = "adaptive"      # local mean adaptive threshold
    EDGE = "edge"              # Canny edges with filled contours
    PERCENTILE = "percentile"  # threshold at a given percentile

class DatasetSplit(Enum):
    """The standard partitions a dataset instance can serve."""

    TRAIN = "train"
    TEST = "test"
    VAL = "val"


class MVTecDataset(torch.utils.data.Dataset):
    """
    MVTec-style dataset that loads RGB images together with depth maps and
    derives a foreground mask per sample in ``__getitem__``.

    Expected directory layout::

        <source>/
            <classname>/
                train/
                    good/
                        rgb/    (jpg images)
                        depth/  (png depth maps, named "<rgb-stem>_depth.png")
                test/
                    good/
                        rgb/
                        depth/
                    bad/
                        rgb/
                        depth/
    """
    SPLIT_TRAIN = "train"
    SPLIT_TEST = "test"

    def __init__(
        self,
        source,
        classname,
        resize=256,
        imagesize=224,
        split=DatasetSplit.TEST,
        train_val_split=1.0,
        depth_threshold=0.5,  # fixed cut-off used by ThresholdMethod.STATIC
        threshold_method=ThresholdMethod.OTSU,  # default thresholding strategy
        percentile_value=80,  # percentile used by ThresholdMethod.PERCENTILE
        **kwargs,
    ):
        """
        Args:
            source: Dataset root directory, e.g. ``/path/to/mvtec``.
            classname: Single class to load, or ``None`` to use every entry
                in ``_CLASSNAMES``.
            resize: Smaller-edge size images are resized to before cropping.
            imagesize: Final square crop size fed to the model.
            split: ``DatasetSplit.TRAIN``, ``TEST`` or ``VAL``.
            train_val_split: Fraction assigned to TRAIN when < 1.0; the
                remainder goes to VAL.
            depth_threshold: Threshold in [0, 1] for the STATIC method.
            threshold_method: ``ThresholdMethod`` used to binarize depth maps.
            percentile_value: Percentile (0-100) for the PERCENTILE method.
        """
        super().__init__()
        self.source = source
        self.split = split
        self.classnames_to_use = [classname] if classname is not None else _CLASSNAMES
        self.train_val_split = train_val_split
        self.depth_threshold = depth_threshold
        self.threshold_method = threshold_method
        self.percentile_value = percentile_value
        self.name = classname  # some consumers expect a ``name`` attribute

        # NOTE(review): RandomCrop applies stochastic augmentation to *every*
        # split, including TEST, which makes evaluation non-deterministic —
        # confirm this is intended.
        self.transform_img = transforms.Compose([
            transforms.Resize(resize),
            transforms.RandomCrop(imagesize, padding=4),
            transforms.ToTensor(),
            transforms.Normalize(mean=IMAGENET_MEAN, std=IMAGENET_STD),
        ])
        self.transform_mask = transforms.Compose([
            transforms.Resize(resize),
            transforms.CenterCrop(imagesize),
            transforms.ToTensor(),
        ])
        # Depth maps are only resized/cropped; ToTensor scales them to [0, 1].
        self.transform_depth = transforms.Compose([
            transforms.Resize(resize),
            transforms.CenterCrop(imagesize),
            transforms.ToTensor(),
        ])

        self.imagesize = (3, imagesize, imagesize)
        self.transform_std = IMAGENET_STD
        self.transform_mean = IMAGENET_MEAN

        self.imgpaths_per_class, self.data_to_iterate = self.get_image_data()

    def get_image_data(self):
        """
        Collect sample paths for the configured classes and split.

        Paths follow the layout::

            source/classname/split/anomaly/rgb    (RGB images)
            source/classname/split/anomaly/depth  (depth maps)

        where ``anomaly`` is "good", "bad" or another anomaly category.

        Returns:
            Tuple ``(imgpaths_per_class, data_to_iterate)`` where
            ``data_to_iterate`` is a list of
            ``[classname, anomaly, rgb_path, depth_path, mask_path]`` entries.

        Raises:
            FileNotFoundError: If an expected directory is missing.
        """
        imgpaths_per_class = {}
        depthpaths_per_class = {}
        maskpaths_per_class = {}
        # BUGFIX: this list was previously re-initialized inside the class
        # loop, so with classname=None only the *last* class's samples
        # survived. Accumulate across all classes instead.
        data_to_iterate = []

        for classname in self.classnames_to_use:
            # e.g. <source>/<classname>/test
            class_path = os.path.join(self.source, classname, self.split.value)
            if not os.path.isdir(class_path):
                raise FileNotFoundError(f"目录不存在: {class_path}")

            anomaly_types = sorted(os.listdir(class_path))
            imgpaths_per_class[classname] = {}
            depthpaths_per_class[classname] = {}
            maskpaths_per_class[classname] = {}

            for anomaly in anomaly_types:
                # Each anomaly directory contains "rgb" and "depth" subfolders.
                rgb_dir = os.path.join(class_path, anomaly, "rgb")
                depth_dir = os.path.join(class_path, anomaly, "depth")

                if not os.path.isdir(rgb_dir):
                    raise FileNotFoundError(f"找不到目录: {rgb_dir}")
                if not os.path.isdir(depth_dir):
                    raise FileNotFoundError(f"找不到目录: {depth_dir}")

                rgb_files = sorted(os.listdir(rgb_dir))
                imgpaths_per_class[classname][anomaly] = [
                    os.path.join(rgb_dir, fname) for fname in rgb_files
                ]
                # Depth files share the RGB stem with a "_depth.png" suffix.
                depthpaths_per_class[classname][anomaly] = [
                    os.path.join(depth_dir, os.path.splitext(fname)[0] + "_depth.png")
                    for fname in rgb_files
                ]

                # Optional train/val partition of the file lists.
                if self.train_val_split < 1.0:
                    n_images = len(imgpaths_per_class[classname][anomaly])
                    split_idx = int(n_images * self.train_val_split)
                    if self.split == DatasetSplit.TRAIN:
                        imgpaths_per_class[classname][anomaly] = imgpaths_per_class[classname][anomaly][:split_idx]
                        depthpaths_per_class[classname][anomaly] = depthpaths_per_class[classname][anomaly][:split_idx]
                    elif self.split == DatasetSplit.VAL:
                        imgpaths_per_class[classname][anomaly] = imgpaths_per_class[classname][anomaly][split_idx:]
                        depthpaths_per_class[classname][anomaly] = depthpaths_per_class[classname][anomaly][split_idx:]

                # No pixel-level ground-truth masks exist in this layout;
                # record None placeholders only.
                if self.split == DatasetSplit.TEST and anomaly != "good":
                    maskpaths_per_class[classname][anomaly] = None
                else:
                    maskpaths_per_class[classname]["good"] = None

            for anomaly in anomaly_types:
                for rgb_path, depth_path in zip(
                    imgpaths_per_class[classname][anomaly],
                    depthpaths_per_class[classname][anomaly],
                ):
                    if self.split == DatasetSplit.TEST and anomaly != "good":
                        mask_path = maskpaths_per_class[classname][anomaly]
                    else:
                        mask_path = None
                    data_to_iterate.append(
                        [classname, anomaly, rgb_path, depth_path, mask_path]
                    )

        return imgpaths_per_class, data_to_iterate

    def calculate_threshold(self, depth_tensor):
        """
        Compute a depth threshold (or directly a mask) with the configured
        ``self.threshold_method``.

        Args:
            depth_tensor: Depth tensor of shape (1, H, W) with values in [0, 1].

        Returns:
            A scalar threshold in [0, 1] for STATIC / OTSU / PERCENTILE, or a
            binary mask tensor of shape (1, H, W) for ADAPTIVE / EDGE.
        """
        # Convert to a uint8 image for the OpenCV-based methods.
        depth_np = depth_tensor.squeeze().cpu().numpy()
        depth_uint8 = (depth_np * 255).astype(np.uint8)

        if self.threshold_method == ThresholdMethod.STATIC:
            # Fixed, user-supplied threshold.
            return self.depth_threshold

        elif self.threshold_method == ThresholdMethod.OTSU:
            # Otsu's adaptive global threshold; rescale back to [0, 1].
            otsu_thresh, _ = cv2.threshold(
                depth_uint8, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU
            )
            return otsu_thresh / 255.0

        elif self.threshold_method == ThresholdMethod.ADAPTIVE:
            # Local mean adaptive threshold; returns the mask directly.
            block_size = 11  # neighborhood size, must be odd
            c = 2            # constant subtracted from the local mean
            binary = cv2.adaptiveThreshold(
                depth_uint8, 255, cv2.ADAPTIVE_THRESH_MEAN_C,
                cv2.THRESH_BINARY, block_size, c
            )
            return torch.tensor(binary / 255.0, dtype=torch.float).unsqueeze(0)

        elif self.threshold_method == ThresholdMethod.EDGE:
            # Canny edge detection, then fill the outer contours to obtain
            # a foreground mask; returns the mask directly.
            edges = cv2.Canny(depth_uint8, 30, 100)
            contours, _ = cv2.findContours(edges, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
            mask = np.zeros_like(depth_uint8)
            cv2.drawContours(mask, contours, -1, 255, -1)  # thickness=-1 fills interiors
            return torch.tensor(mask / 255.0, dtype=torch.float).unsqueeze(0)

        elif self.threshold_method == ThresholdMethod.PERCENTILE:
            # Threshold at the configured percentile of the depth values.
            return np.percentile(depth_np, self.percentile_value)

        else:
            # Unknown method: fall back to the static threshold.
            return self.depth_threshold

    def __getitem__(self, idx):
        """Load one sample and return it as a dict of tensors and metadata."""
        classname, anomaly, rgb_path, depth_path, mask_path = self.data_to_iterate[idx]

        # Load and preprocess the RGB image.
        image = PIL.Image.open(rgb_path).convert("RGB")
        image = self.transform_img(image)
        # Load the depth map as a single-channel image and preprocess it.
        depth_image = PIL.Image.open(depth_path).convert("L")
        depth_tensor = self.transform_depth(depth_image)

        # Derive the foreground mask.
        # NOTE(review): the RGB image goes through RandomCrop while the depth
        # map goes through CenterCrop, so mask and image may be spatially
        # misaligned — confirm this is intended.
        if self.threshold_method in (ThresholdMethod.ADAPTIVE, ThresholdMethod.EDGE):
            # These methods return the binary mask directly.
            foreground_mask = self.calculate_threshold(depth_tensor)
        else:
            # These methods return a scalar threshold to apply.
            threshold = self.calculate_threshold(depth_tensor)
            foreground_mask = (depth_tensor >= threshold).float()

        # Zero out background pixels of the RGB image.
        masked_image = image * foreground_mask.expand_as(image)

        # No ground truth available in this layout — always emit zeros.
        gt_mask = torch.zeros([1, image.shape[1], image.shape[2]])

        return {
            "image": masked_image,  # masked image used as model input
            "original_image": image,  # unmasked image for comparison
            "depth": depth_tensor,
            "foreground_mask": foreground_mask,
            "mask": gt_mask,
            "classname": classname,
            "anomaly": anomaly,
            "is_anomaly": int(anomaly != "good"),
            "image_name": "/".join(rgb_path.split("/")[-4:]),
            "image_path": rgb_path,
        }

    def __len__(self):
        return len(self.data_to_iterate)
