import os
import PIL
import torch
import numpy as np
import cv2
from torchvision import transforms
from enum import Enum
import torch.nn.functional as F

# Standard ImageNet channel statistics used to normalize RGB inputs
# (the usual torchvision values).
IMAGENET_MEAN = [0.485, 0.456, 0.406]
IMAGENET_STD = [0.229, 0.224, 0.225]

# The 15 categories of the original MVTec AD benchmark; used as the default
# class list when no explicit classname is supplied to MVTecDataset.
_CLASSNAMES = [
    "bottle",
    "cable",
    "capsule",
    "carpet",
    "grid",
    "hazelnut",
    "leather",
    "metal_nut",
    "pill",
    "screw",
    "tile",
    "toothbrush",
    "transistor",
    "wood",
    "zipper",
]

class ThresholdMethod(Enum):
    """Depth-map thresholding methods used to extract a foreground mask."""
    STATIC = "static"          # static threshold (fixed, manually chosen value)
    OTSU = "otsu"              # Otsu adaptive threshold
    ADAPTIVE = "adaptive"      # adaptive mean threshold
    EDGE = "edge"              # edge-detection based method
    PERCENTILE = "percentile"  # percentile-based method
    BBSNET = "bbsnet"          # saliency map produced by a BBSNet model

# BBSNet model configuration.
BBSNET_MODEL_PATH = '/home/guojing/project/patchcore/patchcore-inspection-main/patchcore-inspection-main/src/new/model_pt/BBSNet_deploy.pt'  # TorchScript model path; adjust to the actual location
BBSNET_TESTSIZE = 224                              # testsize the model was exported with

class BBSNetSaliencyDetector:
    """Singleton wrapper around a TorchScript BBSNet model for RGB-D saliency.

    The model, target device and preprocessing transforms are kept as
    class-level state so that every instantiation shares one loaded model.
    """

    _instance = None     # the one shared instance
    _model = None        # TorchScript model, or None if loading failed
    _device = None       # 'cuda' or 'cpu'
    _rgb_tf = None       # preprocessing pipeline for RGB inputs
    _depth_tf = None     # preprocessing pipeline for depth inputs
    _initialized = False # guards against repeated __init__ work

    def __new__(cls):
        # Classic singleton: create the shared instance once, then reuse it.
        if cls._instance is None:
            cls._instance = super(BBSNetSaliencyDetector, cls).__new__(cls)
        return cls._instance

    def __init__(self):
        cls = BBSNetSaliencyDetector
        if cls._initialized:
            return

        cls._device = 'cuda' if torch.cuda.is_available() else 'cpu'
        try:
            # Load the TorchScript model once, in the main process.
            cls._model = torch.jit.load(BBSNET_MODEL_PATH, map_location=cls._device).eval()
            print(f"BBSNet model loaded from {BBSNET_MODEL_PATH}")
        except Exception as e:
            print(f"无法加载BBSNet模型: {str(e)}")
            cls._model = None

        # Build preprocessing pipelines matching the export-time input format.
        cls._rgb_tf = transforms.Compose([
            transforms.Resize((BBSNET_TESTSIZE, BBSNET_TESTSIZE)),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
        ])
        cls._depth_tf = transforms.Compose([
            transforms.Resize((BBSNET_TESTSIZE, BBSNET_TESTSIZE)),
            transforms.ToTensor(),
        ])

        cls._initialized = True

    def detect(self, rgb_image, depth_image):
        """Run BBSNet on an RGB/depth pair and return a saliency map.

        Args:
            rgb_image: PIL image (RGB).
            depth_image: PIL image (mode "L", grayscale).

        Returns:
            numpy array in [0, 1] with the same H x W as ``rgb_image``;
            an all-zero map if the model is unavailable or inference fails.
        """
        cls = BBSNetSaliencyDetector
        if cls._model is None:
            print("BBSNet模型未正确加载，使用空的显著性图")
            return np.zeros((rgb_image.height, rgb_image.width), dtype=np.float32)

        # Remember the original size so the output can be resized back.
        orig_w, orig_h = rgb_image.size

        # Preprocess, add a batch dimension and move to the model device.
        rgb_batch = cls._rgb_tf(rgb_image).unsqueeze(0).to(cls._device)
        depth_batch = cls._depth_tf(depth_image).unsqueeze(0).to(cls._device)

        # Forward pass.
        with torch.no_grad():
            try:
                logits = cls._model(rgb_batch, depth_batch)
                # Upsample back to the original resolution.
                logits = F.interpolate(
                    logits,
                    size=(orig_h, orig_w),
                    mode='bilinear',
                    align_corners=False,
                )
                saliency = torch.sigmoid(logits).cpu().numpy().squeeze()
            except Exception as e:
                print(f"BBSNet推理出错: {str(e)}")
                return np.zeros((orig_h, orig_w), dtype=np.float32)

        # Min-max normalize to [0, 1]; constant maps are left untouched.
        lo, hi = saliency.min(), saliency.max()
        if hi > lo:
            saliency = (saliency - lo) / (hi - lo + 1e-8)

        return saliency

class DatasetSplit(Enum):
    """Dataset split selector."""
    TRAIN = "train"
    TEST = "test"
    VAL = "val"
    SINGLE = "single"  # single-image mode (see SingleImageDataset)


class SingleImageDataset(torch.utils.data.Dataset):
    """
    Dataset wrapper around a single RGB/depth image pair.

    Yields exactly one sample whose RGB image is masked by a foreground mask
    derived from the depth map (see ``calculate_threshold``).
    """
    def __init__(
        self,
        rgb_path,
        depth_path,
        classname="unknown",
        resize=256,
        imagesize=224,
        depth_threshold=0.5,
        threshold_method=ThresholdMethod.OTSU,
        percentile_value=80,
        **kwargs,
    ):
        super().__init__()
        self.rgb_path = rgb_path
        self.depth_path = depth_path
        self.name = classname
        self.depth_threshold = depth_threshold
        self.threshold_method = threshold_method
        self.percentile_value = percentile_value
        
        # Initialize the saliency detector only when the BBSNet method is used.
        if self.threshold_method == ThresholdMethod.BBSNET:
            self.bbsnet_detector = BBSNetSaliencyDetector()

        self.transform_img = transforms.Compose([
            transforms.Resize(resize),
            transforms.CenterCrop(imagesize),
            transforms.ToTensor(),
            transforms.Normalize(mean=IMAGENET_MEAN, std=IMAGENET_STD),
        ])
        self.transform_depth = transforms.Compose([
            transforms.Resize(resize),
            transforms.CenterCrop(imagesize),
            transforms.ToTensor(),
        ])

        self.imagesize = (3, imagesize, imagesize)
        self.transform_std = IMAGENET_STD
        self.transform_mean = IMAGENET_MEAN
        
        # Dummy iteration list kept for compatibility with the original code paths.
        self.data_to_iterate = [[classname, "unknown", rgb_path, depth_path, None]]
        
    def calculate_threshold(self, depth_tensor, rgb_image=None, depth_image=None):
        """
        Compute the depth-map threshold (or mask) for the selected method.

        Args:
            depth_tensor: depth tensor of shape (1, H, W), values in [0, 1]
            rgb_image: original RGB PIL image (required by the BBSNet method)
            depth_image: original depth PIL image (required by the BBSNet method)

        Returns:
            Either a scalar threshold or a foreground-mask tensor of shape (1, H, W).
        """
        # Convert the depth map to a numpy array / 8-bit image for OpenCV.
        depth_np = depth_tensor.squeeze().cpu().numpy()
        depth_uint8 = (depth_np * 255).astype(np.uint8)
        
        if self.threshold_method == ThresholdMethod.STATIC:
            # Static threshold: return the fixed, user-supplied value.
            return self.depth_threshold
            
        elif self.threshold_method == ThresholdMethod.OTSU:
            # Otsu adaptive threshold.
            otsu_thresh, _ = cv2.threshold(
                depth_uint8, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU
            )
            return otsu_thresh / 255.0
            
        elif self.threshold_method == ThresholdMethod.ADAPTIVE:
            # Adaptive mean threshold.
            block_size = 11  # must be odd
            c = 2            # constant subtracted from the local mean
            binary = cv2.adaptiveThreshold(
                depth_uint8, 255, cv2.ADAPTIVE_THRESH_MEAN_C, 
                cv2.THRESH_BINARY, block_size, c
            )
            # This method returns the mask directly.
            return torch.tensor(binary / 255.0, dtype=torch.float).unsqueeze(0)
            
        elif self.threshold_method == ThresholdMethod.EDGE:
            # Edge-detection method:
            # detect edges with Canny first...
            edges = cv2.Canny(depth_uint8, 30, 100)
            # ...then find the outer contours and fill them.
            contours, _ = cv2.findContours(edges, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
            mask = np.zeros_like(depth_uint8)
            cv2.drawContours(mask, contours, -1, 255, -1)  # thickness=-1 fills contour interiors
            # Return the filled mask.
            return torch.tensor(mask / 255.0, dtype=torch.float).unsqueeze(0)
            
        elif self.threshold_method == ThresholdMethod.PERCENTILE:
            # Percentile method.
            threshold = np.percentile(depth_np, self.percentile_value)
            return threshold
            
        elif self.threshold_method == ThresholdMethod.BBSNET:
            # BBSNet saliency-detection method.
            if rgb_image is None or depth_image is None:
                # Without the original images, fall back to the OTSU method.
                print("没有提供原始图像，BBSNet方法回退到OTSU方法")
                otsu_thresh, _ = cv2.threshold(
                    depth_uint8, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU
                )
                return otsu_thresh / 255.0
                
            # Generate the saliency map with BBSNet.
            saliency_map = self.bbsnet_detector.detect(rgb_image, depth_image)
            
            # Binarize the saliency map.
            saliency_uint8 = (saliency_map * 255).astype(np.uint8)
            _, binary_mask = cv2.threshold(
                saliency_uint8, 
                int(self.depth_threshold * 255),  # binarize with the fixed threshold
                255, 
                cv2.THRESH_BINARY
            )
            
            # Convert to a tensor; the size must match the input depth_tensor.
            binary_tensor = torch.tensor(binary_mask / 255.0, dtype=torch.float).unsqueeze(0)
            
            # Resize (nearest) if the spatial shapes differ.
            if binary_tensor.shape[-2:] != depth_tensor.shape[-2:]:
                binary_tensor = F.interpolate(
                    binary_tensor.unsqueeze(0),
                    size=depth_tensor.shape[-2:],
                    mode='nearest'
                ).squeeze(0)
            
            return binary_tensor
            
        else:
            # Default: fall back to the static threshold.
            return self.depth_threshold
            
    def __getitem__(self, idx):
        # There is only one image, so idx is ignored.
        # Load and preprocess the RGB image.
        image = PIL.Image.open(self.rgb_path).convert("RGB")
        image_transformed = self.transform_img(image)
        # Load the depth map (as grayscale) and preprocess it.
        depth_image = PIL.Image.open(self.depth_path).convert("L")
        depth_tensor = self.transform_depth(depth_image)
        
        # Build the foreground mask.
        try:
            if self.threshold_method in [ThresholdMethod.ADAPTIVE, ThresholdMethod.EDGE]:
                # These methods return a mask directly.
                foreground_mask = self.calculate_threshold(depth_tensor)
            elif self.threshold_method == ThresholdMethod.BBSNET:
                # The BBSNet method needs the original PIL images.
                foreground_mask = self.calculate_threshold(depth_tensor, image, depth_image)
            else:
                # These methods return a threshold that must still be applied.
                threshold = self.calculate_threshold(depth_tensor)
                foreground_mask = (depth_tensor >= threshold).float()
            
            # Ensure the mask has the expected (1, H, W) shape.
            if foreground_mask.dim() == 0:  # a scalar (e.g. a threshold tensor)
                foreground_mask = (depth_tensor >= foreground_mask).float()
            
            # Ensure the mask is a 3D tensor [1, H, W].
            if foreground_mask.dim() == 2:
                foreground_mask = foreground_mask.unsqueeze(0)
            
            # Ensure the spatial size matches the transformed image.
            if foreground_mask.shape[-2:] != image_transformed.shape[-2:]:
                foreground_mask = F.interpolate(
                    foreground_mask.unsqueeze(0) if foreground_mask.dim() == 3 else foreground_mask,
                    size=(image_transformed.shape[-2], image_transformed.shape[-1]),
                    mode='nearest'
                )
                if foreground_mask.dim() == 4:
                    foreground_mask = foreground_mask.squeeze(0)
            
            # Ensure the mask can be broadcast over the image channels.
            if foreground_mask.shape[0] != 1:
                foreground_mask = foreground_mask[0:1]
            
            # Apply the foreground mask to the image.
            masked_image = image_transformed * foreground_mask.expand_as(image_transformed)
            
        except Exception as e:
            # Best-effort fallback: use the unmasked image with an all-ones mask.
            print(f"前景掩膜生成错误: {str(e)}，使用原始图像")
            masked_image = image_transformed
            foreground_mask = torch.ones((1, image_transformed.shape[1], image_transformed.shape[2]), device=image_transformed.device)

        # Always create an empty ground-truth mask - no GT loading is attempted here.
        gt_mask = torch.zeros([1, image_transformed.shape[1], image_transformed.shape[2]])

        return {
            "image": masked_image,  # masked image used as the model input
            "original_image": image_transformed,  # unmasked image kept for comparison
            "depth": depth_tensor,
            "foreground_mask": foreground_mask,
            "mask": gt_mask,
            "classname": self.name,
            "anomaly": "unknown",
            "is_anomaly": 0,  # non-anomalous by default
            "image_name": os.path.basename(self.rgb_path),
            "image_path": self.rgb_path,
        }
        
    def __len__(self):
        return 1  # exactly one image


class MVTecDataset(torch.utils.data.Dataset):
    """
    Modified MVTec dataset that loads RGB and depth images together and
    builds a depth-based foreground mask in ``__getitem__``.

    Expected directory layout::

        <source>/
            <classname>/
                train/
                    good/
                        rgb/         (jpg images)
                        depth/       (png depth maps)
                test/
                    good/
                        rgb/
                        depth/
                    bad/
                        rgb/
                        depth/
    """
    SPLIT_TRAIN = "train"
    SPLIT_TEST = "test"

    def __init__(
        self,
        source,
        classname,
        resize=256,
        imagesize=224,
        split=DatasetSplit.TEST,
        train_val_split=1.0,
        depth_threshold=0.5,  # default for the static threshold method
        threshold_method=ThresholdMethod.OTSU,  # Otsu is the default method
        percentile_value=80,   # default percentile for the percentile method
        **kwargs,
    ):
        """
        Args:
            source: dataset root directory, e.g. /path/to/mvtec
            classname: class to load; ``None`` loads every class in ``_CLASSNAMES``
            resize: edge length images are resized to before cropping
            imagesize: edge length of the final center crop
            split: DatasetSplit.TRAIN, TEST or VAL
            train_val_split: fraction of data kept for TRAIN (remainder is VAL)
            depth_threshold: fixed threshold for STATIC (also BBSNET binarization)
            threshold_method: ThresholdMethod used to build the foreground mask
            percentile_value: percentile for ThresholdMethod.PERCENTILE
        """
        super().__init__()
        self.source = source          # e.g.: /home/guojing/project/patchcore/mvtec
        self.split = split            # DatasetSplit.TRAIN / TEST / VAL
        self.classnames_to_use = [classname] if classname is not None else _CLASSNAMES
        self.train_val_split = train_val_split
        self.depth_threshold = depth_threshold
        self.threshold_method = threshold_method
        self.percentile_value = percentile_value
        self.name = classname         # downstream code expects a `name` attribute

        # Initialize the saliency detector only when the BBSNet method is used.
        if self.threshold_method == ThresholdMethod.BBSNET:
            self.bbsnet_detector = BBSNetSaliencyDetector()

        self.transform_img = transforms.Compose([
            transforms.Resize(resize),
            transforms.CenterCrop(imagesize),
            transforms.ToTensor(),
            transforms.Normalize(mean=IMAGENET_MEAN, std=IMAGENET_STD),
        ])
        self.transform_mask = transforms.Compose([
            transforms.Resize(resize),
            transforms.CenterCrop(imagesize),
            transforms.ToTensor(),
        ])
        # Depth maps are simply converted to tensors in [0, 1].
        self.transform_depth = transforms.Compose([
            transforms.Resize(resize),
            transforms.CenterCrop(imagesize),
            transforms.ToTensor(),
        ])

        self.imagesize = (3, imagesize, imagesize)
        self.transform_std = IMAGENET_STD
        self.transform_mean = IMAGENET_MEAN

        self.imgpaths_per_class, self.data_to_iterate = self.get_image_data()

    def get_image_data(self):
        """
        Collect image paths following the directory layout:
            source/classname/split/anomaly/rgb    (RGB images)
            source/classname/split/anomaly/depth  (depth maps)
        where anomaly may be "good", "bad" or any other anomaly category.

        Returns:
            (imgpaths_per_class, data_to_iterate) where data_to_iterate is a
            flat list of [classname, anomaly, rgb_path, depth_path, mask_path]
            entries covering ALL requested classes.

        Raises:
            FileNotFoundError: if an expected directory is missing.
        """
        imgpaths_per_class = {}
        depthpaths_per_class = {}
        maskpaths_per_class = {}
        # Accumulated across ALL classes. Bug fix: this list was previously
        # re-initialized inside the per-class loop, so only the last class's
        # samples survived when multiple classnames were in use.
        data_to_iterate = []

        for classname in self.classnames_to_use:
            # Build the split path, e.g. <source>/blade8/test
            class_path = os.path.join(self.source, classname, self.split.value)
            if not os.path.isdir(class_path):
                raise FileNotFoundError(f"目录不存在: {class_path}")

            anomaly_types = sorted(os.listdir(class_path))
            imgpaths_per_class[classname] = {}
            depthpaths_per_class[classname] = {}
            maskpaths_per_class[classname] = {}

            for anomaly in anomaly_types:
                # Each anomaly directory contains `rgb` and `depth` subfolders.
                rgb_dir = os.path.join(class_path, anomaly, "rgb")
                depth_dir = os.path.join(class_path, anomaly, "depth")

                if not os.path.isdir(rgb_dir):
                    raise FileNotFoundError(f"找不到目录: {rgb_dir}")
                if not os.path.isdir(depth_dir):
                    raise FileNotFoundError(f"找不到目录: {depth_dir}")

                rgb_files = sorted(os.listdir(rgb_dir))
                imgpaths_per_class[classname][anomaly] = [
                    os.path.join(rgb_dir, fname) for fname in rgb_files
                ]
                # Depth maps are assumed to share the RGB file name with a
                # ".png" suffix.
                depthpaths_per_class[classname][anomaly] = [
                    os.path.join(depth_dir, os.path.splitext(fname)[0] + ".png")
                    for fname in rgb_files
                ]

                # Optional train/val split of the per-anomaly image lists.
                if self.train_val_split < 1.0:
                    n_images = len(imgpaths_per_class[classname][anomaly])
                    split_idx = int(n_images * self.train_val_split)
                    if self.split == DatasetSplit.TRAIN:
                        imgpaths_per_class[classname][anomaly] = imgpaths_per_class[classname][anomaly][:split_idx]
                        depthpaths_per_class[classname][anomaly] = depthpaths_per_class[classname][anomaly][:split_idx]
                    elif self.split == DatasetSplit.VAL:
                        imgpaths_per_class[classname][anomaly] = imgpaths_per_class[classname][anomaly][split_idx:]
                        depthpaths_per_class[classname][anomaly] = depthpaths_per_class[classname][anomaly][split_idx:]

                # No ground-truth masks are available; record None for every
                # anomaly type. Bug fix: the old else-branch always wrote the
                # "good" key regardless of the actual anomaly name.
                maskpaths_per_class[classname][anomaly] = None

            for anomaly in anomaly_types:
                for i, rgb_path in enumerate(imgpaths_per_class[classname][anomaly]):
                    depth_path = depthpaths_per_class[classname][anomaly][i]
                    if self.split == DatasetSplit.TEST and anomaly != "good":
                        mask_path = maskpaths_per_class[classname][anomaly]
                    else:
                        mask_path = None
                    data_to_iterate.append([classname, anomaly, rgb_path, depth_path, mask_path])

        return imgpaths_per_class, data_to_iterate

    def calculate_threshold(self, depth_tensor, rgb_image=None, depth_image=None):
        """
        Compute the depth-map threshold (or mask) for the selected method.

        Args:
            depth_tensor: depth tensor of shape (1, H, W), values in [0, 1]
            rgb_image: original RGB PIL image (required by the BBSNet method)
            depth_image: original depth PIL image (required by the BBSNet method)

        Returns:
            Either a scalar threshold or a foreground-mask tensor of shape (1, H, W).
        """
        # Convert the depth map to a numpy array / 8-bit image for OpenCV.
        depth_np = depth_tensor.squeeze().cpu().numpy()
        depth_uint8 = (depth_np * 255).astype(np.uint8)

        if self.threshold_method == ThresholdMethod.STATIC:
            # Static threshold: return the fixed, user-supplied value.
            return self.depth_threshold

        elif self.threshold_method == ThresholdMethod.OTSU:
            # Otsu adaptive threshold.
            otsu_thresh, _ = cv2.threshold(
                depth_uint8, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU
            )
            return otsu_thresh / 255.0

        elif self.threshold_method == ThresholdMethod.ADAPTIVE:
            # Adaptive mean threshold.
            block_size = 11  # must be odd
            c = 2            # constant subtracted from the local mean
            binary = cv2.adaptiveThreshold(
                depth_uint8, 255, cv2.ADAPTIVE_THRESH_MEAN_C,
                cv2.THRESH_BINARY, block_size, c
            )
            # This method returns the mask directly.
            return torch.tensor(binary / 255.0, dtype=torch.float).unsqueeze(0)

        elif self.threshold_method == ThresholdMethod.EDGE:
            # Edge-detection method:
            # detect edges with Canny first...
            edges = cv2.Canny(depth_uint8, 30, 100)
            # ...then find the outer contours and fill them.
            contours, _ = cv2.findContours(edges, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
            mask = np.zeros_like(depth_uint8)
            cv2.drawContours(mask, contours, -1, 255, -1)  # thickness=-1 fills contour interiors
            # Return the filled mask.
            return torch.tensor(mask / 255.0, dtype=torch.float).unsqueeze(0)

        elif self.threshold_method == ThresholdMethod.PERCENTILE:
            # Percentile method.
            threshold = np.percentile(depth_np, self.percentile_value)
            return threshold

        elif self.threshold_method == ThresholdMethod.BBSNET:
            # BBSNet saliency-detection method.
            if rgb_image is None or depth_image is None:
                # Without the original images, fall back to the OTSU method.
                print("没有提供原始图像，BBSNet方法回退到OTSU方法")
                otsu_thresh, _ = cv2.threshold(
                    depth_uint8, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU
                )
                return otsu_thresh / 255.0

            # Generate the saliency map with BBSNet.
            saliency_map = self.bbsnet_detector.detect(rgb_image, depth_image)

            # Binarize the saliency map.
            saliency_uint8 = (saliency_map * 255).astype(np.uint8)
            _, binary_mask = cv2.threshold(
                saliency_uint8,
                int(self.depth_threshold * 255),  # binarize with the fixed threshold
                255,
                cv2.THRESH_BINARY
            )

            # Convert to a tensor; the size must match the input depth_tensor.
            binary_tensor = torch.tensor(binary_mask / 255.0, dtype=torch.float).unsqueeze(0)

            # Resize (nearest) if the spatial shapes differ.
            if binary_tensor.shape[-2:] != depth_tensor.shape[-2:]:
                binary_tensor = F.interpolate(
                    binary_tensor.unsqueeze(0),
                    size=depth_tensor.shape[-2:],
                    mode='nearest'
                ).squeeze(0)

            return binary_tensor

        else:
            # Default: fall back to the static threshold.
            return self.depth_threshold

    def __getitem__(self, idx):
        classname, anomaly, rgb_path, depth_path, mask_path = self.data_to_iterate[idx]

        # Load the original RGB image and depth map (needed at full size for BBSNet).
        orig_rgb_image = PIL.Image.open(rgb_path).convert("RGB")
        orig_depth_image = PIL.Image.open(depth_path).convert("L")

        # Preprocess both images.
        image = self.transform_img(orig_rgb_image)
        depth_tensor = self.transform_depth(orig_depth_image)

        # Build the foreground mask.
        try:
            if self.threshold_method in [ThresholdMethod.ADAPTIVE, ThresholdMethod.EDGE]:
                # These methods return a mask directly.
                foreground_mask = self.calculate_threshold(depth_tensor)
            elif self.threshold_method == ThresholdMethod.BBSNET:
                # The BBSNet method needs the original PIL images.
                foreground_mask = self.calculate_threshold(depth_tensor, orig_rgb_image, orig_depth_image)
            else:
                # These methods return a threshold that must still be applied.
                threshold = self.calculate_threshold(depth_tensor)
                foreground_mask = (depth_tensor >= threshold).float()

            # Ensure the mask has the expected (1, H, W) shape.
            if foreground_mask.dim() == 0:  # a scalar (e.g. a threshold tensor)
                foreground_mask = (depth_tensor >= foreground_mask).float()

            # Ensure the mask is a 3D tensor [1, H, W].
            if foreground_mask.dim() == 2:
                foreground_mask = foreground_mask.unsqueeze(0)

            # Ensure the spatial size matches the transformed image.
            if foreground_mask.shape[-2:] != image.shape[-2:]:
                foreground_mask = F.interpolate(
                    foreground_mask.unsqueeze(0) if foreground_mask.dim() == 3 else foreground_mask,
                    size=(image.shape[-2], image.shape[-1]),
                    mode='nearest'
                )
                if foreground_mask.dim() == 4:
                    foreground_mask = foreground_mask.squeeze(0)

            # Ensure the mask can be broadcast over the image channels.
            if foreground_mask.shape[0] != 1:
                foreground_mask = foreground_mask[0:1]

            # Apply the foreground mask to the image.
            masked_image = image * foreground_mask.expand_as(image)

        except Exception as e:
            # Best-effort fallback: use the unmasked image with an all-ones mask.
            print(f"前景掩膜生成错误: {str(e)}，使用原始图像")
            masked_image = image
            foreground_mask = torch.ones((1, image.shape[1], image.shape[2]), device=image.device)

        # Always create an empty ground-truth mask - no GT loading is attempted here.
        gt_mask = torch.zeros([1, image.shape[1], image.shape[2]])

        return {
            "image": masked_image,  # masked image used as the model input
            "original_image": image,  # unmasked image kept for comparison
            "depth": depth_tensor,
            "foreground_mask": foreground_mask,
            "mask": gt_mask,
            "classname": classname,
            "anomaly": anomaly,
            "is_anomaly": int(anomaly != "good"),
            "image_name": "/".join(rgb_path.split("/")[-4:]),
            "image_path": rgb_path,
        }

    def __len__(self):
        return len(self.data_to_iterate)