import glob
import json
import os
import random

import cv2
import numpy as np
import torch
from torch.utils.data import Dataset

from augmentation import create_augmentation_from_args
from utils.edge_preserving_gray import rgb_to_gray_edge_preserving

# Supported dataset identifiers. 'CLASSIC' denotes single-image inference on
# an arbitrary directory of images (no ground truth available).
DATASET_NAMES = [
    'BIPED',
    'BSDS',
    'BRIND',
    'BSDS300',
    'CID',
    'DCD',
    'MDBD',
    'PASCAL',
    'NYUD',
    'CLASSIC'
]

def dataset_info(dataset_name, is_linux=True):
    """Return the configuration dict for *dataset_name*.

    Every entry exposes the same schema so callers can rely on all keys
    being present: ``img_height``/``img_width`` (network input size),
    ``train_list``/``test_list`` (list-file names, ``None`` when that
    split is unavailable), ``data_dir`` (dataset root) and ``yita``
    (edge threshold used at evaluation time).

    Args:
        dataset_name: one of ``DATASET_NAMES``.
        is_linux: select the Linux path layout (default) or Windows.

    Returns:
        dict: the configuration for the requested dataset.

    Raises:
        KeyError: if ``dataset_name`` is not a known dataset.
    """
    if is_linux:
        config = {
            'BSDS': {
                'img_height': 768,
                'img_width': 768,
                'train_list': 'train_pair.lst',
                'test_list': 'val_pair.lst',
                'data_dir': '/home/dd/working/data/dexined/cropped_hy_images_split',
                'yita': 0.5
            },
            'BRIND': {
                'img_height': 512,  # 321
                'img_width': 512,  # 481
                'train_list': 'train_pair2.lst',
                'test_list': 'test_pair.lst',
                'data_dir': '/opt/dataset/BRIND',  # mean_rgb
                'yita': 0.5
            },
            'BSDS300': {
                'img_height': 512,  # 321
                'img_width': 512,  # 481
                'test_list': 'test_pair.lst',
                'train_list': None,
                'data_dir': '/opt/dataset/BSDS300',  # NIR
                'yita': 0.5
            },
            'PASCAL': {
                'img_height': 416,  # 375
                'img_width': 512,  # 500
                'test_list': 'test_pair.lst',
                'train_list': None,
                'data_dir': '/opt/dataset/PASCAL',  # mean_rgb
                'yita': 0.3
            },
            'CID': {
                'img_height': 512,
                'img_width': 512,
                'test_list': 'test_pair.lst',
                'train_list': None,
                'data_dir': '/opt/dataset/CID',  # mean_rgb
                'yita': 0.3
            },
            'NYUD': {
                'img_height': 448,  # 425
                'img_width': 560,  # 560
                'test_list': 'test_pair.lst',
                'train_list': None,
                'data_dir': '/opt/dataset/NYUD',  # mean_rgb
                'yita': 0.5
            },
            'MDBD': {
                'img_height': 720,
                'img_width': 1280,
                'test_list': 'test_pair.lst',
                'train_list': 'train_pair.lst',
                'data_dir': '/opt/dataset/MDBD',  # mean_rgb
                'yita': 0.3
            },
            'BIPED': {
                'img_height': 720,  # 720 # 1088
                'img_width': 1280,  # 1280 # 1920
                'test_list': 'test_pair.lst',
                'train_list': 'train_rgb.lst',
                'data_dir': 'data/BIPEDv2/BIPED',  # mean_rgb
                'yita': 0.5
            },
            'CLASSIC': {
                'img_height': 512,
                'img_width': 512,
                'test_list': None,
                'train_list': None,
                'data_dir': 'data',  # mean_rgb
                'yita': 0.5
            },
            'DCD': {
                'img_height': 352,  # 240
                'img_width': 480,  # 360
                'test_list': 'test_pair.lst',
                'train_list': None,
                'data_dir': '/opt/dataset/DCD',  # mean_rgb
                'yita': 0.2
            }
        }
    else:
        # Windows layout. FIX: every entry now carries an explicit
        # 'train_list' key (previously missing for BSDS300/PASCAL/CID/
        # NYUD/DCD, which made cfg['train_list'] raise KeyError only on
        # Windows while working on Linux).
        config = {
            'BSDS': {
                'img_height': 512,  # 321
                'img_width': 512,  # 481
                'test_list': 'test_pair.lst',
                'train_list': 'train_pair.lst',
                'data_dir': 'C:/Users/xavysp/dataset/BSDS',  # mean_rgb
                'yita': 0.5
            },
            'BSDS300': {
                'img_height': 512,  # 321
                'img_width': 512,  # 481
                'test_list': 'test_pair.lst',
                'train_list': None,
                'data_dir': 'C:/Users/xavysp/dataset/BSDS300',  # NIR
                'yita': 0.5
            },
            'PASCAL': {
                'img_height': 375,
                'img_width': 500,
                'test_list': 'test_pair.lst',
                'train_list': None,
                'data_dir': 'C:/Users/xavysp/dataset/PASCAL',  # mean_rgb
                'yita': 0.3
            },
            'CID': {
                'img_height': 512,
                'img_width': 512,
                'test_list': 'test_pair.lst',
                'train_list': None,
                'data_dir': 'C:/Users/xavysp/dataset/CID',  # mean_rgb
                'yita': 0.3
            },
            'NYUD': {
                'img_height': 425,
                'img_width': 560,
                'test_list': 'test_pair.lst',
                'train_list': None,
                'data_dir': 'C:/Users/xavysp/dataset/NYUD',  # mean_rgb
                'yita': 0.5
            },
            'MDBD': {
                'img_height': 720,
                'img_width': 1280,
                'test_list': 'test_pair.lst',
                'train_list': 'train_pair.lst',
                'data_dir': 'C:/Users/xavysp/dataset/MDBD',  # mean_rgb
                'yita': 0.3
            },
            'BIPED': {
                'img_height': 720,  # 720
                'img_width': 1280,  # 1280
                'test_list': 'test_pair.lst',
                'train_list': 'train_rgb.lst',
                'data_dir': 'C:/Users/xavysp/dataset/BIPED',  # WIN: '../.../dataset/BIPED/edges'
                'yita': 0.5
            },
            'CLASSIC': {
                'img_height': 512,
                'img_width': 512,
                'test_list': None,
                'train_list': None,
                'data_dir': 'data',  # mean_rgb
                'yita': 0.5
            },
            'DCD': {
                'img_height': 240,
                'img_width': 360,
                'test_list': 'test_pair.lst',
                'train_list': None,
                'data_dir': 'C:/Users/xavysp/dataset/DCD',  # mean_rgb
                'yita': 0.2
            }
        }
    return config[dataset_name]

class TestDataset(Dataset):
    """Dataset used at inference/evaluation time.

    For ``CLASSIC`` the data root is simply a directory of images (no
    ground truth); for every other dataset the samples come from a list
    file of (image, label) pairs — plain text, or JSON for BIPED.
    """

    def __init__(self,
                 data_root,
                 test_data,
                 mean_bgr,
                 img_height,
                 img_width,
                 test_list=None,
                 arg=None
                 ):
        """
        Args:
            data_root: dataset root directory.
            test_data: dataset name, one of ``DATASET_NAMES``.
            mean_bgr: per-channel BGR mean subtracted from inputs.
            img_height: target height for CLASSIC inputs.
            img_width: target width for CLASSIC inputs.
            test_list: list-file name (required for non-CLASSIC data).
            arg: parsed CLI namespace; ``grayscale``/``gray_method`` and
                ``test_img_width``/``test_img_height`` are read from it.

        Raises:
            ValueError: if ``test_data`` is not a supported dataset.
        """
        if test_data not in DATASET_NAMES:
            raise ValueError(f"Unsupported dataset: {test_data}")

        self.data_root = data_root
        self.test_data = test_data
        self.test_list = test_list
        self.args = arg
        self.mean_bgr = mean_bgr
        self.img_height = img_height
        self.img_width = img_width
        self.data_index = self._build_index()

        print(f"mean_bgr: {self.mean_bgr}")

    def _build_index(self):
        """Collect sample paths.

        Returns:
            For CLASSIC: ``[image_names, None]`` (bare file names).
            Otherwise: a list of ``(image_path, label_path)`` tuples with
            ``data_root`` already joined in.
        """
        sample_indices = []
        if self.test_data == "CLASSIC":
            # Single-image inference: collect only image files, skipping
            # sub-directories and non-image entries.
            assert os.path.isdir(self.data_root), f"无效目录: {self.data_root}"
            valid_ext = ('.jpg', '.jpeg', '.png', '.bmp', '.tif', '.tiff')
            images_path = []
            for name in sorted(os.listdir(self.data_root)):
                p = os.path.join(self.data_root, name)
                if os.path.isfile(p) and name.lower().endswith(valid_ext):
                    images_path.append(name)
            assert len(images_path) > 0, f"CLASSIC 模式下目录无可读图像: {self.data_root}"
            labels_path = None
            sample_indices = [images_path, labels_path]
        else:
            # Image and label paths come from a list file.
            if not self.test_list:
                raise ValueError(
                    f"Test list not provided for dataset: {self.test_data}")

            list_name = os.path.join(self.data_root, self.test_list)
            if self.test_data.upper() == 'BIPED':
                # BIPED uses a JSON list of [image, label] pairs.
                with open(list_name) as f:
                    pairs = json.load(f)
            else:
                # Plain-text list: one "image label" pair per line.
                with open(list_name, 'r') as f:
                    pairs = [line.strip().split() for line in f.readlines()]

            for pair in pairs:
                sample_indices.append(
                    (os.path.join(self.data_root, pair[0]),
                     os.path.join(self.data_root, pair[1]),))
        return sample_indices

    def __len__(self):
        # CLASSIC stores [image_names, None]; others store one tuple per sample.
        return len(self.data_index[0]) if self.test_data.upper() == 'CLASSIC' else len(self.data_index)

    def __getitem__(self, idx):
        """Load one sample; returns images/labels/file name/original shape."""
        # CLASSIC stores [image_names, None]; others store (img, gt) tuples.
        if self.data_index[1] is None:
            image_path = self.data_index[0][idx]
        else:
            image_path = self.data_index[idx][0]
        label_path = None if self.test_data == "CLASSIC" else self.data_index[idx][1]
        img_name = os.path.basename(image_path)
        file_name = os.path.splitext(img_name)[0] + ".png"

        # Optional edge-preserving grayscale conversion flags.
        grayscale_mode = bool(self.args) and bool(getattr(self.args, 'grayscale', False))
        gray_method = getattr(self.args, 'gray_method', 'gradient_fusion') if self.args else 'gradient_fusion'

        if self.test_data == "CLASSIC":
            # CLASSIC index holds bare file names: join with the data root.
            image_full = os.path.join(self.data_root, image_path)
            label_full = None
        else:
            # FIX: _build_index already joined data_root into these paths.
            # The previous re-join with data_root only worked when data_dir
            # was absolute (os.path.join drops the first arg then); with a
            # relative root it doubled the prefix.
            image_full = image_path
            label_full = label_path

        # Fail fast on unreadable files (previously the BIPED branch had no
        # check and failed later with a cryptic AttributeError).
        image = cv2.imread(image_full, cv2.IMREAD_COLOR)
        assert image is not None, f"图像读取失败: {image_full}"
        if grayscale_mode:
            image = rgb_to_gray_edge_preserving(image, method=gray_method)

        if label_full is not None:
            label = cv2.imread(label_full, cv2.IMREAD_COLOR)
            assert label is not None, f"标签读取失败: {label_full}"
        else:
            label = None

        im_shape = [image.shape[0], image.shape[1]]
        image, label = self.transform(img=image, gt=label)

        return dict(images=image, labels=label, file_names=file_name, image_shape=im_shape)

    def transform(self, img, gt):
        """Resize, mean-subtract and convert one (image, label) pair to tensors."""
        if self.test_data == "CLASSIC":
            img_height = self.img_height
            img_width = self.img_width
            print(
                f"actual size: {img.shape}, target size: {( img_height,img_width,)}")
            img = cv2.resize(img, (img_width, img_height))
            gt = None
        else:
            # Always resize to the configured test size so the network sees a
            # consistent input shape regardless of the original dimensions.
            img_width = self.args.test_img_width
            img_height = self.args.test_img_height
            img = cv2.resize(img, (img_width, img_height))
            gt = cv2.resize(gt, (img_width, img_height))

        img = np.array(img, dtype=np.float32)

        # Grayscale input uses a single (averaged) channel mean.
        # Handles both 2-D (H, W) and 3-D (H, W, 1) grayscale layouts.
        if len(img.shape) == 2 or (len(img.shape) == 3 and img.shape[-1] == 1):
            img -= np.mean(self.mean_bgr)
            if len(img.shape) == 2:
                img = np.expand_dims(img, axis=-1)  # ensure (H, W, 1) for transpose
        else:
            img -= self.mean_bgr

        img = img.transpose((2, 0, 1))
        img = torch.from_numpy(img.copy()).float()

        if self.test_data == "CLASSIC":
            # BUGFIX: img is a (C, H, W) tensor at this point, so the dummy
            # label must use the spatial dims img.shape[1:], not
            # img.shape[:2] (which would be (C, H)).
            gt = np.zeros(img.shape[1:])
            gt = torch.from_numpy(np.array([gt])).float()
        else:
            gt = np.array(gt, dtype=np.float32)
            if len(gt.shape) == 3:
                gt = gt[:, :, 0]
            gt /= 255.
            gt = torch.from_numpy(np.array([gt])).float()

        return img, gt

class BipedDataset(Dataset):
    """Training dataset for edge detection.

    Builds an index of (image, edge ground-truth, optional foreground-mask)
    triples from one of three layouts — the BIPED directory tree, a
    BSDS-style ``.lst`` text file, or a JSON pair list — and, per sample,
    applies optional augmentation before cropping/resizing to a square
    training patch in ``transform``.
    """
    train_modes = ['train', 'test', ]
    dataset_types = ['rgbr', ]
    data_types = ['aug', ]

    def __init__(self,
                 data_root,
                 img_height,
                 img_width,
                 mean_bgr,
                 train_mode='train',
                 dataset_type='rgbr',
                 #  is_scaling=None,
                 # Whether to crop image or otherwise resize image to match image height and width.
                 crop_img=False,
                 arg=None
                 ):
        """
        Args:
            data_root: dataset root directory.
            img_height: square patch size used by ``transform``.
            img_width: nominal patch width (see note on ``crop_size`` in
                ``transform`` — currently only img_height is used).
            mean_bgr: per-channel BGR mean subtracted from inputs.
            train_mode: one of ``train_modes``.
            dataset_type: one of ``dataset_types``.
            crop_img: interpreted as a crop *probability* in [0, 1]; with
                that probability a random crop is taken, otherwise resize.
            arg: parsed CLI namespace; augmentation/grayscale options and
                train_data/train_list are read from it.
        """
        self.data_root = data_root
        self.train_mode = train_mode
        self.dataset_type = dataset_type
        self.data_type = 'aug'  # be aware that this might change in the future
        self.img_height = img_height
        self.img_width = img_width
        self.mean_bgr = mean_bgr
        self.crop_img = crop_img
        self.arg = arg

        # Whether masks are used (they restrict where background
        # augmentation may act).
        self.use_mask = True

        # Initialize the generic data augmentor.
        self.augmentor = create_augmentation_from_args(arg) if arg else None
        if self.augmentor:
            print(f"数据增强已启用: {self.augmentor}")
        else:
            print("数据增强未启用")

        # Initialize the peripheral background replacement augmentor.
        self.peripheral_bg_aug = None
        if arg is not None and getattr(arg, 'use_augmentation', False):
            if getattr(arg, 'aug_peripheral_bg', False):
                bg_dir = getattr(arg, 'aug_peripheral_bg_dir', '').strip()
                if bg_dir and os.path.isdir(bg_dir):
                    self.peripheral_bg_aug = PeripheralBackgroundReplacement(
                        background_dir=bg_dir,
                        prob=getattr(arg, 'aug_peripheral_bg_prob', 0.3),
                        blur_kernel_size=getattr(arg, 'aug_peripheral_blur_kernel', 15),
                        shadow_preservation=getattr(arg, 'aug_shadow_preservation', False),
                        invert_mask=getattr(arg, 'aug_invert_mask', False)
                    )
                elif bg_dir:
                    print(f"警告: 外围背景图像目录不存在或无效，跳过外围背景替换增强: {bg_dir}")

        self.data_index = self._build_index()

    def _build_index(self):
        """Collect (image_path, gt_path, mask_path_or_None) triples.

        Layout is selected by ``arg.train_data``: 'biped' walks the BIPED
        directory tree, 'bsds' parses a whitespace-separated ``.lst``
        file, anything else is read as a JSON list of pairs/triples.
        """
        assert self.train_mode in self.train_modes, self.train_mode
        assert self.dataset_type in self.dataset_types, self.dataset_type
        assert self.data_type in self.data_types, self.data_type

        data_root = os.path.abspath(self.data_root)
        sample_indices = []
        if self.arg.train_data.lower()=='biped':

            images_path = os.path.join(data_root,
                                       'edges/imgs',
                                       self.train_mode,
                                       self.dataset_type,
                                       self.data_type)
            labels_path = os.path.join(data_root,
                                       'edges/edge_maps',
                                       self.train_mode,
                                       self.dataset_type,
                                       self.data_type)

            # Mask directory (only when masks are enabled).
            masks_path = None
            if self.use_mask:
                masks_path = os.path.join(data_root,
                                         'edges/masks',
                                         self.train_mode,
                                         self.dataset_type,
                                         self.data_type)
                if not os.path.exists(masks_path):
                    print(f"警告: mask目录不存在: {masks_path}，将不使用mask")
                    self.use_mask = False
                    masks_path = None

            for directory_name in os.listdir(images_path):
                image_directories = os.path.join(images_path, directory_name)
                for file_name_ext in os.listdir(image_directories):
                    file_name = os.path.splitext(file_name_ext)[0]
                    img_path = os.path.join(images_path, directory_name, file_name + '.jpg')
                    gt_path = os.path.join(labels_path, directory_name, file_name + '.png')

                    if self.use_mask and masks_path:
                        mask_path = os.path.join(masks_path, directory_name, file_name + '.png')
                        sample_indices.append((img_path, gt_path, mask_path))
                    else:
                        sample_indices.append((img_path, gt_path, None))
        else:
            # Fix: Use test_list if train_mode is 'test'
            if self.train_mode == 'test' and hasattr(self.arg, 'test_list'):
                list_file = self.arg.test_list
            else:
                list_file = self.arg.train_list

            file_path = os.path.join(data_root, list_file)
            if self.arg.train_data.lower()=='bsds':

                with open(file_path, 'r') as f:
                    files = f.readlines()
                files = [line.strip() for line in files]

                pairs = [line.split() for line in files]
                for pair in pairs:
                    tmp_img = pair[0]
                    tmp_gt = pair[1]
                    # BSDS format: an optional third column holds the mask path.
                    tmp_mask = pair[2] if len(pair) >= 3 and self.use_mask else None
                    sample_indices.append(
                        (os.path.join(data_root,tmp_img),
                         os.path.join(data_root,tmp_gt),
                         os.path.join(data_root,tmp_mask) if tmp_mask else None))
            else:
                with open(file_path) as f:
                    files = json.load(f)
                for pair in files:
                    tmp_img = pair[0]
                    tmp_gt = pair[1]
                    # JSON format: an optional third element holds the mask path.
                    tmp_mask = pair[2] if len(pair) >= 3 and self.use_mask else None
                    sample_indices.append(
                        (os.path.join(data_root, tmp_img),
                         os.path.join(data_root, tmp_gt),
                         os.path.join(data_root, tmp_mask) if tmp_mask else None))

        # Report how many samples actually carry a mask, and warn when the
        # mask-dependent background augmentation will be skipped.
        num_with_mask = sum(1 for item in sample_indices if len(item) > 2 and item[2] is not None)
        print(f"[Dataset] 总样本数: {len(sample_indices)}, 包含Mask的样本数: {num_with_mask}")

        if self.use_mask:
            if num_with_mask == 0:
                print("警告: 已启用Mask模式 (use_mask=True)，但在数据列表中未找到任何Mask路径！")
                print("      -> 背景增强将因为缺少Mask而被跳过。")
                print("      -> 请确保 .lst 文件包含第三列（Mask路径），或检查Mask文件是否存在。")
            elif num_with_mask < len(sample_indices):
                print(f"提示: 部分样本缺少Mask ({len(sample_indices) - num_with_mask}/{len(sample_indices)})，这些样本将跳过背景增强。")
            else:
                print("确认: 所有样本均包含Mask路径，背景增强将正常工作。")

        return sample_indices

    def __len__(self):
        return len(self.data_index)

    def __getitem__(self, idx):
        """Load one (image, label[, mask]) sample and transform it."""
        # get data sample
        image_path, label_path, mask_path = self.data_index[idx]

        # load data
        # Images are always loaded in color first.
        image = cv2.imread(image_path, cv2.IMREAD_COLOR)
        # In grayscale mode, convert with the edge-preserving method.
        if self.arg and hasattr(self.arg, 'grayscale') and self.arg.grayscale:
            gray_method = getattr(self.arg, 'gray_method', 'gradient_fusion')
            image = rgb_to_gray_edge_preserving(image, method=gray_method)
        label = cv2.imread(label_path, cv2.IMREAD_GRAYSCALE)

        # Load the mask when one is indexed and present on disk.
        mask = None
        if mask_path and os.path.exists(mask_path):
            mask = cv2.imread(mask_path, cv2.IMREAD_GRAYSCALE)

        image, label = self.transform(img=image, gt=label, mask=mask)
        return dict(images=image, labels=label, file_names=image_path)

    def transform(self, img, gt, mask=None):
        """Augment, normalize, crop/resize and tensor-ify one sample.

        Args:
            img: BGR (or single-channel) image with uint8-range values.
            gt: edge ground truth; scaled to [0, 1] here.
            mask: optional foreground mask forwarded to the peripheral
                background replacement.

        Returns:
            (img tensor (C, crop, crop), gt tensor (1, crop, crop))
        """
        gt = np.array(gt, dtype=np.float32)
        if len(gt.shape) == 3:
            gt = gt[:, :, 0]

        gt /= 255. # for DexiNed input and BDCN

        img = np.array(img, dtype=np.float32)

        # === Peripheral background replacement (before other augs; needs uint8) ===
        if self.peripheral_bg_aug and self.train_mode == 'train':
            # The replacement operates on uint8 inputs.
            img_uint8 = np.clip(img, 0, 255).astype(np.uint8)
            gt_uint8 = np.clip(gt * 255, 0, 255).astype(np.uint8)

            # Apply the replacement (mask limits the replaceable region).
            img_aug, gt_aug = self.peripheral_bg_aug(img_uint8, gt_uint8, mask)

            # Back to float32 / [0, 1] gt.
            img = img_aug.astype(np.float32)
            gt = gt_aug.astype(np.float32) / 255.0

        # === Remaining augmentations (before mean subtraction) ===
        if self.augmentor and self.train_mode == 'train':
            # Require gt to be 2-D and img 3-D before augmenting.
            if len(gt.shape) == 2 and len(img.shape) == 3:
                # Augmentations also operate on uint8.
                img_uint8 = np.clip(img, 0, 255).astype(np.uint8)
                gt_uint8 = np.clip(gt * 255, 0, 255).astype(np.uint8)

                # Apply the augmentations.
                img_aug, gt_aug = self.augmentor.random_augment(img_uint8, gt_uint8)

                # Back to float32 / [0, 1] gt.
                img = img_aug.astype(np.float32)
                gt = gt_aug.astype(np.float32) / 255.0

                # Keep grayscale images 3-D: (H, W, 1).
                if len(img.shape) == 2:
                    img = np.expand_dims(img, axis=-1)

        # Mean subtraction (after augmentation).
        # Grayscale images use a single averaged channel mean.
        if len(img.shape) == 3 and img.shape[-1] == 1:
            gray_mean = np.mean(self.mean_bgr)
            img -= gray_mean
        else:
            # Color image: subtract the BGR mean vector.
            img -= self.mean_bgr
        # NOTE(review): assumes img is 3-D here; a 2-D grayscale img that
        # skipped the augmentor branch would fail this unpack — confirm
        # rgb_to_gray_edge_preserving always returns (H, W, 1).
        i_h, i_w,_ = img.shape
        # data = []
        # if self.scale is not None:
        #     for scl in self.scale:
        #         img_scale = cv2.resize(img, None, fx=scl, fy=scl, interpolation=cv2.INTER_LINEAR)
        #         data.append(torch.from_numpy(img_scale.transpose((2, 0, 1))).float())
        #     return data, gt
        #  400 for BIPEd and 352 for BSDS check with 384
        # NOTE(review): both branches of this conditional yield img_height,
        # so img_width never affects the patch size — confirm intent.
        crop_size = self.img_height if self.img_height == self.img_width else self.img_height#448# MDBD=480 BIPED=480/400 BSDS=352

        # crop_img acts as a crop probability in [0, 1].
        crop_prob = float(self.crop_img)
        if random.random() < crop_prob and i_w > crop_size and i_h > crop_size:
            # Crop enabled: take a random square crop.
            i = random.randint(0, i_h - crop_size)
            j = random.randint(0, i_w - crop_size)
            img = img[i:i + crop_size , j:j + crop_size ]
            gt = gt[i:i + crop_size , j:j + crop_size ]
        else:
            # No crop: resize directly to the square patch.
            img = cv2.resize(img, dsize=(crop_size, crop_size))
            gt = cv2.resize(gt, dsize=(crop_size, crop_size))
            # Keep grayscale images 3-D after resize: (H, W, 1).
            if len(img.shape) == 2:
                img = np.expand_dims(img, axis=-1)

        # # Original logic (disabled; the crop_img parameter now controls behavior)
        # # for BSDS 352/BRIND
        # if i_w> crop_size and i_h>crop_size:
        #     i = random.randint(0, i_h - crop_size)
        #     j = random.randint(0, i_w - crop_size)
        #     img = img[i:i + crop_size , j:j + crop_size ]
        #     gt = gt[i:i + crop_size , j:j + crop_size ]

        # # for BIPED/MDBD
        # if np.random.random() > 0.4: #l
        #     h,w = gt.shape
        #     if i_w> 500 and i_h>500:
        #
        #         LR_img_size = crop_size #l BIPED=256, 240 200 # MDBD= 352 BSDS= 176
        #         i = random.randint(0, h - LR_img_size)
        #         j = random.randint(0, w - LR_img_size)
        #         # if img.
        #         img = img[i:i + LR_img_size , j:j + LR_img_size ]
        #         gt = gt[i:i + LR_img_size , j:j + LR_img_size ]
        #     else:
        #         LR_img_size = 352#256  # l BIPED=208-352, # MDBD= 352-480- BSDS= 176-320
        #         i = random.randint(0, h - LR_img_size)
        #         j = random.randint(0, w - LR_img_size)
        #         # if img.
        #         img = img[i:i + LR_img_size, j:j + LR_img_size]
        #         gt = gt[i:i + LR_img_size, j:j + LR_img_size]
        #         img = cv2.resize(img, dsize=(crop_size, crop_size), )
        #         gt = cv2.resize(gt, dsize=(crop_size, crop_size))

        # Boost weak edges, then clip to [0, 1] (BRIND-style thresholding).
        gt[gt > 0.1] +=0.2#0.4
        gt = np.clip(gt, 0., 1.)
        # gt[gt > 0.1] =1#0.4
        # gt = np.clip(gt, 0., 1.)
        # # for BIPED
        # gt[gt > 0.2] += 0.6# 0.5 for BIPED
        # gt = np.clip(gt, 0., 1.) # BIPED
        # # for MDBD
        # gt[gt > 0.1] +=0.7
        # gt = np.clip(gt, 0., 1.)
        # # For RCF input
        # # -----------------------------------
        # gt[gt==0]=0.
        # gt[np.logical_and(gt>0.,gt<0.5)] = 2.
        # gt[gt>=0.5]=1.
        #
        # gt = gt.astype('float32')
        # ----------------------------------

        # Ensure the image is 3-D (H, W, C) before the transpose.
        if len(img.shape) == 2:
            img = np.expand_dims(img, axis=-1)

        img = img.transpose((2, 0, 1))
        img = torch.from_numpy(img.copy()).float()
        gt = torch.from_numpy(np.array([gt])).float()
        return img, gt


class PeripheralBackgroundReplacement:
    """Replace only the region outside the foreground mask with a random
    background image.

    Pipeline per call: pick a background image, size it to the sample
    (tiling low-resolution backgrounds instead of blurring them up),
    binarize the provided mask into a foreground/background split, blur the
    background mask adaptively for a smooth boundary, then alpha-blend the
    background in. The edge ground truth is returned unchanged.
    """

    def __init__(self, background_dir, prob=0.3, blur_kernel_size=15, shadow_preservation=False, invert_mask=False):
        """
        Args:
            background_dir: directory of background images (searched
                recursively).
            prob: application probability in [0, 1].
            blur_kernel_size: Gaussian kernel size for the boundary
                transition (odd; even values are bumped up by one).
            shadow_preservation: multiply the new background by the original
                luminance so shadows survive the replacement.
            invert_mask: invert the mask convention (True: 0 = foreground,
                255 = background).

        Raises:
            ValueError: if the directory does not exist or contains no images.
        """
        self.prob = prob
        # Gaussian kernels must be odd.
        self.blur_kernel_size = blur_kernel_size if blur_kernel_size % 2 == 1 else blur_kernel_size + 1
        self.shadow_preservation = shadow_preservation
        self.invert_mask = invert_mask

        if not os.path.isdir(background_dir):
            raise ValueError(f"背景图像目录不存在: {background_dir}")

        # Collect background image paths. A single recursive glob with '**'
        # already covers the top-level directory, so the previous extra
        # non-recursive pass (which produced duplicates needing a set()
        # de-dup) is unnecessary. Sorting keeps the pool deterministic.
        valid_extensions = ['*.jpg', '*.jpeg', '*.png', '*.bmp', '*.tif', '*.tiff']
        found = set()
        for ext in valid_extensions:
            found.update(glob.glob(os.path.join(background_dir, '**', ext), recursive=True))
        self.bg_paths = sorted(found)

        if len(self.bg_paths) == 0:
            raise ValueError(f"未在 {background_dir} 找到任何背景图像")

        print(f"[外围背景替换增强] 初始化完成")
        print(f"  - 背景图像数量: {len(self.bg_paths)}")
        print(f"  - 应用概率: {prob:.1%}")
        print(f"  - 模糊核大小: {self.blur_kernel_size}px")
        print(f"  - 影子保留: {'启用' if shadow_preservation else '禁用'}")
        print(f"  - 替换模式: 仅最外层轮廓外（无形态学操作）")

    def __call__(self, image, edge_gt, mask=None):
        """Apply the peripheral background replacement.

        Args:
            image: BGR or grayscale image (H, W, 3) / (H, W) / (H, W, 1),
                uint8, values 0-255.
            edge_gt: edge annotation (H, W), uint8, values 0-255.
            mask: optional foreground mask (H, W), uint8. By default 255
                marks the protected foreground and 0 the replaceable
                background (flipped when ``invert_mask`` is set). Without a
                mask the sample is returned untouched.

        Returns:
            (augmented image uint8, unchanged edge_gt)
        """
        # Probability gate.
        if random.random() > self.prob:
            return image, edge_gt

        # No mask -> no safe region definition -> no-op.
        if mask is None:
            return image, edge_gt

        # Pick a random background image.
        bg_path = random.choice(self.bg_paths)
        bg_image = cv2.imread(bg_path, cv2.IMREAD_COLOR)
        if bg_image is None:
            # Unreadable background file: fall back to the original sample.
            return image, edge_gt

        # Normalize the input layout to (H, W, C).
        is_grayscale = False
        if len(image.shape) == 2:
            image = np.expand_dims(image, axis=-1)
            is_grayscale = True
        elif image.shape[-1] == 1:
            is_grayscale = True

        h, w = image.shape[:2]

        # Robustness fix: a mask sized differently from the image would
        # break the blending broadcast below; nearest-neighbour resize keeps
        # the mask values unsmoothed.
        if mask.shape[:2] != (h, w):
            mask = cv2.resize(mask, (w, h), interpolation=cv2.INTER_NEAREST)

        # === Smart background sizing ===
        # Avoid blurry results from upscaling low-resolution backgrounds:
        # tile when either background dimension is below 70% of the target.
        bg_h, bg_w = bg_image.shape[:2]
        use_tiling = (bg_h < h * 0.7) or (bg_w < w * 0.7)

        if use_tiling:
            # Strategy 1: tiling (suits texture backgrounds).
            tile_y = int(np.ceil(h / bg_h)) + 1
            tile_x = int(np.ceil(w / bg_w)) + 1
            bg_tiled = np.tile(bg_image, (tile_y, tile_x, 1))

            # Random crop from the tiled canvas for variety.
            max_y = bg_tiled.shape[0] - h
            max_x = bg_tiled.shape[1] - w
            start_y = random.randint(0, max(0, max_y))
            start_x = random.randint(0, max(0, max_x))
            bg_image = bg_tiled[start_y:start_y+h, start_x:start_x+w]
        else:
            # Strategy 2: direct resize (background resolution is sufficient).
            if bg_h > h and bg_w > w:
                # Downscaling: INTER_AREA gives the best quality.
                bg_image = cv2.resize(bg_image, (w, h), interpolation=cv2.INTER_AREA)
            else:
                # Upscaling: INTER_CUBIC as a quality/speed compromise.
                bg_image = cv2.resize(bg_image, (w, h), interpolation=cv2.INTER_CUBIC)

        # Match the background's channel layout to the input.
        if is_grayscale:
            if len(bg_image.shape) == 3:
                bg_image = cv2.cvtColor(bg_image, cv2.COLOR_BGR2GRAY)
            bg_image = np.expand_dims(bg_image, axis=-1)

        # === Foreground definition from the mask ===
        # Thresholding tolerates JPEG noise and 0/1-valued masks alike.
        if self.invert_mask:
            # Inverted convention: 0 = foreground (protected), 255 = background.
            foreground_mask = (mask == 0).astype(np.float32)
        else:
            # Default convention: 255 = foreground (protected), 0 = background.
            foreground_mask = (mask > 0).astype(np.float32)

        # Background region = complement of the foreground.
        background_mask = 1.0 - foreground_mask

        # === Adaptive boundary blur for a smooth transition ===
        if self.blur_kernel_size > 1:
            # Distance (px) of each foreground pixel from the boundary.
            dist_transform = cv2.distanceTransform(
                (foreground_mask * 255).astype(np.uint8),
                cv2.DIST_L2,
                5
            )
            dist_norm = np.clip(dist_transform / 50.0, 0, 1)

            # Blend a strong and a weak blur: strong far from the boundary
            # (smooth transition), weak near it (preserves edge sharpness).
            background_mask_strong = cv2.GaussianBlur(
                background_mask,
                (self.blur_kernel_size, self.blur_kernel_size),
                0
            )
            background_mask_weak = cv2.GaussianBlur(
                background_mask,
                (max(3, self.blur_kernel_size // 3) | 1, max(3, self.blur_kernel_size // 3) | 1),
                0
            )
            background_mask_blurred = (1 - dist_norm) * background_mask_weak + dist_norm * background_mask_strong
        else:
            background_mask_blurred = background_mask

        # === Blend original and background ===
        result = image.copy().astype(np.float32)
        bg_image_float = bg_image.astype(np.float32)

        # Optional shadow/lighting preservation: multiply the new background
        # by the original luminance so dark (shadowed) areas stay dark.
        if self.shadow_preservation:
            if image.shape[-1] == 3:
                luminance = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY).astype(np.float32) / 255.0
            else:
                luminance = image.squeeze().astype(np.float32) / 255.0
            if len(bg_image_float.shape) == 3:
                luminance = np.expand_dims(luminance, axis=-1)
            bg_image_float = bg_image_float * luminance

        # Expand the mask to the image's channel count.
        if len(result.shape) == 3:
            mask_3d = np.expand_dims(background_mask_blurred, axis=-1)
        else:
            mask_3d = background_mask_blurred

        # Weighted blend: mask 1 -> background, mask 0 -> original.
        result = (1.0 - mask_3d) * result + mask_3d * bg_image_float

        return np.clip(result, 0, 255).astype(np.uint8), edge_gt

    def __repr__(self):
        return (f"PeripheralBackgroundReplacement("
                f"num_backgrounds={len(self.bg_paths)}, "
                f"prob={self.prob}, "
                f"blur_kernel={self.blur_kernel_size}, "
                f"shadow_preservation={self.shadow_preservation})")


# 导入glob模块（如果文件顶部没有导入）
import glob

class SegmentationDataset(BipedDataset):
    """Binary-segmentation dataset built on top of :class:`BipedDataset`.

    Reuses the parent's file indexing, but treats the third entry of each
    index tuple (the mask path) as the training label; the edge map is
    ignored.  Images go through the same augmentation pipeline as the edge
    dataset, masks are binarized (any value > 0 becomes foreground) and
    returned with shape (1, H, W).
    """

    def __init__(self,
                 data_root,
                 img_height,
                 img_width,
                 mean_bgr,
                 train_mode='train',
                 dataset_type='rgbr',
                 crop_img=False,
                 arg=None,
                 preprocessing=None):
        """
        Args:
            data_root: dataset root directory (forwarded to BipedDataset).
            img_height: target height after crop/resize.
            img_width: target width after crop/resize.
            mean_bgr: per-channel BGR mean subtracted when `preprocessing`
                is not supplied.
            train_mode: 'train' enables the augmentation branches.
            dataset_type: forwarded to BipedDataset.
            crop_img: truthy value cast to float and used as the random-crop
                probability in `transform_segmentation`.
            arg: optional namespace; `arg.grayscale` / `arg.gray_method`
                control edge-preserving grayscale conversion.
            preprocessing: optional callable (e.g. an SMP preprocessing fn)
                applied to the RGB image instead of mean subtraction.
        """
        super().__init__(data_root, img_height, img_width, mean_bgr, train_mode, dataset_type, crop_img, arg)
        # Masks are mandatory for segmentation, regardless of base-class defaults.
        self.use_mask = True
        self.preprocessing = preprocessing

    def __getitem__(self, idx):
        # Reuse the index from BipedDataset.
        # data_index[idx] yields (image_path, label_path[, mask_path]); the edge
        # label (index 1) is unused here — the mask is the segmentation target.
        data_item = self.data_index[idx]
        image_path = data_item[0]
        mask_path = data_item[2] if len(data_item) > 2 else None

        # 1. Load image (BGR, as read by OpenCV).
        image = cv2.imread(image_path, cv2.IMREAD_COLOR)
        if image is None:
            raise ValueError(f"Failed to load image: {image_path}")

        # Optional edge-preserving grayscale conversion; keep an explicit
        # channel axis so downstream code can always assume 3-D images.
        if self.arg and hasattr(self.arg, 'grayscale') and self.arg.grayscale:
            gray_method = getattr(self.arg, 'gray_method', 'gradient_fusion')
            image = rgb_to_gray_edge_preserving(image, method=gray_method)
            if len(image.shape) == 2:
                image = np.expand_dims(image, axis=-1)

        # 2. Load mask (the label). Fall back to an all-zero mask when the
        # file is missing so training does not crash on incomplete data.
        if mask_path and os.path.exists(mask_path):
            mask = cv2.imread(mask_path, cv2.IMREAD_GRAYSCALE)
        else:
            print(f"Warning: No mask found for {image_path}, using zeros.")
            mask = np.zeros((image.shape[0], image.shape[1]), dtype=np.uint8)

        # 3. Augment, normalize and convert to tensors.
        image, mask = self.transform_segmentation(image, mask)

        return dict(images=image, labels=mask, file_names=os.path.basename(image_path))

    def transform_segmentation(self, img, mask):
        """Augment/normalize an image-mask pair and return (C,H,W) tensors.

        Returns:
            (img, mask): float tensors; img is (C, H, W), mask is (1, H, W)
            with values in {0.0, 1.0}.
        """
        # 1. Mask -> float32 single channel in [0, 1].
        if mask is None:
            mask = np.zeros(img.shape[:2], dtype=np.float32)

        mask = np.array(mask, dtype=np.float32)
        if len(mask.shape) == 3:
            mask = mask[:, :, 0]

        mask /= 255.

        img = np.array(img, dtype=np.float32)

        # 2. Augmentation: peripheral background replacement (train only).
        # The augmenter operates on uint8, so round-trip through uint8.
        if self.peripheral_bg_aug and self.train_mode == 'train':
            img_uint8 = np.clip(img, 0, 255).astype(np.uint8)
            mask_uint8 = np.clip(mask * 255, 0, 255).astype(np.uint8)

            # The mask serves both as the label to transform (edge_gt slot)
            # and as the guidance mask for the replacement region.
            img_aug, mask_aug = self.peripheral_bg_aug(img_uint8, mask_uint8, mask=mask_uint8)

            img = img_aug.astype(np.float32)
            mask = mask_aug.astype(np.float32) / 255.0

        # 3. Augmentation: geometric & photometric (flip, rotate, etc.).
        if self.augmentor and self.train_mode == 'train':
            if len(mask.shape) == 2 and len(img.shape) == 3:
                img_uint8 = np.clip(img, 0, 255).astype(np.uint8)
                mask_uint8 = np.clip(mask * 255, 0, 255).astype(np.uint8)

                img_aug, mask_aug = self.augmentor.random_augment(img_uint8, mask_uint8)

                img = img_aug.astype(np.float32)
                mask = mask_aug.astype(np.float32) / 255.0

                # Some augmentations may drop the channel axis; restore it.
                if len(img.shape) == 2:
                    img = np.expand_dims(img, axis=-1)

        # 4. Normalization: SMP-style preprocessing callable, or mean subtraction.
        if self.preprocessing:
            # The preprocessing callable expects RGB input.
            if len(img.shape) == 3 and img.shape[-1] == 3:
                img = img[:, :, ::-1]  # BGR to RGB

            img = self.preprocessing(img)
            img = img.astype(np.float32)
        else:
            if len(img.shape) == 3 and img.shape[-1] == 1:
                # Grayscale: subtract the scalar mean of the BGR means.
                gray_mean = np.mean(self.mean_bgr)
                img -= gray_mean
            else:
                img -= self.mean_bgr

        # 5. Cropping or resizing to the configured target size.
        i_h, i_w, _ = img.shape
        # Crops are square, sized by the configured height.
        crop_size = self.img_height

        # Random crop with probability given by crop_img; otherwise resize.
        crop_prob = float(self.crop_img)
        if random.random() < crop_prob and i_w > crop_size and i_h > crop_size:
            # Random crop: identical window applied to image and mask.
            i = random.randint(0, i_h - crop_size)
            j = random.randint(0, i_w - crop_size)
            img = img[i:i + crop_size, j:j + crop_size]
            mask = mask[i:i + crop_size, j:j + crop_size]
        else:
            # Resize to target. NEAREST keeps the mask's hard labels intact.
            img = cv2.resize(img, dsize=(self.img_width, self.img_height))
            mask = cv2.resize(mask, dsize=(self.img_width, self.img_height), interpolation=cv2.INTER_NEAREST)
            # cv2.resize drops a singleton channel axis; restore it.
            if len(img.shape) == 2:
                img = np.expand_dims(img, axis=-1)

        # 6. Final formatting.
        # Requirement: any value > 0 is foreground.
        mask = (mask > 0).astype(np.float32)

        # Image to tensor (C, H, W); .copy() materializes any negative-stride
        # view left by the BGR->RGB flip above.
        img = img.transpose((2, 0, 1))
        img = torch.from_numpy(img.copy()).float()

        # Mask to tensor (1, H, W).
        mask = torch.from_numpy(mask.copy()).float().unsqueeze(0)

        return img, mask
