"""
雷达俯视图分割数据集
支持单通道输入、K折交叉验证、数据增强
"""
import os
import cv2
import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader
from sklearn.model_selection import KFold
import albumentations as A
from albumentations.pytorch import ToTensorV2


class RadarSegDataset(Dataset):
    """
    Radar top-view segmentation dataset.

    Loads grayscale radar images with matching binary masks, optionally
    applies augmentation, and returns normalized float tensors.

    Args:
        images_dir: directory containing input images
        masks_dir: directory containing label masks (same filenames as images)
        file_list: list of filenames to load
        to_rgb: if True, replicate the single channel to 3 channels
                (for ImageNet-pretrained backbones)
        augment: if True, apply data augmentation
        image_size: target square size in pixels (default 256)
    """

    def __init__(self, images_dir, masks_dir, file_list, to_rgb=False, augment=False, image_size=256):
        self.images_dir = images_dir
        self.masks_dir = masks_dir
        self.file_list = file_list
        self.to_rgb = to_rgb
        self.augment = augment
        self.image_size = image_size

        # Augmentation pipeline (albumentations applies nearest-neighbor
        # interpolation to the mask target by default, so labels stay crisp).
        if self.augment:
            self.transform = A.Compose([
                A.HorizontalFlip(p=0.5),
                A.VerticalFlip(p=0.5),
                A.Rotate(limit=15, p=0.5),
                A.GaussNoise(var_limit=(10.0, 50.0), p=0.3),
                A.RandomBrightnessContrast(brightness_limit=0.2, contrast_limit=0.2, p=0.3),
            ])
        else:
            self.transform = None

    def __len__(self):
        # Number of samples in this split.
        return len(self.file_list)

    def __getitem__(self, idx):
        """
        Load one (image, mask) pair.

        Returns:
            image: float32 tensor of shape (1, H, W) in [0, 1],
                   or (3, H, W) when to_rgb is True
            mask: float32 tensor of shape (1, H, W) with values {0, 1}

        Raises:
            ValueError: if the image or mask file cannot be read.
        """
        filename = self.file_list[idx]

        # Read image as single-channel grayscale.
        image_path = os.path.join(self.images_dir, filename)
        image = cv2.imread(image_path, cv2.IMREAD_GRAYSCALE)

        if image is None:
            raise ValueError(f"无法读取图像: {image_path}")

        # Read mask (grayscale, same filename as the image).
        mask_path = os.path.join(self.masks_dir, filename)
        mask = cv2.imread(mask_path, cv2.IMREAD_GRAYSCALE)

        if mask is None:
            raise ValueError(f"无法读取mask: {mask_path}")

        # Resize to the target square size if needed.
        if image.shape != (self.image_size, self.image_size):
            image = cv2.resize(image, (self.image_size, self.image_size))

        if mask.shape != (self.image_size, self.image_size):
            # FIX: masks must use nearest-neighbor interpolation — the default
            # bilinear resize interpolates label values and bleeds foreground
            # into background pixels at object boundaries.
            mask = cv2.resize(mask, (self.image_size, self.image_size),
                              interpolation=cv2.INTER_NEAREST)

        # Apply augmentation (image and mask transformed jointly).
        if self.transform is not None:
            augmented = self.transform(image=image, mask=mask)
            image = augmented['image']
            mask = augmented['mask']

        # Normalize image to [0, 1].
        image = image.astype(np.float32) / 255.0

        # Binarize mask: any pixel > 0 is foreground (1), else background (0).
        mask = (mask > 0).astype(np.float32)

        # To tensors with an explicit channel dimension.
        image = torch.from_numpy(image).unsqueeze(0)  # (1, H, W)
        mask = torch.from_numpy(mask).unsqueeze(0)  # (1, H, W)

        # Replicate the channel for 3-channel pretrained backbones.
        if self.to_rgb:
            image = image.repeat(3, 1, 1)  # (3, H, W)

        return image, mask


def create_kfold_dataloaders(images_dir, masks_dir, k_folds=8, fold_idx=0,
                              batch_size=8, num_workers=4, to_rgb=False,
                              augment_train=True, seed=42):
    """
    Create train/validation data loaders for one fold of K-fold cross validation.

    Args:
        images_dir: directory containing input images
        masks_dir: directory containing label masks
        k_folds: number of folds
        fold_idx: index of the current fold (0 to k_folds-1)
        batch_size: batch size
        num_workers: number of data-loading workers
        to_rgb: convert single-channel input to 3 channels
        augment_train: apply augmentation to the training split
        seed: random seed for the fold split

    Returns:
        train_loader: training DataLoader
        val_loader: validation DataLoader
        train_files: list of training filenames
        val_files: list of validation filenames

    Raises:
        ValueError: if fold_idx is out of range or no image files are found.
    """
    # FIX: validate the fold index up front — previously an out-of-range
    # fold_idx only surfaced as an opaque IndexError after all the file
    # listing and splitting work had been done.
    if not 0 <= fold_idx < k_folds:
        raise ValueError(f"fold_idx 超出范围: {fold_idx} (应为 0 到 {k_folds - 1})")

    # Collect all image filenames (sorted so the split is reproducible).
    all_files = sorted([f for f in os.listdir(images_dir)
                        if f.endswith(('.png', '.jpg', '.jpeg'))])

    if len(all_files) == 0:
        raise ValueError(f"在 {images_dir} 中没有找到图像文件")

    print(f"[INFO] 总共找到 {len(all_files)} 个图像文件")

    # K-fold split (shuffled, seeded for reproducibility across runs).
    kfold = KFold(n_splits=k_folds, shuffle=True, random_state=seed)
    splits = list(kfold.split(all_files))

    # Indices for the requested fold.
    train_idx, val_idx = splits[fold_idx]

    # Map indices back to filenames.
    train_files = [all_files[i] for i in train_idx]
    val_files = [all_files[i] for i in val_idx]

    print(f"[INFO] Fold {fold_idx}: 训练集 {len(train_files)} 张, 验证集 {len(val_files)} 张")

    # Build datasets; only the training split is augmented.
    train_dataset = RadarSegDataset(
        images_dir=images_dir,
        masks_dir=masks_dir,
        file_list=train_files,
        to_rgb=to_rgb,
        augment=augment_train
    )

    val_dataset = RadarSegDataset(
        images_dir=images_dir,
        masks_dir=masks_dir,
        file_list=val_files,
        to_rgb=to_rgb,
        augment=False  # no augmentation on the validation split
    )

    # Build loaders; only the training loader shuffles.
    train_loader = DataLoader(
        train_dataset,
        batch_size=batch_size,
        shuffle=True,
        num_workers=num_workers,
        pin_memory=True
    )

    val_loader = DataLoader(
        val_dataset,
        batch_size=batch_size,
        shuffle=False,
        num_workers=num_workers,
        pin_memory=True
    )

    return train_loader, val_loader, train_files, val_files
