import os
import torch
from PIL import Image
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms
import logging


class FogDataset(Dataset):
    """Video-clip dataset for fog classification.

    Scans ``root_dir`` for the two category folders (``nofoggy`` -> label 0,
    ``foggy`` -> label 1). Each video is a sub-directory of frame images; the
    dataset slices every video into fixed-length clips of ``frame_len``
    consecutive frames. ``__getitem__`` returns a ``[T, C, H, W]`` float
    tensor plus a scalar long label.
    """

    def __init__(self, root_dir, frame_len=15, transform=None, mode="train"):
        """
        :param root_dir: dataset root directory (contains the category dirs)
        :param frame_len: number of consecutive frames per sample clip
        :param transform: per-frame augmentation / preprocessing; falls back
            to a plain ``ToTensor`` when ``None``
        :param mode: 'train', 'val' or 'test' — train mode samples
            50%-overlapping clips, other modes use non-overlapping clips
        """
        self.root_dir = root_dir
        self.frame_len = frame_len
        self.transform = transform
        self.mode = mode
        self.video_cache = {}   # video_dir -> sorted list of frame paths
        self.video_infos = []   # one (video_dir, start_frame, label) per clip
        self.categories = ["nofoggy", "foggy"]

        print(f"[{mode} 模式] 开始扫描数据集根目录: {root_dir}")

        # Walk each category directory; the enumerate index doubles as label.
        for label, cls_name in enumerate(self.categories):
            cls_dir = os.path.join(root_dir, cls_name)
            if not os.path.exists(cls_dir):
                print(f"警告: 类别目录不存在, 跳过: {cls_dir}")
                continue

            for video_name in os.listdir(cls_dir):
                video_dir = os.path.join(cls_dir, video_name)
                if not os.path.isdir(video_dir):
                    continue
                # Sorted so clip frames are in temporal order (assumes
                # filenames sort chronologically — TODO confirm naming scheme).
                frames_paths = sorted([
                    os.path.join(video_dir, f) for f in os.listdir(video_dir)
                    if f.lower().endswith((".jpg", ".png", ".jpeg", ".webp"))
                ])

                num_frames = len(frames_paths)
                # Videos shorter than one clip are skipped entirely.
                if num_frames < frame_len:
                    continue
                self.video_cache[video_dir] = frames_paths
                # Train: 50%-overlapping windows for more samples; eval:
                # non-overlapping windows. max(1, ...) guards against a zero
                # step when frame_len == 1 (range() rejects step 0).
                step = frame_len if mode != "train" else max(1, frame_len // 2)
                for start in range(0, num_frames - frame_len + 1, step):
                    self.video_infos.append((video_dir, start, label))

        if len(self.video_infos) == 0:
            print(f"**严重警告: 在 {root_dir} 未加载到任何样本，请检查路径和数据结构！**")
        else:
            print(f"[{mode} 模式]: 扫描完成, 共加载 {len(self.video_infos)} 个样本 ({len(self.categories)} 类)")

    def __len__(self):
        # Number of clips, not number of videos.
        return len(self.video_infos)

    def __getitem__(self, idx):
        """Load one clip: returns (frames [T, C, H, W], label long tensor)."""
        video_dir_key, start_idx, label = self.video_infos[idx]
        all_frame_paths = self.video_cache[video_dir_key]
        selected_paths = all_frame_paths[start_idx: start_idx + self.frame_len]

        frames = []
        for img_path in selected_paths:
            try:
                img = Image.open(img_path).convert("RGB")
                if self.transform:
                    img = self.transform(img)
                else:
                    # Fallback when no transform was supplied.
                    img = transforms.ToTensor()(img)
                frames.append(img)
            except Exception as e:
                print(f"错误: 加载图像失败 {img_path} - {e}")
                # Chain the original exception so the root cause is visible.
                raise IOError(f"加载图像失败: {img_path}") from e

        # Stack per-frame tensors into one [T, C, H, W] clip tensor.
        frames = torch.stack(frames, dim=0)
        return frames, torch.tensor(label, dtype=torch.long)


def build_dataloader(root_dir, frame_len=15, batch_size=4, num_workers=0, image_size=(360, 640), mode="train"):
    """Build a DataLoader over ``FogDataset``.

    Train mode enables augmentation (color jitter, blur, random erasing)
    and shuffling; every other mode uses deterministic preprocessing
    (Resize, ToTensor, Normalize) with no shuffling.
    """
    # ImageNet statistics, shared by both pipelines.
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    is_train = mode == "train"
    if is_train:
        logging.info(f"[{mode} 模式] 启用数据增强 (抖动, 模糊, 擦除)")
        pipeline = [
            transforms.Resize(image_size),
            transforms.RandomApply(
                [transforms.ColorJitter(brightness=0.15, contrast=0.15, saturation=0.1)], p=0.5),
            transforms.RandomApply(
                [transforms.GaussianBlur(kernel_size=3, sigma=(0.1, 0.5))], p=0.3),
            transforms.ToTensor(),
            # RandomErasing operates on tensors, so it comes after ToTensor.
            transforms.RandomErasing(p=0.5, scale=(0.02, 0.1), ratio=(0.3, 3.3), value='random'),
            normalize,
        ]
    else:
        logging.info(f"[{mode} 模式] 仅使用 Resize, ToTensor, Normalize")
        pipeline = [
            transforms.Resize(image_size),
            transforms.ToTensor(),
            normalize,
        ]

    dataset = FogDataset(root_dir=root_dir, frame_len=frame_len,
                         transform=transforms.Compose(pipeline), mode=mode)

    return DataLoader(dataset, batch_size=batch_size, shuffle=is_train,
                      num_workers=num_workers, pin_memory=True)


if __name__ == "__main__":
    # Smoke test: build one training batch and print its shape.
    TRAIN_ROOT = r"F:\project\jili_three_project\foggy_project_all\1108_new\two\dataset\train"

    loader = build_dataloader(TRAIN_ROOT, frame_len=15, batch_size=2, num_workers=0, mode="train")
    x, y = next(iter(loader))
    print(f"Train x: {x.shape}, y: {y}")  # expected x.shape = [2, 15, 3, 360, 640]
