import os
from PIL import Image
from torchvision import transforms
from torch.utils.data import DataLoader, Dataset


class TinyImageNetDataset(Dataset):
    # File extensions accepted when scanning the images/ directories.
    _IMG_EXTS = ('.jpeg', '.jpg')

    def __init__(self, root_dir, split='train', transform=None):
        """
        Tiny ImageNet loader adapted to the per-class val/ directory layout.

        Args:
            root_dir: dataset root directory (the directory that contains
                the tiny-imagenet-200 folder).
            split: 'train' or 'val' (anything other than 'train' selects val).
            transform: optional transform applied to each PIL image.
        """
        self.root_dir = os.path.join(root_dir, 'tiny-imagenet-200')
        self.split = split
        self.transform = transform

        # The class list is always derived from the train split so train and
        # val share the same label indices; sorted() makes the
        # class -> index mapping deterministic across filesystems.
        train_classes_dir = os.path.join(self.root_dir, 'train')
        self.classes = sorted(d for d in os.listdir(train_classes_dir)
                              if os.path.isdir(os.path.join(train_classes_dir, d)))
        self.class_to_idx = {cls: i for i, cls in enumerate(self.classes)}
        self.num_classes = len(self.classes)

        # Populate self.image_paths / self.labels for the requested split.
        if split == 'train':
            self._load_train_data()
        else:  # val
            self._load_val_data()

    def _load_train_data(self):
        """Collect (path, label) pairs from train/<class>/images/."""
        self.image_paths = []
        self.labels = []

        train_dir = os.path.join(self.root_dir, 'train')
        for cls in self.classes:
            cls_dir = os.path.join(train_dir, cls, 'images')
            label = self.class_to_idx[cls]
            # sorted() keeps sample order reproducible across platforms.
            for img_name in sorted(os.listdir(cls_dir)):
                if img_name.lower().endswith(self._IMG_EXTS):
                    self.image_paths.append(os.path.join(cls_dir, img_name))
                    self.labels.append(label)

    def _load_val_data(self):
        """Collect (path, label) pairs from val/<class>/images/ (per-class layout)."""
        self.image_paths = []
        self.labels = []

        val_dir = os.path.join(self.root_dir, 'val')

        # Iterate per-class directories; sorted() for deterministic order.
        for cls in sorted(os.listdir(val_dir)):
            images_dir = os.path.join(val_dir, cls, 'images')
            if not os.path.isdir(images_dir):
                continue
            # NOTE(review): a val class name missing from train/ would raise
            # KeyError here; assumed not to happen with this dataset layout.
            label = self.class_to_idx[cls]
            for img_name in sorted(os.listdir(images_dir)):
                if img_name.lower().endswith(self._IMG_EXTS):
                    self.image_paths.append(os.path.join(images_dir, img_name))
                    self.labels.append(label)

    def __len__(self):
        return len(self.image_paths)

    def __getitem__(self, idx):
        """Return (image, label); image is transformed when a transform is set."""
        img_path = self.image_paths[idx]
        label = self.labels[idx]

        try:
            img = Image.open(img_path).convert('RGB')
        except Exception as e:
            # Best-effort fallback: a corrupt/unreadable file yields a gray
            # 64x64 placeholder with label 0 instead of crashing a worker.
            print(f"⚠️ 图片加载失败: {img_path}, 错误: {e}")
            img = Image.new('RGB', (64, 64), color='gray')
            label = 0

        if self.transform:
            img = self.transform(img)

        return img, label


def get_tiny_imagenet_data(batch_size=128, num_workers=4, data_dir=None):
    """
    Build Tiny ImageNet train/val DataLoaders.

    Args:
        batch_size: batch size for both loaders.
        num_workers: number of worker processes for data loading.
        data_dir: directory that contains the tiny-imagenet-200 folder.
            Defaults to the 'data' folder next to this script's parent
            directory (the original hard-coded location).

    Returns:
        (train_loader, val_loader, num_classes)

    Raises:
        FileNotFoundError: if the tiny-imagenet-200 folder cannot be found.
    """
    # Tiny ImageNet channel statistics, shared by both transforms.
    mean = [0.4802, 0.4481, 0.3975]
    std = [0.2302, 0.2265, 0.2262]

    # Training: light augmentation + normalization (output is 64x64).
    train_transform = transforms.Compose([
        transforms.RandomCrop(64, padding=4),
        transforms.RandomHorizontalFlip(p=0.5),
        transforms.ColorJitter(brightness=0.2, contrast=0.2),
        transforms.ToTensor(),
        transforms.Normalize(mean=mean, std=std)
    ])

    # Validation: deterministic resize + center crop to exactly 64x64.
    val_transform = transforms.Compose([
        transforms.Resize(72),
        transforms.CenterCrop(64),
        transforms.ToTensor(),
        transforms.Normalize(mean=mean, std=std)
    ])

    # Resolve the data directory; default preserves the original layout
    # (<parent of this script's directory>/data).
    if data_dir is None:
        script_dir = os.path.dirname(os.path.abspath(__file__))
        data_dir = os.path.join(os.path.dirname(script_dir), 'data')
    tiny_path = os.path.join(data_dir, 'tiny-imagenet-200')

    # Fail fast with a clear message when the dataset is not extracted.
    if not os.path.exists(tiny_path):
        raise FileNotFoundError(f"无法找到Tiny ImageNet数据集，请确认数据已解压到: {tiny_path}")

    train_path = os.path.join(tiny_path, 'train')
    val_path = os.path.join(tiny_path, 'val')

    print(f"训练集路径: {train_path} (存在: {os.path.exists(train_path)})")
    print(f"验证集路径: {val_path} (存在: {os.path.exists(val_path)})")

    # Build the datasets for each split.
    train_dataset = TinyImageNetDataset(root_dir=data_dir, split='train', transform=train_transform)
    val_dataset = TinyImageNetDataset(root_dir=data_dir, split='val', transform=val_transform)

    # Wrap them in loaders; only the training loader shuffles.
    train_loader = DataLoader(train_dataset, batch_size=batch_size,
                              shuffle=True, num_workers=num_workers, pin_memory=True)
    val_loader = DataLoader(val_dataset, batch_size=batch_size,
                            shuffle=False, num_workers=num_workers, pin_memory=True)

    num_classes = train_dataset.num_classes

    print(f"训练样本数: {len(train_dataset)}")
    print(f"验证样本数: {len(val_dataset)}")
    print(f"类别数量: {num_classes}")
    print(f"图像尺寸: 64x64")

    return train_loader, val_loader, num_classes

