from torchvision import transforms
from torch.utils.data import Dataset,DataLoader,Subset
import itertools
import numpy as np

# ------- Dataset section -------
# Per-channel normalization statistics applied to every split below.
# NOTE(review): despite the names, these are the classic CIFAR-10 stats;
# commonly cited CIFAR-100 values are mean (0.5071, 0.4867, 0.4408) and
# std (0.2675, 0.2565, 0.2761). Confirm which values the trained models
# expect before changing them.
CIFAR100_TRAIN_MEAN = (0.4914, 0.4822, 0.4465)
CIFAR100_TRAIN_STD = (0.2023, 0.1994, 0.2010)

# Training-time preprocessing: reflect-pad 32x32 -> 40x40, random 32x32
# crop, random horizontal flip, then tensor conversion + normalization.
transform_train = transforms.Compose([
    transforms.Pad(4, padding_mode='reflect'),
    transforms.RandomCrop(32),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    transforms.Normalize(CIFAR100_TRAIN_MEAN, CIFAR100_TRAIN_STD)
])

# Test-time preprocessing: no augmentation, only tensor + normalization.
transform_test = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize(CIFAR100_TRAIN_MEAN, CIFAR100_TRAIN_STD)
])

# The sub_* pipelines are currently byte-identical to the main ones; they
# are kept as separate objects so exemplar/sub-task pipelines can diverge
# later without touching the main transforms.
sub_transform_train = transforms.Compose([
    transforms.Pad(4, padding_mode='reflect'),
    transforms.RandomCrop(32),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    transforms.Normalize(CIFAR100_TRAIN_MEAN, CIFAR100_TRAIN_STD)
])

sub_transform_test = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize(CIFAR100_TRAIN_MEAN, CIFAR100_TRAIN_STD)
])


# NOTE: a commented-out early draft of MyDataset (identical to the class
# below minus the targets/num_classes attributes) used to live here as a
# module-level string literal; removed as dead code.


class MyDataset(Dataset):
    """Wrap a dataset (typically a torch Subset) and apply a transform per item.

    Also exposes:
      - targets: list of all labels in the wrapped dataset, in order.
      - num_classes: number of distinct labels observed.
    """

    def __init__(self, subset, transform=None):
        self.subset = subset
        self.transform = transform
        # Fast path: for a torch Subset over a dataset that exposes
        # `.targets` (torchvision-style, e.g. CIFAR), read the labels
        # directly through the index list. The original fallback iterates
        # `subset`, which decodes every image up front just to get labels.
        base = getattr(subset, 'dataset', None)
        indices = getattr(subset, 'indices', None)
        if base is not None and indices is not None and hasattr(base, 'targets'):
            self.targets = [base.targets[i] for i in indices]
        else:
            self.targets = [label for _, label in subset]
        self.num_classes = len(set(self.targets))  # distinct labels seen

    def __getitem__(self, index):
        # Delegate to the wrapped dataset, then transform the sample only
        # (labels pass through untouched).
        x, y = self.subset[index]
        if self.transform:
            x = self.transform(x)
        return x, y

    def __len__(self):
        return len(self.subset)
    
class CIFARSubset(Dataset):
    """Eagerly materialized subset of a CIFAR-style dataset.

    All selected samples and labels are copied out of `dataset` at
    construction time; an optional `transform` is applied on access.
    """

    def __init__(self, dataset, indices, transform=None):
        """
        dataset: the source CIFAR-100 dataset.
        indices: positions of the samples to extract.
        transform: optional transform applied to each sample on access.
        """
        self.data = []
        self.targets = []
        for idx in indices:
            self.data.append(dataset[idx][0])
            self.targets.append(dataset[idx][1])
        self.transform = transform

    def __getitem__(self, index):
        """Return the (possibly transformed) sample and its label at `index`."""
        sample = self.data[index]
        label = self.targets[index]
        if self.transform is not None:
            sample = self.transform(sample)
        return sample, label

    def __len__(self):
        """Number of samples held by this subset."""
        return len(self.data)
    



def create_exampler_subset(dataset, n_samples_per_class):
    """Randomly draw up to n_samples_per_class examples of each class.

    Parameters:
    - dataset: indexable dataset yielding (sample, label) pairs.
    - n_samples_per_class: samples to draw per class; a class with fewer
      samples contributes all of them (in random order).

    Returns:
    - torch.utils.data.Subset restricted to the selected indices.
    """
    # Group sample indices by label. If the dataset exposes `.targets`
    # (torchvision-style), read labels directly — the fallback iterates
    # the dataset, which may decode every image just to get its label.
    labels = getattr(dataset, 'targets', None)
    if labels is None:
        labels = (label for _, label in dataset)

    class_indices = {}
    for i, label in enumerate(labels):
        class_indices.setdefault(label, []).append(i)

    # Sample min(n, available) per class without replacement; this folds
    # the original "class too small" branch into one call (choosing
    # len(indices) without replacement is a random permutation).
    selected_indices = []
    for indices in class_indices.values():
        k = min(n_samples_per_class, len(indices))
        selected_indices.extend(
            np.random.choice(indices, k, replace=False).tolist())

    return Subset(dataset, selected_indices)

def create_exampler_datasets_and_loaders(origin_train_set, origin_test_set, n_samples_per_class, batch_size, transform_train=None, transform_test=None, num_workers=4):
    """Build an exemplar dataset and DataLoader for every task.

    Parameters:
    - origin_train_set: list of per-task training datasets.
    - origin_test_set: list of per-task test datasets.
    - n_samples_per_class: samples to draw per class for each exemplar set.
    - batch_size: DataLoader batch size.
    - transform_train: transform applied to exemplar training samples.
    - transform_test: transform applied to exemplar test samples.
    - num_workers: DataLoader worker processes (default 4, the previous
      hard-coded value).

    Returns:
    - exampler_train_sets: per-task exemplar training datasets.
    - exampler_test_sets: per-task exemplar test datasets.
    - train_loaders: per-task training DataLoaders (shuffled).
    - test_loaders: per-task test DataLoaders (not shuffled).
    """
    exampler_train_sets = []
    exampler_test_sets = []
    train_loaders = []
    test_loaders = []

    for train_set, test_set in zip(origin_train_set, origin_test_set):
        # Per-class random sampling for this task's train and test splits.
        exampler_train = create_exampler_subset(train_set, n_samples_per_class)
        exampler_test = create_exampler_subset(test_set, n_samples_per_class)

        exampler_train_sets.append(exampler_train)
        exampler_test_sets.append(exampler_test)

        # MyDataset applies the split-specific transform lazily per item.
        train_loader = DataLoader(MyDataset(exampler_train, transform=transform_train), batch_size=batch_size, shuffle=True, num_workers=num_workers)
        test_loader = DataLoader(MyDataset(exampler_test, transform=transform_test), batch_size=batch_size, shuffle=False, num_workers=num_workers)

        train_loaders.append(train_loader)
        test_loaders.append(test_loader)

    return exampler_train_sets, exampler_test_sets, train_loaders, test_loaders

def create_task_datasets_and_loaders(train_dataset, test_dataset, num_classes_per_task, label_mapping, batch_size=64,
                                   transform_train=None, transform_test=None, num_workers=4):
    """Split train/test datasets into sequential tasks by class and build loaders.

    Parameters:
    - train_dataset / test_dataset: datasets exposing a `.targets` list.
    - num_classes_per_task: number of classes in each task, in task order.
    - label_mapping: maps a position in the global class order to the
      actual dataset label used at that position.
    - batch_size: DataLoader batch size.
    - transform_train / transform_test: per-split transforms.
    - num_workers: DataLoader worker processes (default 4, the previous
      hard-coded value).

    Returns:
    - origin_train_set, origin_test_set: per-task Subsets.
    - train_loaders, test_loaders: per-task DataLoaders.
    - task_class_orders: per-task list of dataset labels.
    """
    origin_train_set = []
    origin_test_set = []
    train_loaders = []
    test_loaders = []

    # Cumulative boundaries: task t covers global class positions
    # [cumulative_indices[t], cumulative_indices[t+1]).
    cumulative_indices = list(itertools.accumulate([0] + num_classes_per_task))
    task_class_orders = []

    for start_idx, end_idx in zip(cumulative_indices, cumulative_indices[1:]):
        # Actual dataset labels belonging to this task.
        task_class_order = [label_mapping[i] for i in range(start_idx, end_idx)]
        task_class_orders.append(task_class_order)

        # Set for O(1) membership tests instead of scanning the label list
        # once per sample.
        task_classes = set(task_class_order)
        train_indices = [i for i, label in enumerate(train_dataset.targets) if label in task_classes]
        test_indices = [i for i, label in enumerate(test_dataset.targets) if label in task_classes]

        subset_train = Subset(train_dataset, train_indices)
        subset_test = Subset(test_dataset, test_indices)
        origin_train_set.append(subset_train)
        origin_test_set.append(subset_test)

        # MyDataset applies the split-specific transform lazily per item.
        train_dataset_task = MyDataset(subset_train, transform=transform_train)
        test_dataset_task = MyDataset(subset_test, transform=transform_test)

        train_loader = DataLoader(train_dataset_task, batch_size=batch_size, shuffle=True, num_workers=num_workers)
        test_loader = DataLoader(test_dataset_task, batch_size=batch_size, shuffle=False, num_workers=num_workers)

        train_loaders.append(train_loader)
        test_loaders.append(test_loader)

    return origin_train_set, origin_test_set, train_loaders, test_loaders, task_class_orders



