import torch
import os
from torchvision import datasets, transforms
from torch.utils.data.sampler import SubsetRandomSampler

def load_datasets(dataset_name, subset_size=None, batch_size=32):
    """Build train/test DataLoaders for MNIST or CIFAR10.

    Args:
        dataset_name: 'mnist' or 'cifar'.
        subset_size: if not None, each loader draws only this many randomly
            chosen indices from its split (via SubsetRandomSampler).
        batch_size: mini-batch size for both loaders (default 32, matching
            the previous hard-coded value).

    Returns:
        (train_loader, test_loader) tuple of torch DataLoaders.

    Raises:
        ValueError: if dataset_name is not one of the supported names.
    """
    # torchvision's download=True is a no-op when the dataset already exists
    # on disk, so no separate os.path.exists check is needed.
    if dataset_name == 'mnist':
        # Single-channel normalization (standard MNIST mean/std).
        transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.1307,), (0.3081,)),
        ])
        data = datasets.MNIST(root='./data', train=True, download=True, transform=transform)
        test_data = datasets.MNIST(root='./data', train=False, download=True, transform=transform)

    elif dataset_name == 'cifar':
        # Three-channel normalization (per-channel CIFAR10 mean/std);
        # built once instead of being duplicated per branch.
        transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.4914, 0.4822, 0.4465), (0.247, 0.243, 0.261)),
        ])
        data = datasets.CIFAR10(root='./data', train=True, download=True, transform=transform)
        test_data = datasets.CIFAR10(root='./data', train=False, download=True, transform=transform)

    else:
        raise ValueError("Dataset not supported")

    # Optionally restrict each split to `subset_size` random indices.
    if subset_size is not None:
        train_sampler = SubsetRandomSampler(torch.randperm(len(data))[:subset_size])
        test_sampler = SubsetRandomSampler(torch.randperm(len(test_data))[:subset_size])
    else:
        train_sampler = None
        test_sampler = None

    # sampler=None simply means default sequential sampling, so no extra
    # conditional is required here.
    # NOTE(review): when sampler is None neither loader shuffles, so training
    # batches arrive in dataset order — confirm this is intended; otherwise
    # pass shuffle=True for the training loader.
    train_loader = torch.utils.data.DataLoader(data, batch_size=batch_size, sampler=train_sampler)
    test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size, sampler=test_sampler)

    return train_loader, test_loader
