import torch
from torchvision import datasets, transforms
from torch.utils.data import DataLoader, random_split, Subset


class MNISTDataModule:
    """Encapsulates MNIST download, subsampling, and DataLoader creation.

    Intended call order: prepare_data() -> setup() -> get_dataloaders().
    """

    def __init__(self, batch_size=64, train_samples=None, val_samples=None,
                 data_dir='./data', seed=None):
        """
        Args:
            batch_size: number of samples per batch.
            train_samples: number of training samples to keep (None = use all).
            val_samples: number of validation samples to keep (None = use all).
            data_dir: directory where the MNIST files are stored/downloaded.
            seed: optional int seed for reproducible random subsampling.
                None preserves the original non-deterministic behavior.
        """
        self.batch_size = batch_size
        self.train_samples = train_samples
        self.val_samples = val_samples
        self.data_dir = data_dir
        self.seed = seed

        # Standard MNIST normalization constants (dataset mean / std).
        self.transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.1307,), (0.3081,))
        ])

    def prepare_data(self):
        """Download the MNIST train and test sets if not already present."""
        datasets.MNIST(self.data_dir, train=True, download=True)
        datasets.MNIST(self.data_dir, train=False, download=True)

    def _subsample(self, dataset, num_samples):
        """Return a random Subset of `num_samples` items, or `dataset` itself if None."""
        if num_samples is None:
            return dataset
        # Use a dedicated Generator when a seed is set so subsampling is
        # reproducible without touching torch's global RNG state.
        generator = None
        if self.seed is not None:
            generator = torch.Generator().manual_seed(self.seed)
        indices = torch.randperm(len(dataset), generator=generator)[:num_samples]
        return Subset(dataset, indices)

    def setup(self):
        """Load the datasets and build the (optionally subsampled) splits.

        Requires the data to already be on disk; call prepare_data() first.
        """
        full_train = datasets.MNIST(
            self.data_dir,
            train=True,
            transform=self.transform
        )
        full_test = datasets.MNIST(
            self.data_dir,
            train=False,
            transform=self.transform
        )

        self.train_data = self._subsample(full_train, self.train_samples)
        self.val_data = self._subsample(full_test, self.val_samples)

    def get_dataloaders(self):
        """Return {'train': ..., 'val': ...} DataLoaders over the prepared splits.

        Raises:
            RuntimeError: if setup() has not been called yet (previously this
                surfaced as an opaque AttributeError).
        """
        if not hasattr(self, 'train_data') or not hasattr(self, 'val_data'):
            raise RuntimeError("setup() must be called before get_dataloaders()")
        return {
            'train': DataLoader(
                self.train_data,
                batch_size=self.batch_size,
                shuffle=True,
                num_workers=2  # 2-4 workers is a reasonable default on CPU
            ),
            'val': DataLoader(
                self.val_data,
                batch_size=self.batch_size,
                shuffle=False,
                num_workers=2
            )
        }