import torch
from torchvision import datasets, transforms
from sklearn.model_selection import train_test_split
from torch.utils.data import Subset

# Preprocessing pipeline applied to every image: resize to 32x32, random
# horizontal flip (augmentation), convert to tensor, then normalize each
# RGB channel to roughly [-1, 1].
# NOTE(review): this single pipeline is shared by train/val/test, so the
# random flip also runs on evaluation images — consider a separate
# augmentation-free transform for val/test.
transform = transforms.Compose(
    [
        transforms.Resize((32, 32)),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)),
    ]
)

# Load the dataset from disk; ImageFolder derives one class label per
# sub-directory under data/mammals.
full_dataset = datasets.ImageFolder(root='data/mammals', transform=transform)

# Hold out 20% of the samples as the test set. Stratifying on the labels
# keeps each split's class distribution in line with the full dataset, and
# the fixed seed makes the split reproducible.
all_indices = range(len(full_dataset))
train_indices, test_indices = train_test_split(
    all_indices,
    test_size=0.2,
    random_state=42,
    stratify=full_dataset.targets,
)

# Materialize the two splits as index-based views over the full dataset.
train_dataset = Subset(full_dataset, train_indices)
test_dataset = Subset(full_dataset, test_indices)

# Carve a validation set (20%) out of the training subset, again stratified
# by class with a fixed seed for reproducibility.
#
# Work with positions *within* the current train subset, then translate them
# back to indices into full_dataset BEFORE rebinding train_dataset. The
# original code read `train_dataset.dataset.indices` on the line after
# rebinding `train_dataset`, which only worked because of the exact
# statement order, and it left the train set as a nested Subset-of-Subset.
# This version produces the identical split (same seed, same inputs) but
# keeps both final subsets as flat Subsets of full_dataset.
train_subset_targets = [full_dataset.targets[i] for i in train_dataset.indices]
train_positions, val_positions = train_test_split(
    range(len(train_dataset)),
    test_size=0.2,
    random_state=42,
    stratify=train_subset_targets,
)

# Map subset-relative positions to indices into full_dataset.
final_train_indices = [train_dataset.indices[p] for p in train_positions]
final_val_indices = [train_dataset.indices[p] for p in val_positions]

train_dataset = Subset(full_dataset, final_train_indices)
val_dataset = Subset(full_dataset, final_val_indices)

# Wrap each split in a DataLoader for mini-batch iteration during
# training/evaluation.
batch_size = 32  # adjust as needed

# Only the training loader shuffles; evaluation splits keep a fixed order.
train_loader = torch.utils.data.DataLoader(
    train_dataset, batch_size=batch_size, shuffle=True
)
val_loader = torch.utils.data.DataLoader(
    val_dataset, batch_size=batch_size, shuffle=False
)
test_loader = torch.utils.data.DataLoader(
    test_dataset, batch_size=batch_size, shuffle=False
)

# Sanity check: report the size of each split and the number of classes
# ImageFolder discovered.
for label, dataset in (
    ("总样本数", full_dataset),
    ("训练集样本数", train_dataset),
    ("验证集样本数", val_dataset),
    ("测试集样本数", test_dataset),
):
    print(f"{label}: {len(dataset)}")
print(f"类别数量: {len(full_dataset.classes)}")