import torch
from torchvision import datasets, transforms
from sklearn.model_selection import train_test_split
from torch.utils.data import Subset

# Preprocessing pipeline (AlexNet / ImageNet convention): resize the short
# side, crop the central 224x224 patch, convert to a tensor, then normalize
# with the standard ImageNet channel statistics.
_pipeline = [
    transforms.Resize(256),       # scale so the shorter side is 256 px
    transforms.CenterCrop(224),   # central 224x224 crop
    transforms.ToTensor(),        # PIL image -> float tensor in [0, 1]
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
]
transform = transforms.Compose(_pipeline)

# Load the dataset: images are expected under data/mammals, one sub-folder per
# class (datasets.ImageFolder derives the class labels from folder names).
full_dataset = datasets.ImageFolder(root='data/mammals', transform=transform)

# First split: carve out a 20% test set. The remaining 80% is split again
# below into the final training and validation sets.
_labels = full_dataset.targets  # one class index per sample
train_indices, test_indices = train_test_split(
    range(len(full_dataset)),   # split over sample indices
    test_size=0.2,
    random_state=42,            # fixed seed so the split is reproducible
    stratify=_labels,           # stratified sampling keeps class proportions
)

# Second split: 25% of the remaining 80% goes to validation, i.e.
# 0.8 * 0.25 = 0.2 -> 20% of the full dataset.
train_indices, val_indices = train_test_split(
    train_indices,              # note: splitting the train_indices from above
    test_size=0.25,
    random_state=42,
    stratify=[_labels[i] for i in train_indices],  # preserve class balance
)

# Wrap each index list in a Subset view over the one shared underlying dataset.
train_dataset = Subset(full_dataset, train_indices)
val_dataset = Subset(full_dataset, val_indices)
test_dataset = Subset(full_dataset, test_indices)


def _make_loader(dataset, shuffle):
    # Build a DataLoader with the shared batch size (32, adjust as needed);
    # only the training loader shuffles its samples each epoch.
    return torch.utils.data.DataLoader(dataset, batch_size=32, shuffle=shuffle)


train_loader = _make_loader(train_dataset, shuffle=True)    # shuffle training data
val_loader = _make_loader(val_dataset, shuffle=False)       # validation: no shuffle
test_loader = _make_loader(test_dataset, shuffle=False)     # test: no shuffle

# Sanity check: report the size of every split and the number of classes.
_summary = (
    f"总样本数: {len(full_dataset)}",
    f"训练集样本数: {len(train_dataset)} ",
    f"验证集样本数: {len(val_dataset)} ",
    f"测试集样本数: {len(test_dataset)} ",
    f"类别数量: {len(full_dataset.classes)}",
)
print("\n".join(_summary))