# train.py — trains a CIFAR-100 classifier and saves the resulting model
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision.transforms as transforms
import torchvision.datasets as datasets
from torch.utils.data import DataLoader
from model5 import get_model  # import the model builder from model5.py (comment previously said model4.py)
# Data preprocessing pipeline applied to every image.
transform = transforms.Compose([
    transforms.Resize(256),  # resize shorter side to 256 (CIFAR images are 32x32, so this upscales 8x — presumably the model expects ImageNet-sized inputs; TODO confirm)
    transforms.ToTensor(),  # PIL image -> float tensor in [0, 1], CHW layout
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),  # ImageNet channel statistics — assumes get_model returns an ImageNet-pretrained backbone; verify
])
# Load the CIFAR-100 dataset (downloaded into ./ on first run).
train_dataset = datasets.CIFAR100(root='./', train=True, download=True, transform=transform)
test_dataset = datasets.CIFAR100(root='./', train=False, download=True, transform=transform)
# Wrap the datasets in batched loaders; only the training set is shuffled.
train_loader = DataLoader(train_dataset, batch_size=32, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=32, shuffle=False)
# Build the model with one output per CIFAR-100 class.
model = get_model(num_classes=100)
# Loss and optimizer: cross-entropy with plain SGD + momentum.
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)
# Select GPU when available, otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)
# Training loop.
def train(model, device, train_loader, optimizer, criterion, epochs=1):
    """Train ``model`` on ``train_loader`` and save it once training finishes.

    Args:
        model: network to optimize (caller is expected to have moved it to ``device``).
        device: ``torch.device`` on which batches are processed.
        train_loader: DataLoader yielding ``(data, target)`` batches.
        optimizer: optimizer updating ``model.parameters()``.
        criterion: loss function taking ``(output, target)``.
        epochs: number of full passes over ``train_loader`` (default 1).

    Side effects: prints progress every 100 batches and, after the FINAL
    epoch, serializes the whole model object to '模型文件.pth'.
    """
    model.train()
    for epoch in range(epochs):
        for batch_idx, (data, target) in enumerate(train_loader):
            data, target = data.to(device), target.to(device)
            optimizer.zero_grad()
            output = model(data)
            loss = criterion(output, target)
            loss.backward()
            optimizer.step()
            if batch_idx % 100 == 0:
                # Fix: epoch+1 is the epoch number, so label it "轮" (epoch),
                # not "批" (batch) as the original message wrongly did.
                print(f'模型训练第 {epoch+1} 轮：[{batch_idx * len(data)}/{len(train_loader.dataset)} ({100. * batch_idx / len(train_loader):.0f}%)]\tLoss: {loss.item():.6f}')
    # Fix: save once after ALL epochs complete. The original saved (and
    # announced "training finished") inside the epoch loop, once per epoch.
    # NOTE(review): torch.save(model.state_dict(), ...) is the recommended
    # serialization format; the whole-model save is kept here so existing
    # torch.load callers remain compatible.
    torch.save(model, '模型文件.pth')
    print("训练结束，模型文件保存成功")
# Run training at import time.
# NOTE(review): the original comment said "train and test", but no evaluation
# over test_loader is ever performed in this file — confirm whether an eval
# pass was intended, and consider guarding this call with __name__ == "__main__".
train(model, device, train_loader, optimizer, criterion, epochs=1)

