import torch
import torch.nn as nn
import torch.optim as optim
from torchvision import datasets, transforms
from torch.utils.data import DataLoader

# Runtime configuration: prefer the GPU when one is available.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
num_epoch = 5          # number of training epochs
batch_size = 64        # samples per mini-batch
learning_rate = 0.001  # Adam step size

# Convert PIL images to tensors and normalize with the MNIST mean/std.
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.1307,), (0.3081,)),
])

# Download (if needed) and wrap the MNIST train/test splits.
train_data = datasets.MNIST(root='./data', train=True, download=True, transform=transform)
test_data = datasets.MNIST(root='./data', train=False, download=True, transform=transform)

# Mini-batch iterators; shuffle only during training.
train_loader = DataLoader(train_data, batch_size=batch_size, shuffle=True)
test_loader = DataLoader(test_data, batch_size=batch_size, shuffle=False)

# 定义神经网络模型（CNN结构）
class MNISTNet(nn.Module):
    """Two-stage CNN classifier for 28x28 single-channel MNIST digits.

    Returns raw logits over the 10 digit classes; pair with
    ``nn.CrossEntropyLoss`` (which applies log-softmax internally).
    """

    def __init__(self):
        super().__init__()
        # Feature extractor: two conv+ReLU+maxpool stages.
        # Spatial size: 28x28 -> pool -> 14x14 -> pool -> 7x7; channels 1 -> 32 -> 64.
        self.conv_layers = nn.Sequential(
            nn.Conv2d(1, 32, kernel_size=5, padding=2),   # 'same'-style padding keeps 28x28
            nn.ReLU(),
            nn.MaxPool2d(2, stride=2),                    # 28x28 -> 14x14
            nn.Conv2d(32, 64, kernel_size=5, padding=2),  # keeps 14x14
            nn.ReLU(),
            nn.MaxPool2d(2, stride=2),                    # 14x14 -> 7x7
        )
        # Classifier head over the flattened 64x7x7 feature map.
        self.fc_layers = nn.Sequential(
            nn.Linear(64 * 7 * 7, 1024),
            nn.ReLU(),
            nn.Dropout(0.5),      # regularization against overfitting
            nn.Linear(1024, 10),  # logits for digits 0-9
        )

    def forward(self, x):
        """Map a (N, 1, 28, 28) batch to (N, 10) class logits."""
        x = self.conv_layers(x)
        # Fix: flatten from dim 1 instead of view(-1, 64*7*7). The latter can
        # silently fold a spatial-shape mismatch into the batch dimension;
        # flatten always preserves the batch size and fails loudly instead.
        x = torch.flatten(x, 1)
        return self.fc_layers(x)

# 初始化模型、损失函数、优化器
model = MNISTNet().to(device)  # 模型移到指定设备
criterion = nn.CrossEntropyLoss()  # 交叉熵损失（适配分类任务）
optimizer = optim.Adam(model.parameters(), lr=learning_rate)  # Adam优化器

# 模型训练流程
def train(model, train_loader, criterion, optimizer, epoch):
    """Run one training epoch, logging the mean loss every 100 batches."""
    model.train()  # training mode: Dropout active
    # Defensive: make sure autograd is on globally in case it was disabled
    # elsewhere in the process before this call.
    torch.set_grad_enabled(True)
    running_loss = 0.0

    for step, (inputs, targets) in enumerate(train_loader, start=1):
        # Move the mini-batch to the selected device.
        inputs = inputs.to(device)
        targets = targets.to(device)

        optimizer.zero_grad()                    # reset accumulated gradients
        loss = criterion(model(inputs), targets)  # forward pass + loss
        loss.backward()                          # backpropagate
        optimizer.step()                         # parameter update

        running_loss += loss.item()

        # Report the running mean loss once per 100 batches, then reset it.
        if step % 100 == 0:
            print(f'Epoch [{epoch+1}/{num_epoch}], Batch [{step}/{len(train_loader)}], Train Loss: {running_loss / 100:.4f}')
            running_loss = 0.0

# 模型测试流程
def test(model, test_loader, criterion):
    """Evaluate on the test set, printing mean per-batch loss and accuracy.

    Fix: the original called ``torch.set_grad_enabled(False)``, which
    disables autograd process-wide and leaks past this function (the script
    only kept working because train() re-enabled it). The ``no_grad``
    context below already scopes gradient suppression to the loop.
    """
    model.eval()  # inference mode: Dropout disabled
    test_loss = 0.0
    correct = 0  # count of correct predictions

    with torch.no_grad():  # no gradients needed for evaluation
        for data, label in test_loader:
            data = data.to(device)
            label = label.to(device)

            output = model(data)
            test_loss += criterion(output, label).item()  # sum of per-batch mean losses
            pred = output.argmax(dim=1, keepdim=True)  # most likely class per sample
            correct += pred.eq(label.view_as(pred)).sum().item()

    # Mean loss per batch and accuracy over the whole dataset.
    avg_test_loss = test_loss / len(test_loader)
    accuracy = 100. * correct / len(test_loader.dataset)
    print(f'Test Loss: {avg_test_loss:.4f}, Accuracy: {correct}/{len(test_loader.dataset)} ({accuracy:.2f}%)\n')

# 执行训练与测试
if __name__ == "__main__":
    print(f"Training on {device}...")
    # Alternate one training pass and one evaluation pass per epoch.
    for epoch_idx in range(num_epoch):
        train(model, train_loader, criterion, optimizer, epoch_idx)
        test(model, test_loader, criterion)

    # Persist the learned weights to disk.
    torch.save(model.state_dict(), "mnist_model.pth")
    print("Training finished! Model saved as 'mnist_model.pth'")