# 简单示例，使用PyTorch实现简单的神经网络
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
import matplotlib.pyplot as plt

# Seed the global RNG so weight init and shuffling are reproducible.
torch.manual_seed(42)

# 1. Data preparation
# Pipeline applied to every MNIST image: PIL image -> float tensor in [0, 1],
# then standardized with the dataset's published mean/std.
transform = transforms.Compose(
    [
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,)),
    ]
)

def _mnist_split(is_train, do_shuffle):
    """Download (if needed) one MNIST split and wrap it in a DataLoader."""
    dataset = torchvision.datasets.MNIST(
        root='./data',
        train=is_train,
        download=True,
        transform=transform,
    )
    loader = torch.utils.data.DataLoader(
        dataset,
        batch_size=64,
        shuffle=do_shuffle,
    )
    return dataset, loader


# Training split is reshuffled every epoch; the test split keeps a fixed order
# so evaluation results are directly comparable between runs.
train_dataset, train_loader = _mnist_split(True, True)
test_dataset, test_loader = _mnist_split(False, False)


# 2. Model definition
class SimpleNN(nn.Module):
    """Small fully connected classifier for 28x28 MNIST digits.

    Architecture: flatten -> 784-128 -> ReLU -> 128-64 -> ReLU -> 64-10.
    ``forward`` returns raw logits (no softmax); pair with
    ``nn.CrossEntropyLoss``, which applies log-softmax internally.
    """

    def __init__(self):
        super().__init__()
        self.flatten = nn.Flatten()
        self.linear_relu_stack = nn.Sequential(
            nn.Linear(28 * 28, 128),  # input layer: 784 flattened pixels
            nn.ReLU(),
            nn.Linear(128, 64),       # hidden layer
            nn.ReLU(),
            nn.Linear(64, 10),        # one logit per digit class
        )

    def forward(self, x):
        # Collapse (N, 1, 28, 28) images to (N, 784), then score them.
        return self.linear_relu_stack(self.flatten(x))


# Instantiate the network (weights are randomly initialized here, so the
# earlier manual_seed call makes this deterministic).
model = SimpleNN()
print(model)  # Echo the layer structure as a quick sanity check.

# 3. Loss function and optimizer
loss_fn = nn.CrossEntropyLoss()  # Cross-entropy over the 10 digit logits.
optimizer = optim.SGD(model.parameters(), lr=0.01)  # Plain SGD, fixed learning rate.


# 4. Training
def train(dataloader, model, loss_fn, optimizer):
    """Run one full optimization pass (one epoch) over ``dataloader``.

    Args:
        dataloader: yields ``(inputs, labels)`` batches; must expose
            ``.dataset`` so progress can be reported against the total size.
        model: network mapping inputs to class logits; updated in place.
        loss_fn: criterion comparing logits against integer labels.
        optimizer: optimizer bound to ``model``'s parameters.
    """
    model.train()  # enable training-mode behavior (dropout, batch-norm, ...)
    total = len(dataloader.dataset)
    for batch, (X, y) in enumerate(dataloader):
        # Forward pass: score the batch and measure the error.
        loss = loss_fn(model(X), y)

        # Backward pass: fresh gradients, backprop, parameter step.
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # Light-weight progress report every 100 batches.
        if batch % 100 == 0:
            seen = batch * len(X)
            print(f"训练进度: [{seen}/{total}] | 损失: {loss.item():.4f}")


# 5. Evaluation
def test(dataloader, model, loss_fn=None):
    """Evaluate ``model`` on ``dataloader`` and print loss/accuracy.

    Args:
        dataloader: yields ``(inputs, labels)`` batches; must expose
            ``.dataset`` so accuracy can be computed over all samples.
        model: network mapping inputs to class logits.
        loss_fn: criterion used for the reported average loss. Defaults to
            ``nn.CrossEntropyLoss()``; previously this function silently read
            the module-level global ``loss_fn`` (which is exactly that
            criterion), so existing callers see identical behavior while the
            function is now self-contained and reusable.

    Returns:
        float: fraction of correctly classified samples, in [0, 1].
    """
    if loss_fn is None:
        loss_fn = nn.CrossEntropyLoss()
    model.eval()  # evaluation-mode behavior (dropout off, frozen batch-norm)
    test_loss, correct = 0, 0
    with torch.no_grad():  # inference only — skip autograd bookkeeping
        for X, y in dataloader:
            pred = model(X)
            test_loss += loss_fn(pred, y).item()
            correct += (pred.argmax(1) == y).type(torch.float).sum().item()

    test_loss /= len(dataloader)  # mean of the per-batch mean losses
    accuracy = correct / len(dataloader.dataset)
    print(f"测试结果: 平均损失: {test_loss:.4f}, 准确率: {accuracy * 100:.1f}%\n")
    return accuracy


# 6. Main loop: one training pass followed by one evaluation per epoch.
epochs = 3
for epoch_idx in range(epochs):
    print(f"Epoch {epoch_idx + 1}\n-------------------------------")
    train(train_loader, model, loss_fn, optimizer)
    test(test_loader, model)
print("训练完成！")


# 7. Visualization
def visualize_predictions():
    """Plot a 3x3 grid of test digits with their true and predicted labels.

    Reads the module-level ``model`` and ``test_dataset``; shows the first
    nine test images in a matplotlib window.
    """
    model.eval()
    fig, axes = plt.subplots(3, 3, figsize=(9, 9))
    with torch.no_grad():  # forward passes only — no gradients needed
        for idx, ax in enumerate(axes.flat):
            image, true_label = test_dataset[idx]
            guess = model(image.unsqueeze(0)).argmax().item()
            ax.imshow(image.squeeze(), cmap='gray')
            ax.set_title(f"真实: {true_label}, 预测: {guess}")
            ax.axis('off')
    plt.tight_layout()
    plt.show()


# Show a few sample predictions from the freshly trained model.
visualize_predictions()