import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
import torchvision
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
import numpy as np
# Preprocessing pipeline applied to every MNIST image:
#   ToTensor()  - converts a PIL image / ndarray to a float tensor scaled to [0, 1]
#   Normalize() - standardizes with the dataset-wide mean and std
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.1307,), (0.3081,)),  # MNIST global mean / std
])

# Training split: downloaded on first run, reshuffled every epoch, batches of 64.
train_set = torchvision.datasets.MNIST(root='./data', train=True, download=True, transform=transform)
train_loader = DataLoader(train_set, batch_size=64, shuffle=True)

# Test split: larger batches; no shuffling needed for evaluation.
test_set = torchvision.datasets.MNIST(root='./data', train=False, download=True, transform=transform)
test_loader = DataLoader(test_set, batch_size=1000, shuffle=False)
# Grab one mini-batch from the training loader and preview the first
# five digits alongside their ground-truth labels.
batch_images, batch_labels = next(iter(train_loader))

fig, axes = plt.subplots(1, 5, figsize=(10, 4))
for idx, ax in enumerate(axes):
    ax.imshow(batch_images[idx].squeeze(), cmap='gray')  # drop the channel dim for grayscale display
    ax.set_title(f'Label: {batch_labels[idx]}')
    ax.axis('off')
plt.show()
class MLP(nn.Module):
    """Two-layer fully connected classifier.

    Architecture: Linear(input_size -> hidden_size) -> ReLU ->
    Linear(hidden_size -> num_classes). The output layer emits raw
    logits — no softmax here, because nn.CrossEntropyLoss applies
    log-softmax internally.

    Args:
        input_size:  number of input features per sample (784 for 28x28 MNIST).
        hidden_size: width of the hidden layer.
        num_classes: number of output classes.
    """

    def __init__(self, input_size, hidden_size, num_classes):
        super(MLP, self).__init__()
        self.fc1 = nn.Linear(input_size, hidden_size)  # first fully connected layer
        self.relu = nn.ReLU()                          # activation
        self.fc2 = nn.Linear(hidden_size, num_classes) # output layer (logits)

    def forward(self, x):
        """Flatten each sample and return class logits of shape [batch, num_classes].

        Fix: flatten with x.view(x.size(0), -1) instead of the hardcoded
        x.view(-1, 28*28), so the layer sizes follow the input_size passed
        to __init__ rather than silently assuming 784 features. Behavior
        is unchanged for the 28x28 MNIST case.
        """
        x = x.view(x.size(0), -1)  # [batch, ...] -> [batch, input_size]
        out = self.fc1(x)
        out = self.relu(out)
        out = self.fc2(out)
        return out

# Hyperparameters for the network and the training run.
input_size = 784    # flattened 28x28 image
hidden_size = 128   # hidden-layer width (tunable)
num_classes = 10    # digits 0-9
num_epochs = 5      # passes over the training set (tunable)

# Build the model and move it onto the GPU when one is available.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = MLP(input_size, hidden_size, num_classes)
model.to(device)
print(f"模型已加载到设备: {device}")

# Cross-entropy fits multi-class classification; Adam with lr=1e-3.
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)

train_losses = []   # per-epoch average training loss, filled by the training loop

# Training loop: one full pass over train_loader per epoch.
# Fix: the original `for i, (...) in enumerate(train_loader)` never used
# the index `i`; iterate the loader directly.
for epoch in range(num_epochs):
    model.train()  # enable training-mode behavior (dropout, batchnorm stats, ...)
    running_loss = 0.0

    for images, labels in train_loader:
        # Move the batch to the same device as the model (CPU or GPU).
        images = images.to(device)
        labels = labels.to(device)

        # Forward pass: logits and loss.
        outputs = model(images)
        loss = criterion(outputs, labels)

        # Backward pass and parameter update.
        optimizer.zero_grad()  # clear gradients from the previous step
        loss.backward()        # backpropagate to compute gradients
        optimizer.step()       # apply the update

        running_loss += loss.item()

    # Average mini-batch loss for this epoch.
    epoch_loss = running_loss / len(train_loader)
    train_losses.append(epoch_loss)
    print(f'Epoch [{epoch+1}/{num_epochs}], Loss: {epoch_loss:.4f}')

print('训练完成！')
# Evaluation on the held-out test set.
model.eval()  # switch to inference mode (disables dropout etc.)
with torch.no_grad():  # no gradient bookkeeping needed for inference
    correct = 0
    total = 0
    for images, labels in test_loader:
        images = images.to(device)
        labels = labels.to(device)
        outputs = model(images)
        # Fix: take the argmax of `outputs` directly — the `.data` attribute
        # is a deprecated pre-autograd idiom and is redundant here because
        # torch.no_grad() already disables gradient tracking.
        _, predicted = torch.max(outputs, 1)  # predicted class per sample
        total += labels.size(0)
        correct += (predicted == labels).sum().item()

    accuracy = 100 * correct / total
    print(f'在测试集10000张图片上的准确率为: {accuracy:.2f}%')
# Plot the per-epoch training loss recorded during training.
epoch_axis = range(1, num_epochs + 1)
plt.plot(epoch_axis, train_losses, label='Training Loss')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.title('Training Loss over Epochs')
plt.legend()
plt.show()

# Show the model's predictions on the first test batch: a 2x5 grid of
# digits annotated with the true label and the predicted label.
sample_images, sample_labels = next(iter(test_loader))
sample_images = sample_images.to(device)
sample_labels = sample_labels.to(device)

logits = model(sample_images)
_, sample_preds = torch.max(logits, 1)

# Matplotlib needs CPU tensors for display.
sample_images = sample_images.cpu()
fig, axes = plt.subplots(2, 5, figsize=(12, 6))
for idx, ax in enumerate(axes.flat):
    ax.imshow(sample_images[idx].squeeze(), cmap='gray')
    ax.set_title(f'True: {sample_labels[idx].item()}\nPred: {sample_preds[idx].item()}')
    ax.axis('off')
plt.tight_layout()
plt.show()