# 数字识别任务并不一定都使用28x28像素尺寸。MNIST数据集的标准图像尺寸是28x28，但实际应用中可以根据需求调整。

import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt

# Report the runtime environment and pick the best available compute device.
print(f"PyTorch版本: {torch.__version__}")
print(f"CUDA可用: {torch.cuda.is_available()}")
# BUG FIX: the original fell back to torch.device("mps") unconditionally when
# CUDA was missing, which breaks on any machine without Apple-silicon MPS
# support (every later .to(device) call would fail). Probe each backend in
# preference order and fall back to the CPU, which always works.
if torch.cuda.is_available():
    device = torch.device("cuda")
elif getattr(torch.backends, "mps", None) is not None and torch.backends.mps.is_available():
    device = torch.device("mps")
else:
    device = torch.device("cpu")
print(f"使用设备: {device}\n")

# Preprocessing pipeline: convert each image to a float tensor in [0, 1],
# then standardize it with the MNIST training-set statistics
# (mean=0.1307, std=0.3081). Normalizing keeps inputs on a common scale,
# which speeds up and stabilizes training.
transform = transforms.Compose([
    transforms.ToTensor(),                                  # image -> tensor in [0, 1]
    transforms.Normalize(mean=(0.1307,), std=(0.3081,)),    # x -> (x - mean) / std
])

# Datasets: the MNIST train/test splits, downloaded to ./data on first run.
train_dataset = torchvision.datasets.MNIST(
    './data', train=True, download=True, transform=transform)
test_dataset = torchvision.datasets.MNIST(
    './data', train=False, download=True, transform=transform)

# Loaders: shuffle only the training stream; evaluation can use much larger
# batches since no gradients are kept while testing.
# NOTE(review): num_workers=2 in a module-level script assumes a fork-style
# multiprocessing start method; on Windows/macOS (spawn) this file would need
# an `if __name__ == "__main__":` guard — confirm the target platform.
train_loader = DataLoader(train_dataset, batch_size=64, shuffle=True, num_workers=2)
test_loader = DataLoader(test_dataset, batch_size=1000, shuffle=False, num_workers=2)


# 定义CNN模型
class CNN(nn.Module):
    """Small CNN for 10-class digit classification.

    Two conv blocks (3x3 conv -> ReLU -> 2x2 max-pool) followed by a
    dropout-regularized two-layer classifier head. Expects input of shape
    (N, 1, 28, 28); the two pooling stages shrink the spatial size
    28 -> 14 -> 7, hence the 64 * 7 * 7 flattened feature width.
    Returns raw logits of shape (N, 10).
    """

    def __init__(self):
        super(CNN, self).__init__()
        # Feature extractor: 1 -> 32 -> 64 channels, 'same' padding.
        self.conv1 = nn.Conv2d(1, 32, kernel_size=3, padding=1)
        self.conv2 = nn.Conv2d(32, 64, kernel_size=3, padding=1)
        self.pool = nn.MaxPool2d(2, 2)
        # Regularization: light dropout on feature maps, heavier on the head.
        self.dropout1 = nn.Dropout(0.25)
        self.dropout2 = nn.Dropout(0.5)
        # Classifier head.
        self.fc1 = nn.Linear(64 * 7 * 7, 128)
        self.fc2 = nn.Linear(128, 10)

    def forward(self, x):
        out = self.pool(torch.relu(self.conv1(x)))
        out = self.pool(torch.relu(self.conv2(out)))
        out = torch.flatten(self.dropout1(out), 1)
        out = self.dropout2(torch.relu(self.fc1(out)))
        return self.fc2(out)


# Instantiate the network on the selected device and show its layout.
model = CNN().to(device)
print("模型结构:")
print(model)
print()

# Loss / optimizer: cross-entropy over raw logits, Adam with the usual 1e-3 step.
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)


# 训练函数
def train(model, loader, criterion, optimizer, epoch):
    """Run one training epoch over `loader`.

    Returns a (mean batch loss, accuracy percentage) tuple. Relies on the
    module-level `device` for tensor placement.
    """
    model.train()
    total_loss, correct, total = 0, 0, 0

    for batch_idx, (inputs, targets) in enumerate(loader):
        inputs, targets = inputs.to(device), targets.to(device)

        # Forward pass.
        outputs = model(inputs)
        loss = criterion(outputs, targets)

        # Backward pass and parameter update.
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # Accumulate running statistics.
        total_loss += loss.item()
        predicted = outputs.argmax(dim=1)
        total += targets.size(0)
        correct += (predicted == targets).sum().item()

        # Progress report every 100 batches.
        if batch_idx % 100 == 99:
            print(f'Epoch: {epoch} | Batch: {batch_idx + 1}/{len(loader)} | '
                  f'Loss: {loss.item():.4f} | Acc: {100. * correct / total:.2f}%')

    return total_loss / len(loader), 100. * correct / total


# 测试函数
def test(model, loader, criterion):
    """Evaluate `model` on `loader` without tracking gradients.

    Returns a (mean batch loss, accuracy percentage) tuple. Relies on the
    module-level `device` for tensor placement.
    """
    model.eval()
    total_loss, correct, total = 0, 0, 0

    with torch.no_grad():
        for inputs, targets in loader:
            inputs, targets = inputs.to(device), targets.to(device)
            outputs = model(inputs)

            total_loss += criterion(outputs, targets).item()
            predicted = outputs.argmax(dim=1)
            correct += (predicted == targets).sum().item()
            total += targets.size(0)

    return total_loss / len(loader), 100. * correct / total


# Main loop: train for a fixed number of epochs, recording per-epoch metrics
# for the plots below.
epochs = 5
train_losses, train_accs = [], []
test_losses, test_accs = [], []

print("开始训练...\n")
for epoch in range(1, epochs + 1):
    tr_loss, tr_acc = train(model, train_loader, criterion, optimizer, epoch)
    train_losses.append(tr_loss)
    train_accs.append(tr_acc)

    te_loss, te_acc = test(model, test_loader, criterion)
    test_losses.append(te_loss)
    test_accs.append(te_acc)

    print(f"\nEpoch: {epoch} | 训练集: Loss={tr_loss:.4f} Acc={tr_acc:.2f}% | "
          f"测试集: Loss={te_loss:.4f} Acc={te_acc:.2f}%\n")

# Plot the loss and accuracy curves side by side and write them to disk.
# NOTE(review): the Chinese axis labels assume matplotlib is configured with
# a CJK-capable font; otherwise they render as empty boxes — confirm setup.
epoch_axis = range(1, epochs + 1)

plt.figure(figsize=(12, 4))

plt.subplot(1, 2, 1)
plt.plot(epoch_axis, train_losses, 'o-', label='训练集')
plt.plot(epoch_axis, test_losses, 'o-', label='测试集')
plt.xlabel('Epoch')
plt.ylabel('损失值')
plt.title('训练和测试损失')
plt.legend()

plt.subplot(1, 2, 2)
plt.plot(epoch_axis, train_accs, 'o-', label='训练集')
plt.plot(epoch_axis, test_accs, 'o-', label='测试集')
plt.xlabel('Epoch')
plt.ylabel('准确率 (%)')
plt.title('训练和测试准确率')
plt.legend()

plt.tight_layout()
plt.savefig('mnist_results.png')
print("结果图表已保存为 'mnist_results.png'")

# Persist the trained weights.
torch.save(model.state_dict(), 'mnist_cnn.pth')
print("模型已保存为 'mnist_cnn.pth'")


# 显示一些测试样本的预测结果
def visualize_predictions():
    model.eval()
    dataiter = iter(test_loader)
    images, labels = next(dataiter)
    images, labels = images.to(device), labels.to(device)

    with torch.no_grad():
        outputs = model(images)
        _, preds = outputs.max(1)

    # 将数据移回CPU进行可视化
    images = images.cpu()
    labels = labels.cpu()
    preds = preds.cpu()

    plt.figure(figsize=(10, 10))
    for i in range(25):
        plt.subplot(5, 5, i + 1)
        plt.imshow(images[i][0], cmap='gray')
        plt.title(f"预测: {preds[i]}\n真实: {labels[i]}", fontsize=9)
        plt.axis('off')
    plt.tight_layout()
    plt.savefig('mnist_predictions.png')
    print("预测样本可视化已保存为 'mnist_predictions.png'")


visualize_predictions()