import torch
import torch.nn as nn
import torch.optim as optim
from torchvision import transforms
from torchvision import datasets
from torch.utils.data import DataLoader
import matplotlib
import matplotlib.pyplot as plt
import os

matplotlib.use('TkAgg')

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Use asynchronous host-to-device copies only when a CUDA device is available;
# on CPU-only machines non_blocking has no effect anyway.
non_blocking = torch.cuda.is_available()

# Compose chains several preprocessing steps and applies them in order:
# each MNIST image is first converted by ToTensor, then normalized.
transform = transforms.Compose([
    # MNIST images arrive as PIL.Image (grayscale, integer pixels 0-255).
    # ToTensor does three things:
    #   1. converts the PIL.Image / numpy.ndarray to a PyTorch Tensor;
    #   2. linearly rescales pixel values from [0, 255] to [0.0, 1.0]
    #      (e.g. pixel value 128 becomes 128/255 ~= 0.502);
    #   3. reorders dimensions from [H, W, C] to [C, H, W] (channels first).
    # For MNIST (grayscale) the resulting shape is [1, 28, 28].
    transforms.ToTensor(),
    # Normalize standardizes the tensor: centering (mean ~ 0) and scaling to
    # unit standard deviation speeds up convergence and keeps gradients
    # well-conditioned. Using the same constants for train and test keeps the
    # two distributions consistent. 0.1307 / 0.3081 are the training-set mean
    # and standard deviation commonly published for MNIST.
    transforms.Normalize((0.1307,), (0.3081,))
])

train_dataset = datasets.MNIST(root='../data/Mnist/', train=True, transform=transform, download=True)
test_dataset = datasets.MNIST(root='../data/Mnist/', train=False, transform=transform, download=True)

batch_size = 128
# Shuffle only the training data; evaluation order does not matter.
train_dataloader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
test_dataloader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)


class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.linear1 = nn.Linear(784, 512)
        self.linear2 = nn.Linear(512, 256)
        self.linear3 = nn.Linear(256, 128)
        self.linear4 = nn.Linear(128, 64)
        self.linear5 = nn.Linear(64, 10)
        self.act = nn.ReLU()

    def forward(self, x):
        x = x.view(-1, 784)
        x = self.act(self.linear1(x))
        x = self.act(self.linear2(x))
        x = self.act(self.linear3(x))
        x = self.act(self.linear4(x))
        x = self.linear5(x)
        return x


# Move the model to the chosen device before building the optimizer so the
# optimizer holds references to the device-resident parameters.
model = Net().to(device)

# CrossEntropyLoss expects raw logits and integer class labels.
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9)


def train(model, train_loader, optimizer, criterion, device):
    """Run one training epoch and return (mean batch loss, accuracy).

    The caller is expected to have put the model into train() mode; this
    function only performs the forward/backward/step loop.

    Args:
        model: the network to optimize.
        train_loader: DataLoader yielding (inputs, labels) batches.
        optimizer: optimizer updating model's parameters.
        criterion: loss taking (logits, labels), e.g. CrossEntropyLoss.
        device: torch.device the batches are moved to.

    Returns:
        (mean loss over batches, fraction of correctly classified samples).
    """
    train_loss_epoch = 0.0
    total_correct = 0
    total_samples = 0
    # Async host-to-device copies only help on CUDA; deriving this from the
    # device argument removes the hidden dependency on a module-level global.
    non_blocking = device.type == "cuda"
    for inputs, labels in train_loader:
        inputs = inputs.to(device, non_blocking=non_blocking)
        labels = labels.to(device, non_blocking=non_blocking)
        logits = model(inputs)
        loss = criterion(logits, labels)
        optimizer.zero_grad(set_to_none=True)
        loss.backward()
        optimizer.step()
        train_loss_epoch += loss.item()
        # Accuracy is measured on the pre-step predictions of each batch.
        pred = logits.argmax(dim=1)
        total_correct += (pred == labels).sum().item()
        total_samples += labels.size(0)
    train_loss_epoch = train_loss_epoch / len(train_loader)
    train_acc_epoch = total_correct / total_samples
    return train_loss_epoch, train_acc_epoch


def test(model, test_loader, criterion, device):
    test_loss_epoch = 0
    total_correct = 0
    total_samples = 0
    with torch.no_grad():
        for inputs, labels in test_loader:
            inputs = inputs.to(device, non_blocking=non_blocking)
            labels = labels.to(device, non_blocking=non_blocking)
            logits = model(inputs)
            loss = criterion(logits, labels)
            test_loss_epoch += loss.item()
            # 获取模型输出logits中每个样本预测概率最大的类别索引，即模型的预测结果。
            # argmax(dim=1)表示在第1维度（类别维度）上找到最大值的索引，返回每个样本的预测类别标签。
            pred = logits.argmax(dim=1)
            correct = (pred == labels).sum().item()
            total_correct += correct
            total_samples += labels.size(0)
    test_loss_epoch = test_loss_epoch / len(test_loader)
    test_acc_epoch = total_correct / total_samples
    return test_loss_epoch, test_acc_epoch


epochs = 15
train_loss = []
train_acc = []
test_loss = []
test_acc = []
for epoch in range(epochs):
    model.train()
    train_loss_epoch, train_acc_epoch = train(model, train_dataloader, optimizer, criterion, device)
    model.eval()
    test_loss_epoch, test_acc_epoch = test(model, test_dataloader, criterion, device)
    train_loss.append(train_loss_epoch)
    train_acc.append(train_acc_epoch)
    test_loss.append(test_loss_epoch)
    test_acc.append(test_acc_epoch)
    template = "epoch: {:3d}, train_loss: {:.3f}, train_acc: {:.1f}%, test_loss: {:.3f}, test_acc: {:.1f}%"
    print(template.format(epoch + 1, train_loss_epoch, train_acc_epoch * 100, test_loss_epoch, test_acc_epoch * 100))

# Plot the loss curves (left) and accuracy curves (right), train vs. test.
plt.figure(figsize=(16, 6))
plt.subplot(1, 2, 1)
plt.plot(range(epochs), train_loss, label="train_loss")
plt.plot(range(epochs), test_loss, label="test_loss")
plt.legend()
plt.subplot(1, 2, 2)
plt.plot(range(epochs), train_acc, label="train_acc")
plt.plot(range(epochs), test_acc, label="test_acc")
plt.legend()
plt.show()

# Persist only the weights (state_dict), not the whole module object.
model_path = "../models"
# exist_ok=True is atomic with respect to directory creation, avoiding the
# check-then-create race of the os.path.exists + makedirs pattern.
os.makedirs(model_path, exist_ok=True)
torch.save(model.state_dict(), os.path.join(model_path, "mnist_model.pth"))

# "Output neuron 0 predicts class 0" holds because:
# Dataset convention: MNIST ships with the label mapping 0->0, 1->1, ..., 9->9.
# Loss mechanics: CrossEntropyLoss forces the model to learn exactly this correspondence.
# Deep-learning convention: the i-th output neuron corresponds to the i-th class by default.
# Training: via backpropagation the model gradually learns to make neuron i fire
# strongest whenever it sees digit i.
# Key point: this mapping is not discovered automatically by the code; it is a
# convention enforced jointly by the dataset design, the loss function, and training.
