"""
使用全链接神经网络(Fully Connected Neural Network)进行训练
"""
import os

import torch
from torch import nn
from torch.utils.data import DataLoader
from torchvision import datasets, transforms


# Model definition
class NeuralNet(nn.Module):
    """Five-layer fully connected classifier for 1x28x28 MNIST images.

    Input:  a tensor reshapeable to (n, 784) — e.g. (n, 1, 28, 28).
    Output: (n, 10) raw logits, one score per digit class.
    """

    def __init__(self) -> None:
        # nn.Module.__init__ takes no arguments; the previous *args/**kwargs
        # pass-through was misleading and is deprecated in recent PyTorch.
        super().__init__()
        # Input images are 1*28*28 (1 channel, h=28, w=28) -> 784 features
        self.fc1 = nn.Linear(784, 512)
        self.fc2 = nn.Linear(512, 256)
        self.fc3 = nn.Linear(256, 128)
        self.fc4 = nn.Linear(128, 64)
        # Final layer outputs 10 values, one per class
        self.fc5 = nn.Linear(64, 10)
        self.activation = nn.ReLU()

    def forward(self, x):
        # Flatten the input: n*1*28*28 -> (n, 784)
        x = x.view(-1, 784)
        # Each linear layer is followed by a ReLU activation
        x = self.activation(self.fc1(x))
        x = self.activation(self.fc2(x))
        x = self.activation(self.fc3(x))
        x = self.activation(self.fc4(x))
        # No activation on the last layer: CrossEntropyLoss expects raw logits
        x = self.fc5(x)
        return x


# Dataset preprocessing
# Compose applies the listed transforms in order
transform = transforms.Compose([
    # Convert the PIL image to a tensor (this maps pixel values 0~255 to 0~1)
    transforms.ToTensor(),
    # Normalize the single-channel data.
    # The mean (0.1307,) and std (0.3081,) are precomputed over the
    # entire MNIST training set.
    transforms.Normalize((0.1307,), (0.3081,)),
])

# Prepare the datasets (downloaded to root on first run)
trainSet = datasets.MNIST(root='../../dataset', train=True, download=True, transform=transform)
testSet = datasets.MNIST(root='../../dataset', train=False, download=True, transform=transform)

# Wrap the datasets in batched loaders; only the training set is shuffled
trainLoader = DataLoader(trainSet, batch_size=64, shuffle=True, num_workers=2)
testLoader = DataLoader(testSet, batch_size=64, shuffle=False, num_workers=2)

# Initialize the model
model = NeuralNet()
# Loss: cross entropy (operates on the raw logits produced by the model)
criterion = nn.CrossEntropyLoss()
# Optimizer: plain SGD
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)


# One training epoch
def myTrainEpoch(model, trainloader):
    """Train `model` for one pass over `trainloader`.

    Uses the module-level `criterion` and `optimizer`. Saves the model's
    state dict to models/best.pth whenever a batch achieves a new lowest
    loss within this epoch, and returns the mean batch loss.

    NOTE(review): the "best" tracking resets every call, so best.pth
    reflects the lowest batch loss of the most recent epoch only —
    confirm this is the intended checkpointing policy.
    """
    # Make sure layers such as dropout/batchnorm (if added later) are in
    # training mode.
    model.train()
    # Ensure the checkpoint directory exists; torch.save does not create it
    # and would raise otherwise.
    os.makedirs("models", exist_ok=True)
    # Lowest batch loss seen so far this epoch; inf guarantees the first
    # batch always triggers a save (a finite sentinel like 9999 would not).
    minLoss = float('inf')
    # Accumulated loss, used to report the epoch's mean batch loss
    totalLoss = 0.0
    for batch_idx, (data, target) in enumerate(trainloader):
        # Forward pass
        y_pred = model(data)
        # Compute the loss
        loss = criterion(y_pred, target)
        totalLoss += loss.item()
        # Checkpoint whenever this batch beats the epoch's lowest loss
        if loss.item() < minLoss:
            minLoss = loss.item()
            torch.save(model.state_dict(), "models/best.pth")
        # Clear accumulated gradients
        optimizer.zero_grad()
        # Backpropagate
        loss.backward()
        # Update the weights
        optimizer.step()
    # Mean batch loss for this epoch
    return totalLoss / len(trainloader)


# Evaluation
def myTestEpoch(model, testloader):
    """Evaluate `model` on `testloader`; return accuracy in [0, 1].

    Returns 0.0 for an empty loader instead of dividing by zero.
    """
    correct = 0
    total = 0
    # Eval mode so layers like dropout/batchnorm (if added later) behave
    # deterministically during evaluation.
    model.eval()
    # No gradients needed for evaluation
    with torch.no_grad():
        for batch_idx, (data, target) in enumerate(testloader):
            # Forward pass: (n, 10) logits
            y_pred = model(data)
            # argmax over the class dimension (dim=1) gives predicted labels
            _, predicted = torch.max(y_pred, dim=1)
            # Count samples and correct predictions
            total += target.size(0)
            correct += (predicted == target).sum().item()
    # Guard against an empty test set
    if total == 0:
        return 0.0
    return correct / total


if __name__ == '__main__':
    # Train for 100 epochs, evaluating on the test set after each one.
    num_epochs = 100
    for epoch in range(num_epochs):
        avg_loss = myTrainEpoch(model, trainLoader)
        accuracy = myTestEpoch(model, testLoader)
        print(f"Epoch {epoch}, Train Loss: {avg_loss:.4f}, Test Rate: {accuracy:.4f}")
