"""
使用卷积神经网络(Convolutional Neural Network)进行训练
"""
import os

import torch
from torch import nn
from torch.utils.data import DataLoader
from torchvision import transforms, datasets

# Device used for training: first CUDA GPU when available, otherwise CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")


# 定义模型
class NeuralNetwork(nn.Module):
    """CNN for MNIST: two conv+pool stages followed by five fully-connected layers.

    Input:  (batch, 1, 28, 28) grayscale images.
    Output: (batch, 10) raw class logits (no softmax; pair with CrossEntropyLoss).
    """

    def __init__(self) -> None:
        # BUGFIX: the original forwarded *args/**kwargs to nn.Module.__init__,
        # which accepts no arguments — any argument would have raised TypeError.
        super().__init__()
        # First conv layer: (batch, 1, 28, 28) -> (batch, 10, 24, 24).
        self.conv1 = nn.Conv2d(1, 10, 5)
        # After 2x2 max-pooling the feature map is (batch, 10, 12, 12).
        # Second conv layer: (batch, 10, 12, 12) -> (batch, 20, 8, 8).
        self.conv2 = nn.Conv2d(10, 20, 5)
        # Shared 2x2 max-pooling layer (stateless, so one instance suffices).
        self.pool = nn.MaxPool2d(2, 2)
        # Shared activation function.
        self.activation = nn.ReLU()
        # After the second pooling the features are (batch, 20, 4, 4) = 320
        # elements per sample; map them down to the 10 output classes.
        self.fc1 = nn.Linear(320, 256)
        self.fc2 = nn.Linear(256, 128)
        self.fc3 = nn.Linear(128, 64)
        self.fc4 = nn.Linear(64, 32)
        self.fc5 = nn.Linear(32, 10)

    def forward(self, x):
        """Compute class logits for a batch of images.

        :param x: tensor of shape (batch, 1, 28, 28)
        :return: tensor of shape (batch, 10)
        """
        batch_size = x.size(0)
        # Conv -> pool -> activate, twice.
        x = self.activation(self.pool(self.conv1(x)))
        x = self.activation(self.pool(self.conv2(x)))
        # Flatten to (batch, 320) for the fully-connected head.
        x = x.view(batch_size, -1)
        # Fully-connected head; the last layer emits raw logits.
        x = self.activation(self.fc1(x))
        x = self.activation(self.fc2(x))
        x = self.activation(self.fc3(x))
        x = self.activation(self.fc4(x))
        x = self.fc5(x)
        return x


# The following mirrors FCNNTrain.py; consult that file if anything is unclear.
# Dataset preprocessing: convert images to tensors and normalize with the
# MNIST global mean/std (0.1307 / 0.3081).
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.1307,), (0.3081,))
])

# Dataset preparation (downloads MNIST into ../../dataset on first run).
trainSet = datasets.MNIST("../../dataset", train=True, download=True, transform=transform)
testSet = datasets.MNIST("../../dataset", train=False, download=True, transform=transform)
# Dataset loading.
# NOTE(review): num_workers > 0 makes spawn-based platforms re-import this
# module in worker processes, re-executing this top-level setup — confirm
# the script is only entered via the __main__ guard.
trainLoader = DataLoader(trainSet, batch_size=64, shuffle=True, num_workers=4)
testLoader = DataLoader(testSet, batch_size=64, shuffle=False, num_workers=2)

# Initialize the model.
model = NeuralNetwork()
# Move the model to the training device.
model.to(device)
# Loss function (expects raw logits + integer class targets).
criterion = nn.CrossEntropyLoss()
# Optimizer.
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)


def myTrainEpoch(model, trainLoader):
    """Train `model` for one epoch over `trainLoader`.

    Relies on the module-level `criterion`, `optimizer` and `device`.
    Whenever a batch produces a new lowest loss, the weights that produced it
    are checkpointed to "models/best.pth" (the save happens before
    optimizer.step(), so the saved state matches the recorded loss).

    :return: the average per-batch loss over the epoch.
    """
    # BUGFIX: torch.save does not create missing directories; without this
    # the first checkpoint raises FileNotFoundError on a fresh checkout.
    os.makedirs("models", exist_ok=True)
    # Lowest per-batch loss seen so far this epoch (was a magic 9999).
    minLoss = float("inf")
    # Accumulates per-batch losses for the epoch average.
    totalLoss = 0.0
    for data, target in trainLoader:
        # Move the batch to the training device.
        data, target = data.to(device), target.to(device)
        # Forward pass.
        y_pred = model(data)
        # Loss for this batch (hoist .item() — it syncs with the GPU).
        loss = criterion(y_pred, target)
        batchLoss = loss.item()
        totalLoss += batchLoss
        # Checkpoint the weights that produced the best loss so far.
        if batchLoss < minLoss:
            minLoss = batchLoss
            torch.save(model.state_dict(), "models/best.pth")
        # Backward pass and parameter update.
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    # Average loss per batch for this epoch.
    return totalLoss / len(trainLoader)


def myTestEpoch(model, testLoader):
    """Evaluate `model` on `testLoader` and return its accuracy in [0, 1].

    Runs without gradient tracking; uses the module-level `device`.
    """
    hits = 0
    seen = 0
    # No gradients needed for evaluation.
    with torch.no_grad():
        for inputs, labels in testLoader:
            # Move the batch to the evaluation device.
            inputs, labels = inputs.to(device), labels.to(device)
            # Predicted class = index of the largest logit per sample.
            logits = model(inputs)
            predictions = logits.argmax(dim=1)
            # Tally sample count and correct predictions.
            seen += labels.size(0)
            hits += (predictions == labels).sum().item()
    # Fraction of correctly classified samples.
    return hits * 1.0 / seen


if __name__ == '__main__':
    # Train for 100 epochs, evaluating on the test set after each epoch.
    for epoch in range(100):
        trainLoss = myTrainEpoch(model, trainLoader)
        testRate = myTestEpoch(model, testLoader)
        print(f"Epoch: {epoch}, Train Loss: {trainLoss:.4f}, Test Rate: {testRate:.4f}")
