# 1.使用pytorch，完成mnist的Lenet-5类封装网络处理（每题10分）
# (1)数据处理
import matplotlib.pyplot as plt
import torch.nn
from torchvision.datasets.mnist import MNIST
from torchvision.transforms import ToTensor

batch_size = 32
# ① Load MNIST via torchvision; ToTensor() converts PIL images to
# float tensors in [0, 1] with shape (1, 28, 28).
train = MNIST(root='data', train=True, transform=ToTensor(), download=True)
test = MNIST(root='data', train=False, transform=ToTensor(), download=True)
# ② Wrap the datasets in DataLoader pipelines.
from torch.utils.data import DataLoader

# shuffle=True: reshuffle the training set every epoch so mini-batch
# gradient descent does not see the same fixed (label-ordered) batch
# sequence each pass; the test loader stays in order for reproducible eval.
train_loader = DataLoader(dataset=train, batch_size=batch_size, shuffle=True)
test_loader = DataLoader(dataset=test, batch_size=batch_size)


# (2)模型处理
# ①Lenet-5两个卷积层，三个全连接层
# ②卷积层通道数分别为6，16，卷积核大小为5； 全连接层通道数自定。
# ③使用relu激活，全连接后使用dropout处理
# ④完成模型创建并编译


class Lenet5(torch.nn.Module):
    """LeNet-5-style CNN for single-channel 28x28 MNIST digits.

    Two convolutional stages (6 then 16 output channels, 5x5 kernels,
    each followed by ReLU and 2x2 max-pooling) feed a flattened
    256-feature vector into a three-layer classifier head
    (256 -> 512 -> 1024 -> 10) with ReLU + dropout between the
    fully connected layers. The final layer emits raw class logits.
    """

    def __init__(self) -> None:
        super().__init__()
        # Stage 1: 1 -> 6 channels; 28x28 -> 24x24 (valid conv) -> 12x12 (pool).
        self.conv1 = torch.nn.Sequential(
            torch.nn.Conv2d(1, 6, kernel_size=5, padding='valid'),
            torch.nn.ReLU(),
            torch.nn.MaxPool2d(2),
        )
        # Stage 2: 6 -> 16 channels; 12x12 -> 8x8 -> 4x4, i.e. 16*4*4 = 256.
        self.conv2 = torch.nn.Sequential(
            torch.nn.Conv2d(6, 16, kernel_size=5, padding='valid'),
            torch.nn.ReLU(),
            torch.nn.MaxPool2d(2),
        )
        self.flat = torch.nn.Flatten()
        # Classifier head; Dropout() uses the default p=0.5.
        self.fc = torch.nn.Sequential(
            torch.nn.Linear(256, 512),
            torch.nn.ReLU(),
            torch.nn.Dropout(),
            torch.nn.Linear(512, 1024),
            torch.nn.ReLU(),
            torch.nn.Dropout(),
            torch.nn.Linear(1024, 10),
        )

    def forward(self, x):
        """Map a (N, 1, 28, 28) batch to (N, 10) class logits."""
        for stage in (self.conv1, self.conv2, self.flat, self.fc):
            x = stage(x)
        return x


model = Lenet5()
op = torch.optim.Adam(params=model.parameters())
loss_fn = torch.nn.CrossEntropyLoss()

# ⑤ Mini-batch gradient descent over the DataLoader batches.
# ⑥ Print the average cost of each epoch.
# ⑦ Print the test-set accuracy of each epoch.
# ⑧ Plot the per-batch loss curve at the end.
epoch = 5
loss_list = []
for i in range(epoch):
    model.train()
    total_loss = 0.0
    batch = 0
    for (x, y) in train_loader:
        op.zero_grad()
        h = model(x)
        loss = loss_fn(h, y)
        # Record the Python float, NOT the tensor: appending `loss` itself
        # would keep every batch's autograd graph alive for the whole run
        # (a memory leak) and hand grad-requiring tensors to plt.plot.
        loss_value = loss.item()
        loss_list.append(loss_value)
        total_loss += loss_value
        batch += 1
        loss.backward()
        op.step()
    print(i, total_loss / batch)  # average cost of this epoch
    model.eval()
    total_acc = 0
    # Evaluation needs no gradients; no_grad saves memory and compute.
    with torch.no_grad():
        for (x, y) in test_loader:
            h = model(x).argmax(-1)          # predicted class per sample
            acc = (h == y).float()
            total_acc += acc.sum().item()    # count of correct predictions
    print(i, total_acc / len(test))          # test accuracy of this epoch
plt.plot(loss_list)
plt.show()
