# 1. Use PyTorch to build and train a deep network on MNIST (10 points per part)
# (1) Data handling
# (a) Load the data via torchvision datasets
import matplotlib.pyplot as plt
import torch
from torchvision.datasets import MNIST
from torchvision.transforms import ToTensor

# Load the MNIST datasets (downloaded into ./data, images converted to tensors).
train = MNIST(root='data', train=True, transform=ToTensor(), download=True)
test = MNIST(root='data', train=False, transform=ToTensor(), download=True)

# Hyperparameters.
epoch = 5          # number of full passes over the training set
batch_size = 32    # mini-batch size used by both loaders

# Wrap the datasets in DataLoaders for batched iteration.
from torch.utils.data import DataLoader

# shuffle=True reshuffles the training data every epoch so mini-batches are
# not presented in a fixed order — standard practice for SGD-style training.
train_loader = DataLoader(dataset=train, batch_size=batch_size, shuffle=True)
test_loader = DataLoader(dataset=test, batch_size=batch_size)

# (2) Model
# - Three hidden layers, 512 neurons each.
# - Each hidden layer is followed by ReLU activation and then dropout.
# - Build the network, then set up the optimizer and the loss function.
_layers = []
for _n_in, _n_out in [(28 * 28, 512), (512, 512), (512, 512)]:
    _layers += [
        torch.nn.Linear(in_features=_n_in, out_features=_n_out),
        torch.nn.ReLU(),
        torch.nn.Dropout(),
    ]
# Output layer: 10 logits, one per MNIST digit class.
_layers.append(torch.nn.Linear(in_features=512, out_features=10))
model = torch.nn.Sequential(*_layers)

op = torch.optim.Adam(params=model.parameters())
loss_fn = torch.nn.CrossEntropyLoss()

# (4) Mini-batch gradient descent.
loss_list = []
model.train()  # enable dropout during training
for i in range(epoch):
    total_loss = 0.0
    batch_num = 0

    for (x, y) in train_loader:
        op.zero_grad()
        x = x.reshape(-1, 28 * 28)  # flatten 28x28 images to 784-dim vectors
        h = model(x)
        loss = loss_fn(h, y)
        # BUG FIX: record the Python float, not the tensor. Appending the raw
        # loss tensor keeps every batch's autograd graph alive (memory leak)
        # and breaks plotting later — matplotlib cannot convert tensors that
        # require grad.
        batch_loss = loss.item()
        loss_list.append(batch_loss)
        total_loss += batch_loss
        batch_num += 1
        loss.backward()
        op.step()

    # (5) Report the average loss for this epoch.
    print(i, total_loss / batch_num)

# (6) Predict on the test set and print overall accuracy.
model.eval()  # disable dropout so predictions are deterministic
total_acc = 0

# no_grad: evaluation needs no gradients, so skip building the autograd
# graph entirely (saves memory and time).
with torch.no_grad():
    for (x, y) in test_loader:
        x = x.reshape(-1, 28 * 28)          # flatten images to 784-dim vectors
        predict = model(x).argmax(-1)       # predicted class = highest logit
        total_acc += (predict == y).float().sum().item()
print(total_acc / len(test))

# (7) Plot the per-batch training loss. float() converts each entry,
# which also tolerates any stray tensor values in loss_list.
plt.plot([float(l) for l in loss_list])
plt.show()
