"""
编程实现如下功能：
要求通过pytorch来实现经典的手写体识别。
"""
# 题目要求：
# ①　按照要求加入程序使用的相关数据包
import torch as pt
import numpy as np
import torchvision as ptv
from torchvision.datasets.mnist import MNIST
from torch.utils.data import DataLoader, TensorDataset
import matplotlib.pyplot as plt
import sys

# Hyperparameters.
BATCH_SIZE = 32
N_EPOCHS = 3

# Pick the compute device: first CUDA GPU when available, otherwise CPU.
if pt.cuda.is_available():
    device_name = 'cuda:0'
else:
    device_name = 'cpu'
print('device', device_name)
device = pt.device(device_name)

# ② Load the MNIST dataset from a local directory (no download).
# ③ Dataset split: MNIST ships pre-split into train (60k) and test (10k).
root_dir = '../../../../large_data/DL2/pt/mnist'
mnist_train = MNIST(root_dir, train=True, transform=ptv.transforms.ToTensor(), download=False)
mnist_test = MNIST(root_dir, train=False, transform=ptv.transforms.ToTensor(), download=False)

x_train, y_train = mnist_train.data, mnist_train.targets
x_test, y_test = mnist_test.data, mnist_test.targets
print(y_train.unique())


def _scale(pixels):
    # Cast uint8 pixels to float and map [0, 255] -> [-1, 1].
    # (The float cast also avoids: RuntimeError "addmm_cuda" not implemented for 'Byte'.)
    return pixels.float() / 255. * 2. - 1.


# Flatten each 28x28 image to a 784-vector — the network below is FC-only.
x_train = _scale(x_train).reshape(-1, 28 * 28)
x_test = _scale(x_test).reshape(-1, 28 * 28)
print('x_train', x_train.size())
print('y_train', y_train.size())
print('x_test', x_test.size())
print('y_test', y_test.size())

# Wrap the tensors in DataLoaders for mini-batch iteration.
ds_train = TensorDataset(x_train, y_train)
dl_train = DataLoader(ds_train, BATCH_SIZE, shuffle=True, drop_last=True)
ds_test = TensorDataset(x_test, y_test)
dl_test = DataLoader(ds_test, BATCH_SIZE, shuffle=True, drop_last=False)

# ④ Define the network — the assignment asks for at least two hidden layers;
#    NOTE(review): as written this has only ONE hidden layer (784 -> 256 -> 10);
#    confirm against the requirement.
# ⑤ The forward pass is given by the Sequential container (layers applied in order).
# ⑧ Dropout (p=0.2) regularises the hidden activations.
layers = [
    pt.nn.Linear(784, 256),
    pt.nn.ReLU(),
    pt.nn.Dropout(0.2),
    pt.nn.Linear(256, 10),
]
model = pt.nn.Sequential(*layers).to(device)

# ⑦ Cross-entropy loss: expects raw logits and integer class targets.
loss = pt.nn.CrossEntropyLoss()
optim = pt.optim.Adam(params=model.parameters(), lr=0.01)

# ⑩　正确输出正确率，损失值。
def accuracy(y_true, y_pred):
    """Return the fraction of rows in `y_pred` whose argmax equals `y_true`.

    y_true: 1-D tensor of integer class labels.
    y_pred: 2-D tensor of per-class scores/logits, one row per sample.
    Returns a 0-dim float tensor in [0, 1].
    """
    predicted = y_pred.argmax(dim=1)
    return (predicted == y_true.long()).float().mean()


# ⑥　定义反向传播
# ⑨　实现预测值和真实值的比较结果。
def processData(dl, label, is_train):
    """Run one full pass over DataLoader `dl`.

    When `is_train` is True, each batch does a forward pass, backward pass,
    and an Adam step; otherwise the model is evaluated under `torch.no_grad()`
    (the original built a gradient graph during evaluation for no reason).

    label: prefix used in the per-batch progress prints.
    Returns (avg_loss, avg_acc) averaged over all batches.
    """
    dl_len = len(dl)
    # Print roughly 10 progress lines per pass; max() guards a 0 divisor
    # when the DataLoader is empty.
    group = max(1, int(np.ceil(dl_len / 10)))
    total_loss = 0.
    total_acc = 0.
    n_batches = 0
    # Set dropout train/eval mode once for the whole pass instead of
    # toggling it on every batch as the original did.
    model.train(is_train)
    for i, (bx, by) in enumerate(dl):
        bx = bx.to(device)
        by = by.long().to(device)  # CrossEntropyLoss needs int64 targets
        if is_train:
            optim.zero_grad()
            pred = model(bx)
            lossv = loss(pred, by)
            lossv.backward()
            optim.step()
        else:
            # No gradients during evaluation: saves memory and time.
            with pt.no_grad():
                pred = model(bx)
                lossv = loss(pred, by)
        accv = accuracy(by, pred)
        lossv = lossv.detach().cpu().numpy()
        accv = accv.detach().cpu().numpy()
        if i % group == 0 or i + 1 == dl_len:
            print(f'{label}: #{i + 1} loss = {lossv}, acc = {accv}')
        total_loss += lossv
        total_acc += accv
        n_batches += 1
    model.train(False)
    if n_batches == 0:
        # Original raised NameError on `i` for an empty DataLoader.
        return 0., 0.
    return total_loss / n_batches, total_acc / n_batches


# ⑥/⑨ Training loop: train one epoch, then evaluate on the held-out set.
avg_loss_his, avg_acc_his = [], []
avg_loss_his_val, avg_acc_his_val = [], []
for epoch in range(N_EPOCHS):
    print('-' * 32, f'epoch#{epoch + 1}', '-' * 32, sep='')

    avg_loss, avg_acc = processData(dl_train, 'train', True)
    avg_loss_his.append(avg_loss)
    avg_acc_his.append(avg_acc)
    print(f'epoch#{epoch + 1}: loss = {avg_loss}, acc = {avg_acc}')

    # BUG FIX: the original passed is_train=True here, so the model was
    # TRAINED on the validation set every epoch. Validation must only
    # evaluate — pass is_train=False.
    avg_loss_val, avg_acc_val = processData(dl_test, 'val', False)
    avg_loss_his_val.append(avg_loss_val)
    avg_acc_his_val.append(avg_acc_val)

    print(f'epoch#{epoch + 1}: loss = {avg_loss}, acc = {avg_acc}, loss_val = {avg_loss_val}, acc_val = {avg_acc_val}')

# BUG FIX: the original also trained on the test set in the final pass
# (is_train=True); the final report must be evaluation-only.
avg_loss, avg_acc = processData(dl_test, 'test', False)
print(f'Test: loss = {avg_loss}, acc = {avg_acc}')

# ⑩ Plot the training curves: loss (left) and accuracy (right), train vs. val.
plt.figure(figsize=[12, 6])
curves = [
    ('Loss', avg_loss_his, avg_loss_his_val),
    ('Accuracy', avg_acc_his, avg_acc_his_val),
]
for subplot_idx, (title, train_his, val_his) in enumerate(curves, start=1):
    plt.subplot(1, 2, subplot_idx)
    plt.title(title)
    plt.plot(train_his, label='train')
    plt.plot(val_his, label='val')
    plt.legend()

plt.show()
