"""
多层网络：手写数字分类
"""

import os
import torch
import torch.nn as nn
import torchvision.datasets
import torchvision.transforms as transforms
from tqdm import tqdm
import numpy as np
import matplotlib.pyplot as plt


class NeuralNet(nn.Module):
    """Two-layer fully-connected classifier.

    Architecture: Linear(input_size -> hidden_size) -> ReLU ->
    Linear(hidden_size -> num_classes). Returns raw logits (no softmax),
    intended for use with nn.CrossEntropyLoss.
    """

    def __init__(self, input_size, hidden_size, num_classes):
        # Zero-argument super() is the modern, equivalent form of
        # super(NeuralNet, self).__init__().
        super().__init__()
        self.fc1 = nn.Linear(input_size, hidden_size)
        self.relu = nn.ReLU()
        self.fc2 = nn.Linear(hidden_size, num_classes)

    def forward(self, x):
        """Compute class logits for a batch x of shape (N, input_size)."""
        out = self.fc1(x)
        out = self.relu(out)
        out = self.fc2(out)
        return out


if __name__ == '__main__':
    # Hyperparameters and paths.
    input_size = 28*28          # flattened 28x28 MNIST image
    num_classes = 10
    hidden_size = 500
    batch_size = 128
    num_epochs = 5
    # os.cpu_count() may return None on some platforms; fall back to 0
    # (load data in the main process) so DataLoader doesn't crash.
    nw = os.cpu_count() or 0
    model_path = 'model/checkpoint/mnist-2.pth'
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print('使用设备：', device)

    # Data pipelines (download MNIST on first run).
    trainset = torchvision.datasets.MNIST(root='./dataset/mnist', train=True, download=True, transform=transforms.ToTensor())
    trainloader = torch.utils.data.DataLoader(dataset=trainset, batch_size=batch_size, shuffle=True, num_workers=nw, drop_last=True)
    testset = torchvision.datasets.MNIST(root='./dataset/mnist', train=False, download=True, transform=transforms.ToTensor())
    testloader = torch.utils.data.DataLoader(dataset=testset, batch_size=batch_size, shuffle=False, num_workers=nw)

    model = NeuralNet(input_size, hidden_size, num_classes).to(device)

    # Resume from an existing checkpoint if present.
    if os.path.isfile(model_path):
        print('load model:', model_path)
        # map_location avoids a crash when the checkpoint was saved on a
        # different device (e.g. saved on GPU, loaded on a CPU-only machine).
        model.load_state_dict(torch.load(model_path, map_location=device))
    # Training mode must be set regardless of whether we resumed.
    model.train()

    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=0.001)

    # train model
    for epoch in range(num_epochs):
        print(f'Epoch:{epoch+1}/{num_epochs}')
        pbar = tqdm(trainloader)
        running_loss = []
        for images, labels in pbar:
            images, labels = images.to(device), labels.to(device)
            # Flatten (N, 1, 28, 28) -> (N, 784) for the fully-connected net.
            seqs = images.reshape(-1, input_size)
            outputs = model(seqs)
            loss = criterion(outputs, labels)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            running_loss.append(loss.item())
            pbar.set_description_str(f'loss:{round(np.nanmean(running_loss), 3)}')

    # save model
    os.makedirs(os.path.dirname(model_path), exist_ok=True)
    torch.save(model.state_dict(), model_path)

    # test model
    model.eval()
    with torch.no_grad():
        correct = 0
        total = 0
        for images, labels in testloader:
            images, labels = images.to(device), labels.to(device)
            seqs = images.reshape(-1, input_size)
            outputs = model(seqs)
            _, preds = torch.max(outputs, 1)
            total += labels.size(0)
            correct += (preds==labels).sum().item()
        # Report the actual number of evaluated examples rather than a
        # hard-coded 10000 (the loader determines the true count).
        print(f'Test accuracy on {total} examples:{round(correct/total*100, 2)}%')

    # show image: predict the first test batch and plot 16 digits
    model.to(torch.device('cpu'))
    images, labels = next(iter(testloader))
    with torch.no_grad():
        outputs = model(images.reshape(-1, input_size))
        _, preds = torch.max(outputs, 1)
    plt.figure()
    for i, image in enumerate(images.numpy()[:16, :, :]):
        plt.subplot(4, 4, i+1)
        plt.imshow(image.squeeze())
        plt.title(f'label:{labels[i].item()},pred:{preds[i].item()}')
        plt.axis('off')
    plt.show()
