"""
卷积：手写数字分类
"""

import os
import torch
import torch.nn as nn
import torchvision.datasets
import torch.utils.data
import torchvision.transforms as transforms
from tqdm import tqdm
import numpy as np
import matplotlib.pyplot as plt


class ConvNet(nn.Module):
    """Two-stage convolutional classifier for 28x28 single-channel images.

    Each stage halves the spatial resolution (28 -> 14 -> 7), so the
    flattened feature vector fed to the final linear layer has
    7 * 7 * 32 elements.
    """

    def __init__(self, num_classes):
        super(ConvNet, self).__init__()
        # Attribute names (layer1/layer2/fc) are kept stable so saved
        # state dicts remain loadable.
        self.layer1 = self._conv_block(1, 16)
        self.layer2 = self._conv_block(16, 32)
        self.fc = nn.Linear(7 * 7 * 32, num_classes)

    @staticmethod
    def _conv_block(in_channels, out_channels):
        """Conv -> BatchNorm -> ReLU -> 2x2 max-pool; padding=2 keeps H/W before pooling."""
        return nn.Sequential(
            nn.Conv2d(in_channels, out_channels, kernel_size=(5, 5), stride=(1, 1), padding=2),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2),
        )

    def forward(self, x):
        """Return raw class logits for a batch of shape (N, 1, 28, 28)."""
        features = self.layer2(self.layer1(x))
        flat = features.reshape(features.size(0), -1)
        return self.fc(flat)


if __name__ == '__main__':
    # ----- hyper-parameters & environment -----
    num_classes = 10
    batch_size = 128
    num_epochs = 5
    # os.cpu_count() can return None; 0 means "load data in the main process".
    nw = os.cpu_count() or 0
    model_path = 'model/checkpoint/CNN.pth'
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print('use device:', device)

    # ----- data -----
    trainset = torchvision.datasets.MNIST(root='./dataset/mnist', train=True, download=True,
                                          transform=transforms.ToTensor())
    trainloader = torch.utils.data.DataLoader(dataset=trainset, batch_size=batch_size, shuffle=True, num_workers=nw,
                                              drop_last=True)
    testset = torchvision.datasets.MNIST(root='./dataset/mnist', train=False, download=True,
                                         transform=transforms.ToTensor())
    testloader = torch.utils.data.DataLoader(dataset=testset, batch_size=batch_size, shuffle=False, num_workers=nw)

    model = ConvNet(num_classes).to(device)

    # Resume from a previous checkpoint if one exists.
    # map_location lets a checkpoint saved on GPU load on a CPU-only machine.
    if os.path.isfile(model_path):
        print('load model:', model_path)
        model.load_state_dict(torch.load(model_path, map_location=device))
    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=0.001)

    # ----- train -----
    model.train()  # always enter train mode, checkpoint or not
    for epoch in range(num_epochs):
        print(f'Epoch:{epoch + 1}/{num_epochs}')
        pbar = tqdm(trainloader)
        running_loss = []
        for images, labels in pbar:
            images, labels = images.to(device), labels.to(device)
            outputs = model(images)
            loss = criterion(outputs, labels)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            running_loss.append(loss.item())
            # Show the running mean loss for the current epoch.
            pbar.set_description_str(f'loss:{round(float(np.nanmean(running_loss)), 3)}')

    # ----- save -----
    os.makedirs(os.path.dirname(model_path), exist_ok=True)
    torch.save(model.state_dict(), model_path)

    # ----- evaluate -----
    model.eval()
    with torch.no_grad():
        correct = 0
        total = 0
        for images, labels in testloader:
            images, labels = images.to(device), labels.to(device)
            outputs = model(images)
            _, preds = torch.max(outputs, 1)  # index of the highest logit per sample
            total += labels.size(0)
            correct += (preds == labels).sum().item()
        # Report over the actual evaluated count rather than a hard-coded 10000.
        print(f'Test accuracy on {total} examples:{round(correct / total * 100, 2)}%')

    # ----- visualize a few predictions -----
    model.to(torch.device('cpu'))
    images, labels = next(iter(testloader))
    with torch.no_grad():
        outputs = model(images)
        _, preds = torch.max(outputs, 1)
    plt.figure()
    for i, image in enumerate(images.numpy()[:16, :, :]):
        plt.subplot(4, 4, i + 1)
        plt.imshow(image.squeeze())
        plt.title(f'label:{labels[i].item()},pred:{preds[i].item()}')
        plt.axis('off')
    plt.show()
