from model import BasalModel
import numpy as np
import time
import torch
from torchvision.datasets import mnist
from torch.utils.data import DataLoader
from torchvision.transforms import ToTensor

batch_size = 256  # mini-batch size used by both the train and test DataLoaders
# Prefer GPU when available; model and batches are moved onto this device below.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

def tarin():
    """Train BasalModel on MNIST for 20 epochs and checkpoint the best model.

    After batch index 200 of each epoch, a quick eval-mode pass is run on the
    current batch and logged as "valid" metrics; the model with the highest
    such accuracy is saved to 'model_weights_best.pkl'.

    NOTE(review): these "valid" metrics are measured on training batches the
    model just stepped on, not a held-out split — they overestimate true
    validation accuracy. Kept as-is to preserve the logging contract.

    NOTE: the misspelled name is the public interface (__main__ calls it), so
    it is intentionally unchanged.
    """
    train_dataset = mnist.MNIST(root='dataset', train=True, download=True, transform=ToTensor())
    # BUG FIX: the original loader did not shuffle; SGD over a fixed batch
    # order converges worse. shuffle=True is standard for training.
    train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, drop_last=True)
    model = BasalModel()
    model.to(device)
    sgd = torch.optim.SGD(model.parameters(), lr=1e-1)
    loss_fn = torch.nn.CrossEntropyLoss()
    epoch = 20
    bestAcc = 0.

    for e in range(epoch):
        train_acc = []
        valid_acc = []
        train_loss = []
        valid_loss = []
        model.train()
        cur_time = time.time()
        for idx, (train_x, train_label) in enumerate(train_loader):
            train_x, train_label = train_x.to(device=device), train_label.to(device=device)
            sgd.zero_grad()
            predict_y = model(train_x.float())
            loss = loss_fn(predict_y, train_label.long())
            loss.backward()
            sgd.step()
            # Per-batch training accuracy, computed on detached tensors so no
            # graph is kept alive. torch.argmax replaces the numpy round-trip.
            pred = predict_y.detach().argmax(dim=-1)
            train_acc.append((pred == train_label).float().mean().item())
            # loss is already a scalar (CrossEntropyLoss reduces to mean);
            # the original .sum() was a no-op.
            train_loss.append(loss.item())
            if idx > 200:
                model.eval()
                # Inference only: no_grad skips autograd bookkeeping.
                with torch.no_grad():
                    eval_out = model(train_x.float())
                    eval_loss = loss_fn(eval_out, train_label.long())
                    eval_pred = eval_out.argmax(dim=-1)
                valid_acc.append((eval_pred == train_label).float().mean().item())
                valid_loss.append(eval_loss.item())
                # BUG FIX: the original never switched back to train mode, so
                # every optimizer step after idx == 200 trained with eval-mode
                # behavior (dropout disabled, batchnorm using running stats).
                model.train()
        train_acc, valid_acc = np.mean(train_acc), np.mean(valid_acc)
        train_loss, valid_loss = np.mean(train_loss), np.mean(valid_loss)
        print('epoch: {}, train_acc: {:.4f}, train_loss: {:.4f}, valid_acc: {:.4f}, valid_loss: {:.4f}, time: {:.2f}'.format(
               e, train_acc, train_loss, valid_acc, valid_loss, time.time()-cur_time))
        if valid_acc > bestAcc:
            bestAcc = valid_acc
            # Saves the full model object (not just state_dict) because test()
            # loads it back with a plain torch.load().
            torch.save(model, 'model_weights_best.pkl')

def test():
    """Evaluate the best saved checkpoint on the full MNIST test set and
    print the overall accuracy."""
    test_dataset = mnist.MNIST(root='dataset', train=False, download=True, transform=ToTensor())
    # BUG FIX: the original used drop_last=True, silently discarding the last
    # partial batch — 10000 % 256 = 16 test images were never scored.
    test_loader = DataLoader(test_dataset, batch_size=batch_size)
    # map_location lets a checkpoint saved on GPU load on a CPU-only machine.
    # NOTE(review): this loads a full pickled model; on torch >= 2.6 the
    # weights_only default may need weights_only=False — confirm torch version.
    model = torch.load('model_weights_best.pkl', map_location=device)
    model.eval()
    correct = 0
    total = 0
    # Inference only: no_grad avoids building the autograd graph.
    with torch.no_grad():
        for data, label in test_loader:
            data = data.to(device=device)
            pred = model(data.float()).argmax(dim=-1).cpu()
            correct += (pred == label).sum().item()
            total += label.size(0)
    # Accuracy over *samples* (correct / total) rather than a mean of
    # per-batch means, so the final smaller batch is weighted correctly.
    test_acc = correct / total
    print('test_acc: {:.4f}'.format(test_acc))

def _main():
    # Train first; test() then evaluates the checkpoint training wrote out.
    tarin()
    test()


if __name__ == '__main__':
    _main()
