from mxnet.gluon import nn
from mxnet import gluon
from mxnet import nd
import mxnet as mx
from mxnet import autograd


def transform(data, label):
    """Convert a raw Fashion-MNIST sample to float32, pixels scaled to [0, 1]."""
    features = data.astype('float32') / 255
    target = label.astype('float32')
    return features, target


def accuracy(output, label):
    """Fraction of rows in `output` whose argmax equals `label` (Python float)."""
    predictions = output.argmax(axis=1)
    return nd.mean(predictions == label).asscalar()


def evaluate_accuracy(data_iter, net):
    """Mean per-batch accuracy of `net` over every batch in `data_iter`.

    NOTE(review): averaging per-batch accuracies slightly over-weights a
    smaller final batch; acceptable for tutorial-scale evaluation.
    """
    total = sum(accuracy(net(features), labels) for features, labels in data_iter)
    return total / len(data_iter)


# Load Fashion-MNIST with the per-sample `transform` (uint8 -> float32 in [0, 1]).
# NOTE: constructing these datasets downloads the data on first use — a
# network/disk side effect that happens at module import time.
mnist_train = gluon.data.vision.FashionMNIST(train=True, transform=transform)
mnist_test = gluon.data.vision.FashionMNIST(train=False, transform=transform)

# Sanity-check one sample: `data` is a single image, `label` its class id.
data, label = mnist_train[0]
print("example shape: ", data.shape, "label: ", label)


def load_data_fashion_mnist(batch_size=64, resize=None):
    """Build train/test DataLoaders over the module-level Fashion-MNIST datasets.

    Parameters
    ----------
    batch_size : int
        Number of samples per batch.
    resize : int or None
        If given, resize each image to (resize, resize) before batching.
        BUGFIX: this parameter previously existed but was silently ignored;
        the default is now None so the default behavior (no resizing, as the
        old code actually did) is unchanged, while explicit `resize=` works.

    Returns
    -------
    (train_data, test_data) : tuple of gluon.data.DataLoader
        Train loader shuffles; test loader does not.
    """
    train_ds, test_ds = mnist_train, mnist_test
    if resize:
        # transform_first resizes only the image, leaving the label untouched.
        resizer = gluon.data.vision.transforms.Resize(resize)
        train_ds = train_ds.transform_first(resizer)
        test_ds = test_ds.transform_first(resizer)
    train_data = gluon.data.DataLoader(train_ds, batch_size, shuffle=True)
    test_data = gluon.data.DataLoader(test_ds, batch_size, shuffle=False)
    return train_data, test_data


def train(train_data, test_data, net, batch_size, loss_func, trainer, ctx, num_epochs=1):
    """Train `net` for `num_epochs`, printing per-epoch loss and accuracy.

    Parameters
    ----------
    train_data, test_data : iterable of (data, label) batches.
    net : callable model producing per-class scores.
    batch_size : int, passed to `trainer.step` for gradient normalization.
    loss_func : loss callable taking (output, label).
    trainer : gluon.Trainer holding the optimizer state.
    ctx : mx.Context to run computation on.
        BUGFIX: `ctx` was previously accepted but never used, so training
        silently ignored the requested device. Batches are now copied to
        `ctx` (a no-op when data already lives there, e.g. CPU).
        NOTE(review): `evaluate_accuracy` does not move batches to `ctx`;
        confirm eval data placement before training on GPU.
    num_epochs : int, number of passes over `train_data`.
    """
    n_batches = len(train_data)
    for epoch in range(num_epochs):
        epoch_loss = 0.0
        epoch_acc = 0.0
        for data, label in train_data:
            # Place the batch on the requested device before the forward pass.
            data = data.as_in_context(ctx)
            label = label.as_in_context(ctx)

            with autograd.record():
                output = net(data)
                loss = loss_func(output, label)
            loss.backward()

            # step(batch_size) averages the summed gradients over the batch.
            trainer.step(batch_size)
            epoch_loss += nd.mean(loss).asscalar()
            epoch_acc += accuracy(output, label)

        test_acc = evaluate_accuracy(test_data, net)
        print("Epoch: %d, Loss: %f, Train acc: %f, Test acc: %f" %
              (epoch, epoch_loss / n_batches, epoch_acc / n_batches, test_acc))
