from mxnet.gluon import nn
from mxnet import gluon
from mxnet import nd
import mxnet as mx
from mxnet import autograd


class LeNet(nn.Block):
    """LeNet-style CNN for 28x28 single-channel images, 10 output classes.

    Two conv/BN/ReLU/pool stages followed by a small dense classifier head.
    Input is reshaped to NCHW inside ``forward``.
    """

    def __init__(self, **kwargs):
        super(LeNet, self).__init__(**kwargs)
        self.out = nn.Sequential()
        with self.out.name_scope():
            # Stage 1: 20 feature maps, 5x5 kernels, halve spatial dims.
            self.out.add(
                nn.Conv2D(channels=20, kernel_size=5),
                nn.BatchNorm(axis=1),
                nn.Activation(activation='relu'),
                nn.MaxPool2D(pool_size=2, strides=2),
            )
            # Stage 2: 50 feature maps, 3x3 kernels, halve spatial dims again.
            self.out.add(
                nn.Conv2D(channels=50, kernel_size=3),
                nn.BatchNorm(axis=1),
                nn.Activation(activation='relu'),
                nn.MaxPool2D(pool_size=2, strides=2),
            )
            # Classifier head.
            self.out.add(
                nn.Flatten(),
                nn.Dense(128, activation='relu'),
                nn.Dense(10),
            )

    def forward(self, X):
        # Force NCHW layout; the channel dim is 1, so this flat reshape is
        # equivalent to the original (X.shape[0], 1, 28, 28) reshape.
        X = X.reshape((-1, 1, 28, 28))
        return self.out(X)


def transform(data, label):
    """Scale image pixels from [0, 255] bytes to [0, 1] float32; cast labels to float32."""
    scaled = data.astype('float32') / 255
    return scaled, label.astype('float32')


def accuracy(output, label):
    """Return the fraction of rows in `output` whose argmax equals `label`."""
    predictions = output.argmax(axis=1)
    hits = predictions == label
    return nd.mean(hits).asscalar()


def evaluate_accuracy(data_iter, net):
    """Compute `net`'s classification accuracy over all samples in `data_iter`.

    The original averaged per-batch mean accuracies and divided by the number
    of batches, which over-weights a smaller final batch. This version counts
    correct predictions per sample and divides by the true sample total, so
    the result is exact regardless of batch sizes.

    Parameters:
        data_iter: iterable yielding (data, label) batches of NDArrays.
        net: callable mapping a data batch to class-score rows.

    Returns:
        float in [0, 1] — fraction of correctly classified samples.
    """
    correct = 0.0
    total = 0
    for data, label in data_iter:
        output = net(data)
        # Sum of exact argmax hits in this batch.
        correct += nd.sum(output.argmax(axis=1) == label).asscalar()
        total += data.shape[0]
    return correct / total



# --- Data -------------------------------------------------------------------
# FashionMNIST downloads on first use; `transform` scales pixels to [0, 1]
# float32 and casts labels to float32.
mnist_train = gluon.data.vision.FashionMNIST(train=True, transform=transform)
mnist_test = gluon.data.vision.FashionMNIST(train=False, transform=transform)

# Peek at a single sample (shape presumably has a trailing channel dim —
# the print below shows the actual layout; LeNet.forward reshapes to NCHW).
data, label = mnist_train[0]
print("example shape: ", data.shape, "label: ", label)

batch_size = 256
# Shuffle only the training split; evaluation order is irrelevant.
train_data = gluon.data.DataLoader(mnist_train, batch_size, shuffle=True)
test_data = gluon.data.DataLoader(mnist_test, batch_size, shuffle=False)

# Everything runs on the CPU; batches are created there, so no device copies.
ctx = mx.cpu()

net = LeNet()

net.initialize(ctx=ctx)

# Plain SGD; `trainer.step(batch_size)` below rescales the aggregated
# gradients by the batch size, so 0.1 acts as a per-sample learning rate.
trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': 0.1})
loss_func = gluon.loss.SoftmaxCrossEntropyLoss()

# --- Training loop ----------------------------------------------------------
for epoch in range(5):
    train_loss = 0
    train_acc = 0
    for data, label in train_data:

        # Record the forward pass so backward() can compute gradients.
        with autograd.record():
            output = net(data)
            loss = loss_func(output, label)
        loss.backward()

        trainer.step(batch_size)
        # Accumulate per-batch means for progress reporting.
        train_loss += nd.mean(loss).asscalar()
        train_acc += accuracy(output, label)

    # NOTE(review): dividing summed per-batch means by the batch count slightly
    # mis-weights the final, smaller batch — acceptable for progress logging.
    test_acc = evaluate_accuracy(test_data, net)
    print("Epoch: %d, Loss: %f, Train acc: %f, Test acc: %f" %
          (epoch, train_loss/len(train_data), train_acc/len(train_data), test_acc))
