from mxnet import nd

# Single-channel convolution demo: 1 sample, 1 channel, 3x3 input, 2x2 kernel.
w = nd.arange(4).reshape((1, 1, 2, 2))
b = nd.array([1])

data = nd.arange(9).reshape((1, 1, 3, 3))
# MXNet Convolution weights are laid out (num_filter, in_channels, kh, kw), so
# the filter count is w.shape[0].  The original passed w.shape[1], which only
# happened to work because both dims equal 1 here; shape[0] matches the
# multi-filter examples below.
out = nd.Convolution(data, w, b, kernel=w.shape[2:], num_filter=w.shape[0])
print('kernel size: ', w.shape[2:])
print('input: ', data)
print('weight: ', w)
print('bias: ', b)
print('out: ', out)


# Same convolution, now with stride 2 and zero-padding of 1 on every side.
# num_filter comes from w.shape[0] (weight layout: num_filter, channel, kh, kw);
# the original w.shape[1] only worked because both leading dims are 1.
out = nd.Convolution(data, w, b, kernel=w.shape[2:],
                     num_filter=w.shape[0],
                     stride=(2, 2), pad=(1, 1))
print('kernel size: ', w.shape[2:])
print('input: ', data)
print('weight: ', w)
print('bias: ', b)
print('out: ', out)

# Multi-channel input: one 2x2 filter spanning 2 input channels.
w = nd.arange(8).reshape((1, 2, 2, 2))
data = nd.arange(18).reshape((1, 2, 3, 3))

out = nd.Convolution(data, w, b, kernel=w.shape[2:], num_filter=w.shape[0])

for tag, value in (('kernel size: ', w.shape[2:]),
                   ('input: ', data),
                   ('weight: ', w),
                   ('bias: ', b),
                   ('out: ', out)):
    print(tag, value)

# Multi-channel output: two filters (and two biases) produce two output maps.
w = nd.arange(16).reshape((2, 2, 2, 2))
data = nd.arange(18).reshape((1, 2, 3, 3))
b = nd.array([1, 2])

out = nd.Convolution(data, w, b, kernel=w.shape[2:], num_filter=w.shape[0])

for tag, value in (('kernel size: ', w.shape[2:]),
                   ('input: ', data),
                   ('weight: ', w),
                   ('bias: ', b),
                   ('out: ', out)):
    print(tag, value)


# Pooling layer: 2x2 max and average pooling applied to the same input.
data = nd.arange(18).reshape((1, 2, 3, 3))
pool_kernel = (2, 2)
max_pool = nd.Pooling(data=data, pool_type='max', kernel=pool_kernel)
avg_pool = nd.Pooling(data=data, pool_type='avg', kernel=pool_kernel)

print('data: ', data)
print('max pooling: ', max_pool)
print('avg pooling: ', avg_pool)

from mxnet import gluon
import mxnet as mx


def transform(data, label):
    """Scale image pixels from [0, 255] to [0, 1] float32; cast label to float32."""
    scaled = data.astype('float32') / 255
    return scaled, label.astype('float32')


# Fashion-MNIST, normalized by `transform`, wrapped in batched loaders.
mnist_train = gluon.data.vision.FashionMNIST(train=True, transform=transform)
mnist_test = gluon.data.vision.FashionMNIST(train=False, transform=transform)

# Peek at one example to show its shape and label.
data, label = mnist_train[0]
print("example shape: ", data.shape, "label: ", label)

batch_size = 256
train_data = gluon.data.DataLoader(mnist_train, batch_size=batch_size, shuffle=True)
test_data = gluon.data.DataLoader(mnist_test, batch_size=batch_size, shuffle=False)

ctx = mx.cpu()

# LeNet parameters, initialized from a small zero-mean Gaussian.
weight_scale = .01

# conv layer 1: 20 output channels, 5x5 kernel over 1 input channel
W1 = nd.random_normal(shape=(20, 1, 5, 5), scale=weight_scale, ctx=ctx)
b1 = nd.zeros(W1.shape[0], ctx=ctx)

# conv layer 2: 50 output channels, 3x3 kernel over 20 input channels
W2 = nd.random_normal(shape=(50, 20, 3, 3), scale=weight_scale, ctx=ctx)
b2 = nd.zeros(W2.shape[0], ctx=ctx)

# dense layer 1: 1250 flattened conv features -> 128 units
W3 = nd.random_normal(shape=(1250, 128), scale=weight_scale, ctx=ctx)
b3 = nd.zeros(W3.shape[1], ctx=ctx)

# dense layer 2: 128 units -> 10 class scores
W4 = nd.random_normal(shape=(W3.shape[1], 10), scale=weight_scale, ctx=ctx)
b4 = nd.zeros(W4.shape[1], ctx=ctx)

# Allocate gradient buffers so autograd can record updates for every parameter.
params = [W1, b1, W2, b2, W3, b3, W4, b4]
for p in params:
    p.attach_grad()


def net(X, verbose=False):
    """LeNet-style forward pass: two conv+pool stages, then two dense layers.

    X is reshaped to (batch, 1, 28, 28); returns raw (pre-softmax) class
    scores of shape (batch, 10).  With verbose=True, prints the shape of
    each intermediate stage.
    """
    X = X.as_in_context(W1.context)
    X = X.reshape((X.shape[0], 1, 28, 28))

    # stage 1: convolution -> relu -> 2x2 max pooling
    conv1 = nd.Convolution(X, W1, b1, kernel=W1.shape[2:], num_filter=W1.shape[0])
    pool1 = nd.Pooling(nd.relu(conv1), pool_type='max', kernel=(2, 2), stride=(2, 2))

    # stage 2: convolution -> relu -> 2x2 max pooling, flattened for the dense layers
    conv2 = nd.Convolution(pool1, W2, b2, kernel=W2.shape[2:], num_filter=W2.shape[0])
    pool2 = nd.Pooling(nd.relu(conv2), pool_type='max', kernel=(2, 2), stride=(2, 2))
    flat = nd.flatten(pool2)

    # dense layers: flattened features -> 128 (relu) -> 10 scores
    dense1 = nd.relu(nd.dot(flat, W3) + b3)
    scores = nd.dot(dense1, W4) + b4

    if verbose:
        print('1st conv block:', pool1.shape)
        print('2nd conv block:', flat.shape)
        print('1st dense:', dense1.shape)
        print('2nd dense:', scores.shape)
        print('output:', scores)
    return scores


# Push a single batch through the network once, just to print the layer shapes.
sample_batch, _ = next(iter(train_data))
net(sample_batch, verbose=True)


def accuracy(output, label):
    """Fraction of rows whose argmax class equals the label, as a Python float."""
    predictions = output.argmax(axis=1)
    return nd.mean(predictions == label).asscalar()


def evaluate_accuracy(data_iter, net):
    """Mean of the per-batch accuracies of `net` over `data_iter`.

    NOTE: this averages per batch, so a smaller final batch is weighted the
    same as a full one — the usual tutorial approximation.
    """
    total = 0
    for batch, batch_label in data_iter:
        total += accuracy(net(batch), batch_label)
    return total / len(data_iter)


def SGD(params, lr):
    """Vanilla SGD: update each parameter in place as param <- param - lr * grad."""
    for p in params:
        # slice-assign so the update happens in the existing buffer
        p[:] = p - lr * p.grad


from mxnet import autograd

softmax_cross_entropy = gluon.loss.SoftmaxCrossEntropyLoss()
learning_rate = .2

for epoch in range(5):
    cumulative_loss = 0
    cumulative_acc = 0

    for data, label in train_data:
        # record the forward pass so backward() can compute parameter gradients
        with autograd.record():
            output = net(data)
            loss = softmax_cross_entropy(output, label)
        loss.backward()
        # lr is divided by batch_size since the gradient is accumulated over the batch
        SGD(params, learning_rate / batch_size)
        cumulative_loss += nd.mean(loss).asscalar()
        cumulative_acc += accuracy(output, label)

    test_acc = evaluate_accuracy(test_data, net)
    print("Epoch: %d, Loss: %f, Train acc: %f, Test acc: %f" %
          (epoch, cumulative_loss / len(train_data),
           cumulative_acc / len(train_data), test_acc))
