from mxnet import nd
import mxnet as mx
from mxnet import autograd
from mxnet import gluon


def pure_batch_norm(X, gamma, beta, eps=1e-5):
    """Batch-normalize a dense (2D) or convolutional (4D) activation.

    Parameters
    ----------
    X : NDArray of shape (batch, features) or (batch, channels, H, W)
    gamma, beta : per-feature (or per-channel) scale and shift
    eps : small constant added to the variance for numerical stability

    Returns gamma * X_hat + beta, where X_hat is X standardized with the
    batch statistics.
    """
    assert len(X.shape) in (2, 4)

    is_conv = len(X.shape) == 4
    if is_conv:
        # per-channel statistics over batch and spatial axes; kept 4D so
        # they broadcast against X
        mean = X.mean(axis=(0, 2, 3), keepdims=True)
    else:
        # fully-connected: statistics over the batch axis
        mean = X.mean(axis=0)

    centered = X - mean
    if is_conv:
        variance = (centered ** 2).mean(axis=(0, 2, 3), keepdims=True)
    else:
        variance = (centered ** 2).mean(axis=0)

    x_hat = centered / nd.sqrt(variance + eps)
    # reshape gamma/beta so they broadcast in both the 2D and 4D case
    return gamma.reshape(mean.shape) * x_hat + beta.reshape(mean.shape)


# --- sanity checks for pure_batch_norm on toy data ---

# 2D case: 3 samples x 2 features; gamma=1, beta=0 so the result is just
# the per-column standardized input
A = nd.arange(6).reshape((3,2))
print(A)

bn1 = pure_batch_norm(A, gamma=nd.array([1,1]), beta=nd.array([0,0]))
print(bn1)

# 4D case: 1 sample, 2 channels, 3x3 spatial; normalization is per channel
B = nd.arange(18).reshape((1,2,3,3))
print(B)
bn2 = pure_batch_norm(B, gamma=nd.array([1,1]), beta=nd.array([0, 0]))
print(bn2)


# use moving average for test
# use moving average for test
def batch_norm(X, gamma, beta, is_training, moving_mean,
               moving_variance, eps=1e-5,
               moving_momentum=0.9):
    """Batch normalization that keeps running statistics for inference.

    In training mode, X is normalized with the current batch statistics and
    `moving_mean` / `moving_variance` are updated in place with an
    exponential moving average.  In inference mode, the stored moving
    statistics are used instead of batch statistics.

    Parameters
    ----------
    X : NDArray of shape (batch, features) or (batch, channels, H, W)
    gamma, beta : per-feature (or per-channel) scale and shift
    is_training : bool, selects batch statistics vs. moving statistics
    moving_mean, moving_variance : running statistics, mutated in place
    eps : small constant added to the variance for numerical stability
    moving_momentum : EMA decay factor for the running statistics
    """
    assert len(X.shape) in (2, 4)

    # fully-connected input: statistics over the batch axis
    if len(X.shape) == 2:
        mean = X.mean(axis=0)
        variance = ((X - mean) ** 2).mean(axis=0)
    # conv input: per-channel statistics, kept 4D for broadcasting
    else:
        mean = X.mean(axis=(0,2,3), keepdims=True)
        variance = ((X - mean) ** 2).mean(axis=(0,2,3), keepdims=True)

        # NOTE(review): this assumes reshape returns a view sharing memory
        # with the caller's array, so the in-place `[:]` update below
        # propagates to the global buffers — confirm for the MXNet version
        # in use.
        moving_mean = moving_mean.reshape(mean.shape)
        moving_variance = moving_variance.reshape(mean.shape)

    if is_training:
        X_hat = (X - mean) / nd.sqrt(variance + eps)
        # update the global moving mean / variance in place (EMA)
        moving_mean[:] = moving_momentum * moving_mean + (
            1.0 - moving_momentum) * mean
        moving_variance[:] = moving_momentum * moving_variance + (
            1.0 - moving_momentum) * variance
    else:
        # inference: normalize with the accumulated moving statistics
        X_hat = (X - moving_mean) / nd.sqrt(moving_variance + eps)

    return gamma.reshape(mean.shape) * X_hat + beta.reshape(mean.shape)


# run everything on CPU
ctx = mx.cpu()
weight_scale = .01
# first conv layer: output channel = 20 , kernel=(5, 5)
c1 = 20
W1 = nd.random.normal(shape=(c1, 1, 5, 5), scale=weight_scale, ctx=ctx)
b1 = nd.zeros(c1, ctx=ctx)

# first layer batch norm parameters and running statistics
gamma1 = nd.random.normal(shape=c1, scale=weight_scale, ctx=ctx)
beta1 = nd.random.normal(shape=c1, scale=weight_scale, ctx=ctx)
moving_mean1 = nd.zeros(c1, ctx=ctx)
# NOTE(review): the running variance starts at zero; many implementations
# initialize it to ones so inference before any training is well-scaled.
moving_variance1 = nd.zeros(c1, ctx=ctx)

# second conv layer: output channel = 50 , kernel=(3, 3)
c2 = 50
W2 = nd.random.normal(shape=(c2, c1, 3, 3), scale=weight_scale, ctx=ctx)
b2 = nd.zeros(c2, ctx=ctx)

# second layer batch norm parameters and running statistics
gamma2 = nd.random.normal(shape=c2, scale=weight_scale, ctx=ctx)
beta2 = nd.random.normal(shape=c2, scale=weight_scale, ctx=ctx)
moving_mean2 = nd.zeros(c2, ctx=ctx)
moving_variance2 = nd.zeros(c2, ctx=ctx)

# first dense layer, output dim = 128.
# 1250 = c2 * 5 * 5: a 28x28 input shrinks to 24x24 after the 5x5 conv,
# 12x12 after pooling, 10x10 after the 3x3 conv, 5x5 after pooling.
o3 = 128
W3 = nd.random.normal(shape=(1250, o3), scale=weight_scale, ctx=ctx)
b3 = nd.zeros(o3, ctx=ctx)

# output layer, dim = 10 (one logit per class)
W4 = nd.random.normal(shape=(W3.shape[1], 10), scale=weight_scale, ctx=ctx)
b4 = nd.zeros(W4.shape[1], ctx=ctx)

# gradient-trained parameters; the moving statistics are updated in place
# inside batch_norm and are deliberately not in this list
params = [W1, b1, gamma1, beta1,
          W2, b2, gamma2, beta2,
          W3, b3, W4, b4]

for param in params:
    param.attach_grad()


def net(X, is_training=False, verbose=False):
    """Forward pass: two conv+BN+relu+pool blocks, then two dense layers.

    Parameters
    ----------
    X : batch of flattened 28x28 images
    is_training : passed through to batch_norm (batch vs. moving stats)
    verbose : print intermediate shapes and the raw output

    Returns the unnormalized class scores (logits).
    """
    # reshape flat rows into NCHW images and move to the weights' device
    X = X.reshape((X.shape[0], 1, 28, 28))
    X = X.as_in_context(W1.context)

    # block 1: conv -> batch norm -> relu -> 2x2 max pool
    conv1 = nd.Convolution(X, W1, b1, kernel=W1.shape[2:], num_filter=c1)
    norm1 = batch_norm(conv1, gamma1, beta1, is_training,
                       moving_mean1, moving_variance1)
    pool1 = nd.Pooling(nd.relu(norm1), pool_type='max',
                       kernel=(2,2), stride=(2,2))

    # block 2: conv -> batch norm -> relu -> 2x2 max pool
    conv2 = nd.Convolution(pool1, W2, b2, kernel=W2.shape[2:], num_filter=c2)
    norm2 = batch_norm(conv2, gamma2, beta2, is_training,
                       moving_mean2, moving_variance2)
    pool2 = nd.Pooling(nd.relu(norm2), pool_type='max',
                       kernel=(2,2), stride=(2,2))
    flat = nd.flatten(pool2)

    # dense head
    hidden = nd.relu(nd.dot(flat, W3) + b3)
    logits = nd.dot(hidden, W4) + b4

    if verbose:
        print('1st conv block: ', pool1.shape)
        print('2nd conv block: ', flat.shape)
        print('1st dense: ', hidden.shape)
        print('2nd dense: ', logits.shape)
        print('output: ', logits)
    return logits


def transform(data, label):
    """Cast an image to float32 scaled into [0, 1] and its label to float32."""
    scaled = data.astype('float32') / 255
    return scaled, label.astype('float32')


def accuracy(output, label):
    """Fraction of rows in `output` whose argmax matches `label`, as a float."""
    predictions = output.argmax(axis=1)
    return (predictions == label).mean().asscalar()


def evaluate_accuracy(data_iter, net):
    """Average per-batch accuracy of `net` over every batch in `data_iter`."""
    total = sum(accuracy(net(data), label) for data, label in data_iter)
    return total / len(data_iter)


# FashionMNIST train/test splits with the float32/[0,1] transform applied
mnist_train = gluon.data.vision.FashionMNIST(train=True, transform=transform)
mnist_test = gluon.data.vision.FashionMNIST(train=False, transform=transform)

# peek at a single example
data, label = mnist_train[0]
print("example shape: ", data.shape, "label: ", label)

batch_size = 256
# shuffle only the training stream; keep test order deterministic
train_data = gluon.data.DataLoader(mnist_train, batch_size, shuffle=True)
test_data = gluon.data.DataLoader(mnist_test, batch_size, shuffle=False)


def SGD(params, lr):
    """Vanilla SGD step: update each parameter in place as p <- p - lr * p.grad."""
    for p in params:
        p[:] = p - lr * p.grad


softmax_cross_entropy = gluon.loss.SoftmaxCrossEntropyLoss()
learning_rate = .2

# training loop: 5 epochs of minibatch SGD
for epoch in range(5):
    train_loss = 0
    train_acc = 0

    for data, label in train_data:
        with autograd.record():
            # is_training=True: use batch statistics and update moving stats
            output = net(data, is_training=True)
            loss = softmax_cross_entropy(output, label)
        loss.backward()
        # NOTE(review): lr is divided by batch_size — consistent with
        # backward() summing gradients over the per-sample losses; confirm.
        SGD(params, learning_rate/batch_size)
        train_loss += nd.mean(loss).asscalar()
        train_acc += accuracy(output, label)

    # evaluation uses net's default is_training=False, i.e. moving statistics
    test_acc = evaluate_accuracy(test_data, net)
    print("Epoch: %d, Loss: %f, Train acc: %f, Test acc: %f" %
          (epoch, train_loss/len(train_data),
           train_acc/len(train_data), test_acc))

