import sys
import time
import numpy as np
import mxnet as mx
from mxnet import nd, gluon, init,autograd
from mxnet.gluon import nn,data,loss,utils

ctx = mx.cpu()  # compute context for the whole script (CPU only)

class Residual(nn.Block):
    """Residual unit: two 3x3 conv + batch-norm layers on the main path,
    with an identity (or 1x1-conv projected) shortcut added before the
    final ReLU.

    Parameters
    ----------
    num_channels : int
        Number of output channels of every convolution in the unit.
    use_1x1conv : bool
        If True, project the shortcut through a 1x1 convolution so its
        channel count (and stride) matches the main path.
    strides : int
        Stride of the first 3x3 conv (and of the 1x1 shortcut conv).
    """

    def __init__(self, num_channels, use_1x1conv=False, strides=1, **kwargs):
        super(Residual, self).__init__(**kwargs)
        # Main path: two 3x3 convolutions.  Child blocks are registered in
        # the same order as before so parameter names stay compatible with
        # previously saved checkpoints.
        self.conv1 = nn.Conv2D(num_channels, kernel_size=3, padding=1,
                               strides=strides)
        self.conv2 = nn.Conv2D(num_channels, kernel_size=3, padding=1)
        # Shortcut path: optional 1x1 projection.
        self.conv3 = (nn.Conv2D(num_channels, kernel_size=1, strides=strides)
                      if use_1x1conv else None)
        self.bn1 = nn.BatchNorm()
        self.bn2 = nn.BatchNorm()

    def forward(self, X):
        shortcut = X if self.conv3 is None else self.conv3(X)
        out = nd.relu(self.bn1(self.conv1(X)))
        out = self.bn2(self.conv2(out))
        return nd.relu(out + shortcut)

def resnet_block(num_channels, num_residuals, first_block=False):
    """Stack ``num_residuals`` Residual units into one Sequential stage.

    Unless this is the first stage of the network, the leading unit uses a
    1x1 conv on its shortcut so the channel count can change between stages.
    """
    stage = nn.Sequential()
    for idx in range(num_residuals):
        # Only the first unit of a non-first stage needs the projection.
        needs_projection = idx == 0 and not first_block
        stage.add(Residual(num_channels, use_1x1conv=needs_projection))
    return stage

def _get_batch(batch, ctx):
    """Split one (features, labels) batch across the devices in ``ctx``.

    Labels are cast to the feature dtype when they differ.  Returns a
    3-tuple: (feature shards, label shards, total batch size).
    """
    X, y = batch
    if y.dtype != X.dtype:
        y = y.astype(X.dtype)
    return (utils.split_and_load(X, ctx),
            utils.split_and_load(y, ctx),
            X.shape[0])

def evaluate_accuracy(data_iter, net, ctx=None):
    """Evaluate classification accuracy of ``net`` on ``data_iter``.

    Parameters
    ----------
    data_iter : iterable of (features, labels) batches.
    net : model mapping a feature batch to per-class scores.
    ctx : mx.Context, list of contexts, or None (defaults to CPU).

    Returns
    -------
    float : fraction of samples whose argmax prediction equals the label.
    """
    # Fix: the original used a mutable default argument ``ctx=[mx.cpu()]``;
    # use a None sentinel instead (same behavior for all callers).
    if ctx is None:
        ctx = [mx.cpu()]
    elif isinstance(ctx, mx.Context):
        ctx = [ctx]
    acc = nd.array([0])
    n = 0
    for batch in data_iter:
        features, labels, _ = _get_batch(batch, ctx)
        for X, y in zip(features, labels):
            y = y.astype('float32')
            # Accumulate on CPU so per-device results can be summed.
            acc += (net(X).argmax(axis=1) == y).sum().copyto(mx.cpu())
            n += y.size
        acc.wait_to_read()
    return acc.asscalar() / n

def train(train_iter, test_iter, net, loss, trainer, ctx, num_epochs):
    """Train ``net`` for ``num_epochs`` epochs, printing per-epoch mean loss.

    Parameters
    ----------
    train_iter : iterable of (features, labels) training batches.
    test_iter : iterable of test batches.  Currently unused (evaluation is
        disabled); kept in the signature for caller compatibility.
    net : gluon model to train.
    loss : loss function mapping (predictions, labels) to per-sample losses.
    trainer : gluon.Trainer that updates ``net``'s parameters.
    ctx : mx.Context or list of contexts to train on.
    num_epochs : number of full passes over ``train_iter``.
    """
    print('training on', ctx)
    if isinstance(ctx, mx.Context):
        ctx = [ctx]
    for epoch in range(1, num_epochs + 1):
        # Cleanup: dropped the dead accuracy accumulators (train_acc_sum, m)
        # and commented-out evaluation code from the original.
        train_l_sum, n = 0.0, 0.0
        start = time.time()
        for batch in train_iter:
            Xs, ys, batch_size = _get_batch(batch, ctx)
            with autograd.record():
                ls = [loss(net(X), y) for X, y in zip(Xs, ys)]
            for l in ls:
                l.backward()
            train_l_sum += sum(l.sum().asscalar() for l in ls)
            trainer.step(batch_size)
            n += batch_size
        print('epoch %d, loss %.4f, time %.1f sec'
              % (epoch, train_l_sum / n, time.time() - start))

# ---- data ------------------------------------------------------------------
# Training and test arrays are loaded from .npy files and wrapped in
# DataLoaders over (input, target) pairs.
indata = nd.array(np.load("indata.npy").astype(np.float32))
outdata = nd.array(np.load("outdata.npy").astype(np.float32))
batch_sz = 32
train_data = data.DataLoader(data.ArrayDataset(indata, outdata),
                             batch_size=batch_sz, shuffle=True)
indata_t = nd.array(np.load("indata_t.npy").astype(np.float32))
outdata_t = nd.array(np.load("outdata_t.npy").astype(np.float32))
# NOTE(review): shuffling the test set has no effect on aggregate metrics;
# shuffle=False would make test iteration deterministic — confirm intent.
test_data = data.DataLoader(data.ArrayDataset(indata_t, outdata_t),
                            batch_size=batch_sz, shuffle=True)

# ---- model -----------------------------------------------------------------
net = nn.Sequential()
net.add(resnet_block(16, 2, first_block=True),
        resnet_block(64, 2),
        resnet_block(16, 2),
        resnet_block(2, 2))

# Resume from saved weights; to train from scratch, use the initialize()
# line instead of load_params().
#net.initialize(init=init.Xavier(magnitude=2.24), ctx=ctx)
net.load_params('resnet.params')

trainer = gluon.Trainer(net.collect_params(), 'adam', {'learning_rate': .001})
# Fix: the original bound the instance to the name ``loss``, shadowing the
# imported ``mxnet.gluon.loss`` module; use a distinct name instead.
loss_fn = loss.L2Loss()
train(train_data, test_data, net, loss_fn, trainer, ctx, num_epochs=5)
