# Single Shot MultiBox Detector
import mxnet.gluon.nn as nn
import mxnet as mx
from mxnet import nd
from mxnet.contrib.ndarray import MultiBoxPrior, MultiBoxTarget, MultiBoxDetection
import matplotlib.pyplot as plt
import mxnet.image as image
import numpy as np
import mxnet.gluon as gluon
from mxnet import metric
import time
from mxnet import autograd
import matplotlib


# Network input edge length (pixels); images are resized to data_shape x data_shape.
data_shape = 256
batch_size = 32
# Per-channel RGB mean subtracted from images before inference (see process_image).
rgb_mean = nd.array([123, 117, 104])
# Root directory of the pikachu detection dataset (train.rec / val.rec).
DATA_DIR = '/home/alpha/ML/data/pikachu/'


def get_iterators(data_dir, data_shape, batch_size):
    """Build detection data iterators for the pikachu dataset.

    Returns (train_iter, val_iter, class_names, num_class); the train
    iterator shuffles and random-crops, the val iterator does neither.
    """
    class_names = ['pikachu']
    num_class = len(class_names)
    # settings shared by both iterators
    common = dict(
        batch_size=batch_size,
        data_shape=(3, data_shape, data_shape),
        mean=True,
    )
    train_iter = image.ImageDetIter(
        path_imgrec=data_dir + 'train.rec',
        path_imgidx=data_dir + 'train.idx',
        shuffle=True,
        rand_crop=1,
        min_object_covered=0.95,
        max_attempts=200,
        **common)
    val_iter = image.ImageDetIter(
        path_imgrec=data_dir + 'val.rec',
        shuffle=False,
        **common)
    return train_iter, val_iter, class_names, num_class


# http://zh.gluon.ai/chapter_computer-vision/ssd.html


# Demo: generate anchor boxes at every position of an n x n feature map.
n = 40
# nd.random.uniform matches the modern API already used later in this file
# (nd.random_uniform is the deprecated alias).
x = nd.random.uniform(shape=(1, 3, n, n))

# 3 sizes + 3 ratios -> 3 + 3 - 1 = 5 anchors per position.
y = MultiBoxPrior(x, sizes=[.5, .25, .1], ratios=[1, 2, .5])
print(y.shape)
boxes = y.reshape((n, n, -1, 4))
print(boxes.shape)
# fixed typo in the message: "cloumn" -> "column"
print('The first anchor box at row 21, column 21: ', boxes[20, 20, 0, :])


def box_to_rect(box, color, linewidth=3):
    """Turn a corner-format box (xmin, ymin, xmax, ymax) into a
    matplotlib Rectangle patch (outline only, no fill)."""
    xmin, ymin, xmax, ymax = box.asnumpy()
    return plt.Rectangle(
        (xmin, ymin), xmax - xmin, ymax - ymin,
        fill=False, edgecolor=color, linewidth=linewidth
    )


colors = ['blue', 'green', 'red', 'black', 'magenta']

# Show the anchors of one position on a blank white n x n canvas.
plt.imshow(nd.ones((n, n, 3)).asnumpy())

# all 5 anchor boxes centered at feature-map position (20, 20)
anchors = boxes[20, 20, :, :]

for i in range(anchors.shape[0]):
    # anchors are in relative [0, 1] coordinates; scale by n to plot in pixels
    plt.gca().add_patch(box_to_rect(anchors[i, :] * n, colors[i]))

plt.show()


def class_predictor(num_anchors, num_classes):
    """3x3 conv predicting, per anchor, scores for num_classes + background."""
    channels = num_anchors * (num_classes + 1)
    return nn.Conv2D(channels, 3, padding=1)


# Smoke test: 5 anchors, 10 classes -> 5 * (10 + 1) = 55 output channels.
cls_pred = class_predictor(5, 10)
cls_pred.initialize()
x = nd.zeros((2, 3, 20, 20))
print('Class prediction: ', cls_pred(x).shape)


def box_predictor(num_anchors):
    """3x3 conv predicting 4 location offsets for each anchor."""
    return nn.Conv2D(4 * num_anchors, 3, padding=1)


# Smoke test: 10 anchors -> 40 output channels (4 offsets per anchor).
box_pred = box_predictor(10)
box_pred.initialize()
x = nd.zeros((2, 3, 20, 20))
y = box_pred(x)
print(y.shape)


def down_sample(num_filters):
    """Two conv-batchnorm-relu blocks followed by a 2x2 max pooling
    that halves the spatial resolution."""
    block = nn.HybridSequential()
    for _ in range(2):
        block.add(nn.Conv2D(num_filters, 3, strides=1, padding=1))
        block.add(nn.BatchNorm(in_channels=num_filters))
        block.add(nn.Activation('relu'))
    block.add(nn.MaxPool2D(2))
    return block


# Smoke test: down_sample halves spatial dims (20 -> 10).
blk = down_sample(10)
blk.initialize()
x = nd.zeros((2, 3, 20, 20))
y = blk(x)
print(y.shape)

# combine pred from different layer
x = nd.zeros((2, 8, 20, 20))
print('x: ', x.shape)
cls_pred1 = class_predictor(5, 10)
cls_pred1.initialize()
y1 = cls_pred1(x)
print('Class pred 1: ', y1.shape)

# halve the feature map before the second prediction scale
ds = down_sample(16)
ds.initialize()
x = ds(x)
print('x: ', x.shape)

cls_pred2 = class_predictor(3, 10)
cls_pred2.initialize()
y2 = cls_pred2(x)
print('Class pred 2: ', y2.shape)


def flatten_prediction(pred):
    """Move channels last (NCHW -> NHWC), then flatten to (batch, -1)."""
    nhwc = pred.transpose(axes=(0, 2, 3, 1))
    return nhwc.flatten()


def concat_predictions(preds):
    """Concatenate per-scale flat predictions along axis 1."""
    return nd.concat(*preds, dim=1)


# Flatten each scale's predictions so they can be concatenated.
flat_y1 = flatten_prediction(y1)
print('Flatten class pred 1: ', flat_y1.shape)

flat_y2 = flatten_prediction(y2)
print('Flatten class pred 2: ', flat_y2.shape)

y = concat_predictions([flat_y1, flat_y2])
print('Concat class pred: ', y.shape)


# body net: use to extract raw feature, we can use ResNet and so on
def body():
    """Backbone feature extractor: three stacked down_sample stages
    (a real SSD would use e.g. a ResNet here)."""
    net = nn.HybridSequential()
    for width in (16, 32, 64):
        net.add(down_sample(width))
    return net


# Smoke test: three down_sample stages shrink 256 -> 32.
bnet = body()
bnet.initialize()
x = nd.random.uniform(shape=(2, 3, 256, 256))
y = bnet(x)
print(y.shape)


def toy_ssd_model(num_anchors, num_classes):
    """Assemble the four sub-networks of the toy SSD: backbone,
    intermediate down-samplers, and per-scale class/box predictors."""
    downsamplers = nn.Sequential()
    for _ in range(3):
        downsamplers.add(down_sample(128))

    predictors_cls = nn.Sequential()
    predictors_box = nn.Sequential()
    # one class predictor and one box predictor per prediction scale
    for _ in range(5):
        predictors_cls.add(class_predictor(num_anchors, num_classes))
        predictors_box.add(box_predictor(num_anchors))

    model = nn.Sequential()
    model.add(body(), downsamplers, predictors_cls, predictors_box)
    return model


# forward function
def toy_ssd_forward(x, model, sizes, ratios, verbose=False):
    body, down_samplers, class_predictors, box_predictors = model
    anchors, class_preds, box_preds = [], [], []
    x = body(x)
    for i in range(5):
        anchors.append(MultiBoxPrior(
            x, sizes=sizes[i], ratios=ratios[i]
        ))
        class_preds.append(
            flatten_prediction(class_predictors[i](x))
        )
        box_preds.append(
            flatten_prediction(box_predictors[i](x))
        )
        if verbose:
            print('Predict Scale ', i, x.shape, 'with ',
                  anchors[-1].shape[1], ' anchors')

        # downsample
        if i < 3:
            x = down_samplers[i](x)
        elif i == 3:
            x = nd.Pooling(
                x, global_pool=True, pool_type='max',
                kernel=(x.shape[2], x.shape[3])
            )
    return (
        concat_predictions(anchors),
        concat_predictions(class_preds),
        concat_predictions(box_preds)
    )


class ToySSD(nn.Block):
    """Toy single-shot detector predicting at 5 feature scales."""

    def __init__(self, num_classes, verbose=False, **kwargs):
        super(ToySSD, self).__init__(**kwargs)
        # anchor box sizes and ratios for 5 feature scales
        self.sizes = [[.2, .272], [.37, .447], [.54, .619],
                      [.71, .79], [.88, .961]]
        self.ratios = [[1, 2, .5]] * 5

        self.num_classes = num_classes
        self.verbose = verbose
        # n sizes + m ratios yield n + m - 1 anchors per position
        num_anchors = len(self.sizes[0]) + len(self.ratios[0]) - 1
        with self.name_scope():
            self.model = toy_ssd_model(num_anchors, num_classes)

    def forward(self, x):
        anchors, class_preds, box_preds = toy_ssd_forward(
            x, self.model, self.sizes, self.ratios, verbose=self.verbose)
        # reshape flat class scores to (batch, num_anchors, num_classes + 1)
        class_preds = class_preds.reshape(
            shape=(0, -1, self.num_classes + 1))
        return anchors, class_preds, box_preds


# Build the net and push one image through it to check the output shapes.
net = ToySSD(num_classes=2, verbose=True)
net.initialize()

train_data, test_data, class_names, num_class = get_iterators(
    DATA_DIR, data_shape, batch_size)

batch = train_data.next()
x = batch.data[0][0:1]  # first image of the batch only
print('Input: ', x.shape)
anchors, class_preds, box_preds = net(x)
print('Output anchors: ', anchors.shape)
print('Output class preds: ', class_preds.shape)
print('Output box preds: ', box_preds.shape)


# IoU   Area of Overlap / Area of Union
def training_targets(anchors, class_preds, labels):
    class_preds = class_preds.transpose(axes=(0, 2, 1))
    return MultiBoxTarget(anchors, labels, class_preds)


# Inspect the (box_target, box_mask, cls_target) tuple for one sample.
out = training_targets(anchors, class_preds, batch.label[0][0:1])
print(out)


# focal loss function
# - \alpha(1-p_j)^\gamma log(p_j)
def focal_loss(gamma, x):
    """Focal loss -(1 - p)^gamma * log(p) for probabilities x (alpha omitted)."""
    modulator = (1.0 - x) ** gamma
    return -modulator * np.log(x)


# Plot the focal loss for several gamma values: larger gamma
# down-weights well-classified (high-probability) examples.
x = np.arange(0.01, 1, 0.01)
gammas = [0, .25, .5, 1, 4]

for i, g in enumerate(gammas):
    plt.plot(x, focal_loss(g, x), colors[i])

plt.legend(['gamma='+str(g) for g in gammas])
plt.show()


class FocalLoss(gluon.loss.Loss):
    """Focal loss -alpha * (1 - p_j)^gamma * log(p_j) over softmax probs."""

    def __init__(self, axis=-1, alpha=0.25, gamma=2, batch_axis=0, **kwargs):
        super(FocalLoss, self).__init__(None, batch_axis, **kwargs)
        self._axis = axis
        self._alpha = alpha
        self._gamma = gamma

    def hybrid_forward(self, F, output, label):
        probs = F.softmax(output)
        # probability the model assigned to the true class of each anchor
        pj = probs.pick(label, axis=self._axis, keepdims=True)
        focal = -self._alpha * ((1 - pj) ** self._gamma) * pj.log()
        return focal.mean(axis=self._batch_axis, exclude=True)


cls_loss = FocalLoss()
print(cls_loss)

# box prediction loss
# use smoothed L1 loss
# f(x) = (sigma*x)^2/2 if x < 1/sigma^2
# f(x) = |x| - 0.5/sigma^2, otherwise
# Plot smooth L1 for several sigma values against the square loss.
scales = [.5, 1, 10]
x = nd.arange(-2, 2, 0.1)
for i, s in enumerate(scales):
    y = nd.smooth_l1(x, scalar=s)
    plt.plot(x.asnumpy(), y.asnumpy(), color=colors[i])
plt.plot(x.asnumpy(), (x**2).asnumpy(), color=colors[len(scales)])
plt.legend(['scale='+str(s) for s in scales] + ['Square loss'])
plt.show()


class SmoothL1Loss(gluon.loss.Loss):
    """Smooth L1 loss on masked box offsets (mask zeroes out anchors
    that were not matched to an object)."""

    def __init__(self,  batch_axis=0, **kwargs):
        super(SmoothL1Loss, self).__init__(None, batch_axis, **kwargs)

    def hybrid_forward(self, F, output, label, mask):
        diff = (output - label) * mask
        loss = F.smooth_l1(diff, scalar=1.0)
        return loss.mean(self._batch_axis, exclude=True)


box_loss = SmoothL1Loss()
cls_metric = metric.Accuracy()
box_metric = metric.MAE()

# Reshape labels to (3, 5) — presumably up to 3 objects with 5 values each
# (class id + 4 box coords); TODO confirm against ImageDetIter docs.
# Then keep the train/val label shapes in sync.
train_data.reshape(label_shape=(3, 5))
train_data = test_data.sync_label_shape(train_data)
ctx = mx.cpu()
# re-create the net for the dataset's real class count and train with SGD
net = ToySSD(num_class)
net.initialize(mx.init.Xavier(magnitude=2), ctx=ctx)
trainer = gluon.Trainer(net.collect_params(),
                        'sgd', {
    'learning_rate': 0.1, 'wd': 5e-4
})


# Training loop: 30 epochs of SGD over the detection iterator.
for epoch in range(30):
    # reset data iterators and metrics
    train_data.reset()
    cls_metric.reset()
    box_metric.reset()
    tic = time.time()
    for i, batch in enumerate(train_data):
        x = batch.data[0].as_in_context(ctx)
        y = batch.label[0].as_in_context(ctx)
        with autograd.record():
            anchors, class_preds, box_preds = net(x)
            # match anchors to ground truth: box offsets, positive-anchor
            # mask, and per-anchor class targets
            box_target, box_mask, cls_target = training_targets(
                anchors, class_preds, y)
            # losses
            loss1 = cls_loss(class_preds, cls_target)
            loss2 = box_loss(box_preds, box_target, box_mask)
            loss = loss1 + loss2
        loss.backward()
        trainer.step(batch_size)
        # update metrics
        cls_metric.update([cls_target], [class_preds.transpose((0,2,1))])
        box_metric.update([box_target], [box_preds * box_mask])

    print('Epoch %2d, train %s %.2f, %s %.5f, time %.1f sec' % (
        epoch, *cls_metric.get(), *box_metric.get(), time.time()-tic
    ))


def process_image(f_name):
    """Load an image file for inference.

    Returns (input batch of shape (1, 3, data_shape, data_shape),
    original decoded image).
    """
    with open(f_name, 'rb') as fd:
        raw = image.imdecode(fd.read())

    # resize to the network input size and subtract the channel means
    resized = image.imresize(raw, data_shape, data_shape)
    normalized = resized.astype('float32') - rgb_mean
    # HWC -> CHW, then add the leading batch dimension
    batched = normalized.transpose((2, 0, 1)).expand_dims(axis=0)
    return batched, raw


def predict(x):
    """Run the trained net on x and apply non-maximum suppression.

    Returns MultiBoxDetection rows of (class_id, score, xmin, ymin, xmax, ymax).
    """
    anchors, cls_preds, box_preds = net(x.as_in_context(ctx))
    # channel-mode softmax over the class axis
    probs = nd.SoftmaxActivation(
        cls_preds.transpose((0, 2, 1)), mode='channel'
    )
    return MultiBoxDetection(probs, box_preds, anchors,
                             force_suppress=True, clip=False)


# Run detection on a sample image.
x, im = process_image('/home/alpha/ML/gluon-tutorials-zh/img/pikachu.jpg')
out = predict(x)
print(out.shape)

matplotlib.rcParams['figure.figsize'] = (6,6)


def display(im, out, threshold=0.5):
    """Draw all detections with score >= threshold on top of image im."""
    plt.imshow(im.asnumpy())
    # NOTE(review): this scales x by im height and y by im width — harmless
    # for square inputs, but verify the order for non-square images.
    scale = np.array([im.shape[0], im.shape[1]] * 2)
    for row in out:
        row = row.asnumpy()
        class_id, score = int(row[0]), row[1]
        # skip padded rows (class -1) and weak detections
        if class_id < 0 or score < threshold:
            continue
        color = colors[class_id % len(colors)]
        box = row[2:6] * scale
        plt.gca().add_patch(box_to_rect(nd.array(box), color, 2))

        label = class_names[class_id]
        plt.gca().text(box[0], box[1],
                       '{:s} {:.2f}'.format(label, score),
                       bbox=dict(facecolor=color, alpha=0.5),
                       fontsize=10, color='white')
    plt.show()


# Visualize the detections for the single image in the batch.
display(im, out[0], threshold=0.5)
