import mxnet as mx
import numpy as np
from mxnet import nd
from mxnet import gluon
from mxnet.gluon import nn as gnn
import utils

__all__ = ['MsDetect', 'MsLoss', 'MiniSSD']

def decode_preds(cls_maps, box_maps):
    '''Gather multi-scale score/box feature maps onto CPU and flatten them
    into per-anchor prediction tensors of shape (batch, num_anchors, channels).
    '''
    def _gather_on_cpu(maps):
        # Multi-device case: each scale arrives as a tuple of per-device maps
        # that must be stacked back together along the batch dimension.
        if isinstance(maps[0], (list, tuple)):
            return [nd.concat(*[dev_map.as_in_context(mx.cpu()) for dev_map in per_scale], dim=0)
                    for per_scale in zip(*maps)]
        # Single-device case: just move every scale onto CPU.
        return [m.as_in_context(mx.cpu()) for m in maps]

    def _flatten_scales(maps):
        # (B, C, H, W) -> (B, H*W, C) per scale, then join scales along axis 1.
        flat = [m.reshape(0, 0, -1).transpose((0, 2, 1)) for m in maps]
        return nd.concat(*flat, dim=1)

    cls_pred = _flatten_scales(_gather_on_cpu(cls_maps))
    box_pred = _flatten_scales(_gather_on_cpu(box_maps))
    return cls_pred, box_pred

class MsDetect(gluon.Block):
    '''Post-process Mini-SSD feature maps into per-image detections.

    Parameters
    ----------
    thresh : float, default 0.6
        Minimum face probability for a candidate box to be kept.
    nms_thresh : float, default 0.5
        IoU threshold used by non-maximum suppression.
    '''
    def __init__(self, thresh=0.6, nms_thresh=0.5, **kwargs):
        super(MsDetect, self).__init__(**kwargs)
        self.thresh = thresh
        self.nms_thresh = nms_thresh

    def forward(self, cls_maps, box_maps):
        anchors = utils.gen_anchors(cls_maps, [4,8], [16,24], [21,45])
        cls_pred, box_pred = decode_preds(cls_maps, box_maps)
        results = []
        # One pass per image in the batch.
        for scores, offsets in zip(cls_pred, box_pred):
            face_prob = nd.softmax(scores)[:, 1].asnumpy()
            # Columns: [prob, anchor(4), regression offsets(4)].
            candidates = np.hstack([face_prob[:, None], anchors, offsets.asnumpy()])
            candidates = candidates[np.where(face_prob >= self.thresh)[0]]
            if candidates.size > 0:
                keep = utils.bbox_nms(candidates, self.nms_thresh, 'union')
                candidates = candidates[keep]
            results.append(candidates)
        return results

class MsLoss(gluon.Block):
    """Multi-task loss of Mini-SSD.
       (softmax + smoothl1/euclid) with online hard example mining.

    Parameters
    ----------
    pos_thresh : float, default 0.65
        IoU above pos_thresh assigned as positive face.
    part_thresh : float, default 0.4
        IoU between part_thresh and pos_thresh assigned part face.
    neg_thresh : float, default 0.3
        IoU less than neg_thresh assigned negative face.
    ohem_ratio : float, default 0.7
        The ratio of cls_loss entries to be kept after online hard example mining.
    cls_weight : float, default 1.0
        Scalar weight for ohem softmax loss.
    loc_weight : float, default 1.0
        Scalar weight for L2 loss.
    loc_loss : str, default 'smoothl1'
        Loss used for bounding box localization, optional ['smoothl1', 'euclid']
    """
    def __init__(self, pos_thresh=0.65, part_thresh=0.4, neg_thresh=0.3,
                 ohem_ratio=0.7, cls_weight=1.0, loc_weight=1.0, loc_loss='smoothl1', **kwargs):
        super(MsLoss, self).__init__(**kwargs)
        self._pos_thresh = pos_thresh
        self._part_thresh = part_thresh
        self._neg_thresh = neg_thresh
        self._ohem_ratio = ohem_ratio
        self._cls_weight = cls_weight
        self._loc_weight = loc_weight
        # Dispatch the localization loss once at construction time.
        if loc_loss == 'smoothl1':
            self._loc_loss = self._smoothl1_loss
        elif loc_loss == 'euclid':
            self._loc_loss = self._euclid_loss
        else:
            raise ValueError('Unsupported loc loss: {}'.format(loc_loss))
    
    def forward(self, cls_maps, box_maps, targets):
        # targets[:, :, 0] holds each anchor's matching IoU (compared against
        # the thresholds below); targets[:, :, 1:] are the box regression targets.
        cls_pred, box_pred = decode_preds(cls_maps, box_maps)
        cls_loss = self._softmax_loss(cls_pred, targets)
        loc_loss = self._loc_loss(box_pred, targets)
        sum_loss =  cls_loss + loc_loss
        # Split the batched per-sample losses back into one chunk per device
        # so each device's trainer can backprop its own portion.
        cls_losses = self._split_like(cls_loss, cls_maps)
        loc_losses = self._split_like(loc_loss, cls_maps)
        sum_losses = self._split_like(sum_loss, cls_maps)
        return sum_losses, cls_losses, loc_losses
    
    def _softmax_loss(self, preds, targets):
        # Softmax cross-entropy with OHEM.  Anchors whose IoU falls between
        # neg_thresh and pos_thresh are "part" faces and are ignored here.
        pos_mask = targets[:,:,0] >= self._pos_thresh
        neg_mask = targets[:,:,0] < self._neg_thresh
        mask = nd.logical_or(pos_mask, neg_mask)
        pred = nd.log_softmax(preds)
        label = nd.where(pos_mask, nd.ones_like(mask), nd.zeros_like(mask))
        # Negative log-likelihood of the assigned class per anchor.
        loss = -nd.pick(pred, label, axis=2, keepdims=False)
        loss = nd.where(mask, loss, nd.zeros_like(loss))
        # OHEM: keep only the hardest `ohem_ratio` fraction of the valid anchors.
        keep_num = round(self._ohem_ratio * nd.sum(mask).asscalar())
        # Double argsort trick: the first argsort (descending) orders by loss,
        # the second turns that ordering into per-element ranks, so rank <
        # keep_num selects the keep_num largest losses.
        keep_mask = loss.reshape(-1,).argsort(is_ascend=False).argsort().reshape_like(mask) < keep_num
        loss = nd.where(keep_mask, loss, nd.zeros_like(loss))
        loss = nd.sum(loss, axis=1)
        # Normalize by the number of kept anchors (max guards keep_num == 0).
        return loss / max(keep_num, 1) * self._cls_weight
        
    def _smoothl1_loss(self, preds, targets, ratio=1.0):
        # Both positive and part faces contribute to box regression.
        mask = targets[:,:,0] >= self._part_thresh
        loss = nd.abs(preds - targets[:,:,1:])
        # Piecewise: quadratic (0.5/ratio)*x^2 below `ratio`, linear x - 0.5*ratio
        # above; the two pieces meet at x == ratio.
        loss = nd.where(loss > ratio, loss - 0.5 * ratio, (0.5 / ratio) * nd.square(loss))
        loss = nd.sum(loss, axis=2)
        loss = nd.where(mask, loss, nd.zeros_like(loss))
        loss = nd.sum(loss, axis=1)
        # Normalize by the number of contributing anchors (max guards zero).
        return loss / max(nd.sum(mask).asscalar(), 1) * self._loc_weight
    
    def _euclid_loss(self, preds, targets):
        # Plain L2 alternative to smooth-L1; same masking and normalization.
        mask = targets[:,:,0] >= self._part_thresh
        loss = nd.square(preds - targets[:,:,1:]) / 2
        loss = nd.sum(loss, axis=2)
        loss = nd.where(mask, loss, nd.zeros_like(loss))
        loss = nd.sum(loss, axis=1)
        return loss / max(nd.sum(mask).asscalar(), 1) * self._loc_weight
    
    def _split_like(self, loss, maps):
        # Slice the per-sample loss vector into chunks matching `maps`.
        # NOTE(review): mp[0].shape[0] assumes each element of `maps` is a
        # per-device list whose first map's leading axis is that device's
        # batch size — confirm the single-device (non-nested) path upstream.
        losses = []
        start_idx = 0
        for mp in maps:
            num = mp[0].shape[0]
            losses.append(loss[start_idx:start_idx+num])
            start_idx += num
        return losses

class MiniSSD(gluon.HybridBlock):
    '''Lightweight two-stage SSD: a depthwise-separable trunk with a
    classification head and a box-regression head at each of two scales.'''
    def __init__(self, **kwargs):
        super(MiniSSD, self).__init__(**kwargs)
        # Trunk up to the first prediction scale.
        self.stage1 = gnn.HybridSequential()
        self.stage1.add(gnn.Conv2D(10, 3, prefix='conv1_'))
        self.stage1.add(gnn.PReLU(10, prefix='conv1/prelu_'))
        for layer in self._dw_block(16, 10, 'conv2') + self._dw_block(32, 16, 'conv3'):
            self.stage1.add(layer)
        # Extra downsampling stage feeding the second prediction scale.
        self.stage2 = gnn.HybridSequential()
        for layer in self._dw_block(64, 32, 'conv4'):
            self.stage2.add(layer)
        # 1x1 heads: *c = 2-way face classification, *d = 4-value box regression.
        self.conv3c = gnn.Conv2D(2, 1, prefix='conv3c_')
        self.conv3d = gnn.Conv2D(4, 1, prefix='conv3d_')
        self.conv4c = gnn.Conv2D(2, 1, prefix='conv4c_')
        self.conv4d = gnn.Conv2D(4, 1, prefix='conv4d_')

    def hybrid_forward(self, F, x):
        feat1 = self.stage1(x)
        feat2 = self.stage2(feat1)
        cls_maps = [self.conv3c(feat1), self.conv4c(feat2)]
        box_maps = [self.conv3d(feat1), self.conv4d(feat2)]
        return cls_maps, box_maps

    def _dw_block(self, channels, inchannels, name):
        '''Two depthwise-separable units: a strided one ("a"), then a
        stride-1 one ("b"); each unit is a depthwise conv + pointwise conv.'''
        specs = [
            (inchannels, 3, 2, inchannels, name + 'a/dw'),
            (channels,   1, 1, 1,          name + 'a/pw'),
            (channels,   3, 1, channels,   name + 'b/dw'),
            (channels,   1, 1, 1,          name + 'b/pw'),
        ]
        layers = []
        for spec in specs:
            layers.extend(self._conv_bn(*spec))
        return layers

    def _conv_bn(self, channels, kernel, stride, group, name):
        '''Conv (bias-free) -> BatchNorm -> ReLU triple sharing a name prefix.'''
        return [
            gnn.Conv2D(channels, kernel, stride, groups=group, use_bias=False, prefix=name + '_'),
            gnn.BatchNorm(prefix=name + '/bn_'),
            gnn.Activation('relu', prefix=name + '/relu_'),
        ]

if __name__ == '__main__':
    # Quick sanity check: build the symbolic graph and print a layer summary.
    net = MiniSSD()
    net.hybridize()
    x = mx.sym.var('data')
    # The network returns ([cls1, cls2], [box1, box2]) — a 2-tuple of lists —
    # so indexing it with sym[3] raised IndexError.  Unpack the two head lists
    # and summarize the deepest box-regression head (conv4d output) instead.
    cls_syms, box_syms = net(x)
    mx.viz.print_summary(box_syms[1], {'data': (1, 3, 45, 45)}, line_length=100)