import mxnet as mx
import numpy as np
from rcnn.config import config
from rcnn.PY_OP import fpn_roi_pooling, proposal_fpn, mask_roi, mask_output, proposal_fpn_out, rpn_fpn_ohem

FPN = False
USE_DCN = False


def conv_act_layer(from_layer, name, num_filter, kernel=(1, 1), pad=(0, 0),
                   stride=(1, 1), act_type="relu", bias_wd_mult=0.0, dcn=False):
    """Convolution (plain or deformable) followed by an optional activation.

    :param from_layer: input Symbol
    :param name: base name for all created symbols/parameters
    :param num_filter: number of output channels
    :param kernel, pad, stride: conv geometry (ignored for the dcn path,
        which is hard-wired to 3x3/pad 1/stride 1)
    :param act_type: activation type; empty string means no activation
    :param bias_wd_mult: weight-decay multiplier applied to the bias term
    :param dcn: if True, use a DeformableConvolution with a learned offset map
    :return: activated (or raw, when act_type == '') convolution Symbol
    """
    weight = mx.symbol.Variable(
        name="{}_weight".format(name),
        init=mx.init.Normal(0.01),
        attr={'__lr_mult__': '1.0'})
    bias = mx.symbol.Variable(
        name="{}_bias".format(name),
        init=mx.init.Constant(0.0),
        attr={'__lr_mult__': '2.0', '__wd_mult__': str(bias_wd_mult)})
    if dcn:
        # Deformable path only supports a 3x3 kernel.
        assert kernel[0] == 3 and kernel[1] == 3
        deform_groups = 1
        # 18 = 2 (x, y offsets) * 9 (kernel taps) per deformable group.
        offset_channels = deform_groups * 18
        offset_weight = mx.symbol.Variable(
            name="{}_offset_weight".format(name),
            init=mx.init.Constant(0.0),
            attr={'__lr_mult__': '1.0'})
        offset_bias = mx.symbol.Variable(
            name="{}_offset_bias".format(name),
            init=mx.init.Constant(0.0),
            attr={'__lr_mult__': '2.0', '__wd_mult__': str(bias_wd_mult)})
        offsets = mx.symbol.Convolution(
            name=name + '_offset', data=from_layer,
            weight=offset_weight, bias=offset_bias,
            num_filter=offset_channels, pad=(1, 1), kernel=(3, 3), stride=(1, 1))
        conv = mx.contrib.symbol.DeformableConvolution(
            name=name, data=from_layer, offset=offsets,
            weight=weight, bias=bias, num_filter=num_filter,
            pad=(1, 1), kernel=(3, 3), num_deformable_group=deform_groups,
            stride=(1, 1), no_bias=False)
    else:
        conv = mx.symbol.Convolution(
            data=from_layer, kernel=kernel, pad=pad, stride=stride,
            num_filter=num_filter, name="{}".format(name),
            weight=weight, bias=bias)
    if not act_type:
        return conv
    return mx.symbol.Activation(data=conv, act_type=act_type,
                                name="{}_{}".format(name, act_type))


def ssh_context_module(body, num_filters, name):
    """SSH context module: two stacked-3x3 branches sharing one dim-reduction conv.

    Stacking 3x3 convs emulates larger receptive fields (5x5 and 7x7 branches).

    :param body: input Symbol
    :param num_filters: channels of every conv in the module
    :param name: base name prefix for the created layers
    :return: (5x5-equivalent branch, 7x7-equivalent branch) tuple of Symbols
    """
    reduced = conv_act_layer(body, name + '_conv1', num_filters,
                             kernel=(3, 3), pad=(1, 1), stride=(1, 1),
                             act_type='relu', dcn=False)
    # One extra 3x3 on top of the reduction ~ 5x5 receptive field.
    ctx5 = conv_act_layer(reduced, name + '_conv2', num_filters,
                          kernel=(3, 3), pad=(1, 1), stride=(1, 1),
                          act_type='relu', dcn=USE_DCN)
    # Two extra 3x3 convs ~ 7x7 receptive field.
    ctx7_mid = conv_act_layer(reduced, name + '_conv3_1', num_filters,
                              kernel=(3, 3), pad=(1, 1), stride=(1, 1),
                              act_type='relu', dcn=False)
    ctx7 = conv_act_layer(ctx7_mid, name + '_conv3_2', num_filters,
                          kernel=(3, 3), pad=(1, 1), stride=(1, 1),
                          act_type='relu', dcn=USE_DCN)
    return ctx5, ctx7


def ssh_detection_module(body, num_filters, name):
    """SSH detection module: a direct 3x3 branch concatenated with the two
    context-module branches (each at half the channel count).

    :param body: input Symbol
    :param num_filters: channels of the direct 3x3 branch
    :param name: base name prefix for the created layers
    :return: channel-wise concatenation of the three branches
    """
    branch3 = conv_act_layer(body, name + '_conv1', num_filters,
                             kernel=(3, 3), pad=(1, 1), stride=(1, 1),
                             act_type='relu', dcn=USE_DCN)
    branch5, branch7 = ssh_context_module(body, num_filters // 2, name + '_context')
    return mx.sym.concat(branch3, branch5, branch7, dim=1, name=name + '_concat')


def get_feat_down(conv_feat):
    """Build the FPN top-down pathway from coarse-to-fine backbone features.

    :param conv_feat: [C5, C4, C3] features ordered coarsest first
    :return: (P3, P4, P5) pyramid features, finest first
    """
    # Coarsest level: lateral 1x1 conv only.
    prev = conv_act_layer(conv_feat[0], 'P5_lateral', 256,
                          kernel=(1, 1), pad=(0, 0), stride=(1, 1),
                          act_type='relu')
    pyramid = [prev]
    # Each finer level: upsample the previous pyramid level, crop it to the
    # lateral feature's spatial size, sum, then smooth with a 3x3 conv.
    for level, feat in zip((4, 3), conv_feat[1:]):
        up = mx.symbol.UpSampling(prev, scale=2, sample_type='nearest',
                                  workspace=512,
                                  name='P%d_upsampling' % (level + 1),
                                  num_args=1)
        lateral = conv_act_layer(feat, 'P%d_lateral' % level, 256,
                                 kernel=(1, 1), pad=(0, 0), stride=(1, 1),
                                 act_type='relu')
        clipped = mx.symbol.Crop(*[up, lateral], name='P%d_clip' % level)
        merged = mx.sym.ElementWiseSum(*[clipped, lateral],
                                       name='P%d_sum' % level)
        prev = conv_act_layer(merged, 'P%d_aggregate' % level, 256,
                              kernel=(3, 3), pad=(1, 1), stride=(1, 1),
                              act_type='relu')
        pyramid.append(prev)
    # pyramid holds [P5, P4, P3]; return finest first.
    return pyramid[2], pyramid[1], pyramid[0]


def _conv_bn_relu(data, num_filter, conv_name, relu_name):
    """One backbone unit: 3x3 conv (pad 1) + BatchNorm + ReLU."""
    conv = mx.symbol.Convolution(
        data=data, kernel=(3, 3), pad=(1, 1), num_filter=num_filter,
        workspace=2048, name=conv_name)
    bn = mx.symbol.BatchNorm(data=conv, name=conv_name + "_bn")
    return mx.symbol.Activation(data=bn, act_type="relu", name=relu_name)


def get_ssh_conv(data):
    """
    shared convolutional layers

    Builds a small VGG-like backbone (5 conv groups with 2x max pooling),
    then attaches SSH detection modules at strides 8, 16 and 32.

    :param data: Symbol (input image batch)
    :return: list of dicts {stride: detection-module Symbol}; a second dict
             of yaw/pose modules is appended when config.yawpose is set
    """
    # group 1 (stride 1 -> pooled to stride 2)
    relu1_1 = _conv_bn_relu(data, 8, "conv1_1", "relu1_1")
    pool1 = mx.symbol.Pooling(
        data=relu1_1, pool_type="max", kernel=(2, 2), stride=(2, 2), name="pool1")
    # group 2 (stride 4 after pooling)
    relu2_1 = _conv_bn_relu(pool1, 16, "conv2_1", "relu2_1")
    pool2 = mx.symbol.Pooling(
        data=relu2_1, pool_type="max", kernel=(2, 2), stride=(2, 2), name="pool2")
    # group 3 (stride 8 after pooling)
    relu3_1 = _conv_bn_relu(pool2, 32, "conv3_1", "relu3_1")
    pool3 = mx.symbol.Pooling(
        data=relu3_1, pool_type="max", kernel=(2, 2), stride=(2, 2), name="pool3")
    # group 4 (stride 8 features; pooled to stride 16)
    relu4_1 = _conv_bn_relu(pool3, 64, "conv4_1", "relu4_1")
    relu4_2 = _conv_bn_relu(relu4_1, 64, "conv4_2", "relu4_2")
    pool4 = mx.symbol.Pooling(
        data=relu4_2, pool_type="max", kernel=(2, 2), stride=(2, 2), name="pool4")
    # group 5 (stride 16 features; pooled to stride 32 for the m3 head)
    relu5_1 = _conv_bn_relu(pool4, 64, "conv5_1", "relu5_1")
    relu5_2 = _conv_bn_relu(relu5_1, 64, "conv5_2", "relu5_2")
    m3_pool = mx.sym.Pooling(data=relu5_2, kernel=(2, 2), stride=(2, 2), pad=(0, 0), pool_type='max')
    if FPN:
        # BUG FIX: the original referenced an undefined name `relu5_3` here,
        # which raised NameError whenever FPN was enabled; the stride-16
        # feature is `relu5_2`.
        relu4_2, relu5_2, m3_pool = get_feat_down([m3_pool, relu5_2, relu4_2])

    F1 = 64
    F2 = 128

    # Detection heads: m1 at stride 8, m2 at stride 16, m3 at stride 32.
    m1 = ssh_detection_module(relu4_2, F2, 'ssh_m1_det')
    m2 = ssh_detection_module(relu5_2, F1, 'ssh_m2_det')
    m3 = ssh_detection_module(m3_pool, F1, 'ssh_m3_det')

    if config.yawpose:
        # Parallel heads for yaw/pose classification, same feature taps.
        yaw1 = ssh_detection_module(relu4_2, F2, 'ssh_yaw1_det')
        yaw2 = ssh_detection_module(relu5_2, F1, 'ssh_yaw2_det')
        yaw3 = ssh_detection_module(m3_pool, F1, 'ssh_yaw3_det')
    res = [{8: m1, 16: m2, 32: m3}]
    if config.yawpose:
        res.append({8: yaw1, 16: yaw2, 32: yaw3})
    return res


def get_ssh_train():
    """
    Region Proposal Network with VGG

    Builds the training Symbol. For each stride in config.RPN_FEAT_STRIDE it
    emits a face/background softmax loss, a smooth-L1 bbox regression loss,
    a smooth-L1 landmark (5-point) regression loss, and optionally a yaw/pose
    softmax loss when config.yawpose is set. Labels and weights are exposed
    through BlockGrad so metrics can read them without receiving gradients.

    :return: Symbol (mx.sym.Group of all losses and blocked labels/weights)
    """
    data = mx.symbol.Variable(name="data")

    # shared convolutional layers
    conv_ssh_feat = get_ssh_conv(data)
    conv_fpn_feat = conv_ssh_feat[0]
    if config.yawpose:
        conv_yaw_feat = conv_ssh_feat[1]
    rpn_cls_score_list = []
    rpn_bbox_pred_list = []
    ret_group = []
    for stride in config.RPN_FEAT_STRIDE:
        num_anchors = config.RPN_ANCHOR_CFG[str(stride)]['NUM_ANCHORS']
        # Per-stride supervision inputs supplied by the data iterator.
        label = mx.symbol.Variable(name='label_stride%d' % stride)
        bbox_target = mx.symbol.Variable(name='bbox_target_stride%d' % stride)
        bbox_weight = mx.symbol.Variable(name='bbox_weight_stride%d' % stride)
        kpoint_target = mx.symbol.Variable(name='kpoint_target_stride%d' % stride)
        kpoint_weight = mx.symbol.Variable(name='kpoint_weight_stride%d' % stride)

        if config.yawpose:
            label_yawpose = mx.symbol.Variable(name='label_yawpose_stride%d' % stride)

        rpn_relu = conv_fpn_feat[stride]
        if not config.USE_MAXOUT or stride != config.RPN_FEAT_STRIDE[-1]:
            rpn_cls_score = conv_act_layer(rpn_relu, 'rpn_cls_score_stride%d' % stride, 2 * num_anchors,
                                           kernel=(1, 1), pad=(0, 0), stride=(1, 1), act_type='')
            if config.yawpose:
                rpn_yaw = conv_yaw_feat[stride]
                rpn_yawpose_score = conv_act_layer(rpn_yaw, 'rpn_yawpose_score_stride%d' % stride, 3 * num_anchors,
                                                   kernel=(3, 3), pad=(1, 1), stride=(1, 1), act_type='')
        else:
            # Maxout background on the last (coarsest) stride: predict 3 bg
            # scores per anchor and keep the max, plus 1 fg score per anchor.
            cls_list = []
            for a in range(num_anchors):
                rpn_cls_score_bg = conv_act_layer(rpn_relu, 'rpn_cls_score_stride%d_anchor%d_bg' % (stride, a), 3,
                                                  kernel=(1, 1), pad=(0, 0), stride=(1, 1), act_type='')
                rpn_cls_score_bg = mx.sym.max(rpn_cls_score_bg, axis=1, keepdims=True)
                cls_list.append(rpn_cls_score_bg)
                rpn_cls_score_fg = conv_act_layer(rpn_relu, 'rpn_cls_score_stride%d_anchor%d_fg' % (stride, a), 1,
                                                  kernel=(1, 1), pad=(0, 0), stride=(1, 1), act_type='')
                cls_list.append(rpn_cls_score_fg)
            rpn_cls_score = mx.sym.concat(*cls_list, dim=1)
        rpn_bbox_pred = conv_act_layer(rpn_relu, 'rpn_bbox_pred_stride%d' % stride, 4 * num_anchors,
                                       kernel=(1, 1), pad=(0, 0), stride=(1, 1), act_type='')
        rpn_kpoint_pred = conv_act_layer(rpn_relu, 'rpn_kpoint_pred_stride%d' % stride, 10 * num_anchors,
                                         kernel=(1, 1), pad=(0, 0), stride=(1, 1), act_type='')

        # prepare rpn data: flatten spatial/anchor dims, keep class dim for softmax
        rpn_cls_score_reshape = mx.symbol.Reshape(data=rpn_cls_score,
                                                  shape=(0, 2, -1),
                                                  name="rpn_cls_score_reshape_stride%s" % stride)
        if config.yawpose:
            rpn_yawpose_score_reshape = mx.symbol.Reshape(data=rpn_yawpose_score,
                                                          shape=(0, 3, -1),
                                                          name="rpn_yawpose_score_reshape_stride%s" % stride)
        rpn_bbox_pred_reshape = mx.symbol.Reshape(data=rpn_bbox_pred,
                                                  shape=(0, 0, -1),
                                                  name="rpn_bbox_pred_reshape_stride%s" % stride)
        rpn_kpoint_pred_reshape = mx.symbol.Reshape(data=rpn_kpoint_pred,
                                                    shape=(0, 0, -1),
                                                    name="rpn_kpoint_pred_reshape_stride%s" % stride)

        if config.TRAIN.RPN_ENABLE_OHEM < 0:
            # OHEM fully disabled: just collect raw predictions (no losses).
            rpn_bbox_pred_list.append(rpn_bbox_pred_reshape)
            rpn_cls_score_list.append(rpn_cls_score_reshape)
        else:
            if config.TRAIN.RPN_ENABLE_OHEM == 2:
                # Online hard example mining rewrites labels and weights.
                # NOTE(review): output order (label, kpoint_weight, bbox_weight)
                # must match the rpn_fpn_ohem custom op — verify against PY_OP.
                label, kpoint_weight, bbox_weight = mx.sym.Custom(op_type='rpn_fpn_ohem', stride=int(stride),
                                                                  cls_score=rpn_cls_score_reshape,
                                                                  bbox_weight=bbox_weight, kpoint_weight=kpoint_weight,
                                                                  labels=label)
            rpn_cls_prob = mx.symbol.SoftmaxOutput(data=rpn_cls_score_reshape,
                                                   label=label,
                                                   multi_output=True,
                                                   normalization='valid', use_ignore=True, ignore_label=-1,
                                                   name='rpn_cls_prob_stride%d' % stride)
            if config.yawpose:
                rpn_yawpose_prob = mx.symbol.SoftmaxOutput(data=rpn_yawpose_score_reshape,
                                                           label=label_yawpose,
                                                           multi_output=True,
                                                           normalization='valid', use_ignore=True, ignore_label=-1,
                                                           name='rpn_yawpose_prob_stride%d' % stride)

            # bbox regression: weighted smooth-L1 on (pred - target)
            bbox_diff = rpn_bbox_pred_reshape - bbox_target
            bbox_diff = bbox_diff * bbox_weight
            rpn_bbox_loss_ = mx.symbol.smooth_l1(name='rpn_bbox_loss_stride%d_' % stride, scalar=3.0, data=bbox_diff)

            rpn_bbox_loss = mx.sym.MakeLoss(name='rpn_bbox_loss_stride%d' % stride, data=rpn_bbox_loss_,
                                            grad_scale=1.0 / (config.TRAIN.RPN_BATCH_SIZE))

            # landmark regression: weighted smooth-L1, sharper transition (scalar=5)
            kpoint_diff = rpn_kpoint_pred_reshape - kpoint_target
            kpoint_diff = kpoint_diff * kpoint_weight
            rpn_kpoint_loss_ = mx.symbol.smooth_l1(name='rpn_kpoint_loss_stride%d_' % stride, scalar=5.0,
                                                   data=kpoint_diff)
            rpn_kpoint_loss = mx.sym.MakeLoss(name='rpn_kpoint_loss_stride%d' % stride, data=rpn_kpoint_loss_,
                                              grad_scale=1.0 / (config.TRAIN.RPN_BATCH_SIZE))

            ret_group.append(rpn_cls_prob)
            ret_group.append(mx.sym.BlockGrad(label))
            ret_group.append(rpn_bbox_loss)
            ret_group.append(mx.sym.BlockGrad(bbox_weight))
            ret_group.append(rpn_kpoint_loss)
            ret_group.append(mx.sym.BlockGrad(kpoint_weight))

            if config.yawpose:
                ret_group.append(rpn_yawpose_prob)
                # CONSISTENCY FIX: block gradients on the yawpose label like
                # every other label/weight output above.
                ret_group.append(mx.sym.BlockGrad(label_yawpose))

    return mx.sym.Group(ret_group)
