import mxnet as mx
from rcnn.config import config

def conv_act(data, kernel, pad, stride, num_filter, name):
    """Convolution -> BatchNorm -> ReLU building block.

    Parameters mirror mx.sym.Convolution; `name` prefixes the generated
    symbol names ("<name>", "<name>_bn", "<name>_relu").

    :return: the ReLU activation symbol
    """
    conv = mx.sym.Convolution(data=data, kernel=kernel, pad=pad, stride=stride, num_filter=num_filter, name=name)
    conv_bn = mx.sym.BatchNorm(data=conv, name=name + "_bn")
    # Renamed local (was `conv_act`), which shadowed this function's own name.
    act = mx.sym.Activation(data=conv_bn, act_type='relu', name=name + '_relu')
    return act

def conv_act_nobn(data, kernel, pad, stride, num_filter, name):
    """Convolution -> ReLU building block (no BatchNorm).

    Parameters mirror mx.sym.Convolution; `name` prefixes the generated
    symbol names ("<name>", "<name>_relu").

    :return: the ReLU activation symbol
    """
    conv = mx.sym.Convolution(data=data, kernel=kernel, pad=pad, stride=stride, num_filter=num_filter, name=name)
    # Renamed local (was `conv_act`), which shadowed the sibling conv_act() helper.
    act = mx.sym.Activation(data=conv, act_type='relu', name=name + '_relu')
    return act

def sfn_symbol(data):
    """Shared stem network: 7x7 conv stem followed by two bottleneck stages.

    Stage res2a uses a 1x1 projection shortcut; stage res2b uses an identity
    shortcut after a further 2x2 max-pool.

    :param data: input image symbol
    :return: the res2b sum symbol (pre-activation)
    """
    stem = conv_act(data=data, kernel=(7, 7), pad=(3, 3), stride=(2, 2), num_filter=64, name='conv1')
    stem = mx.sym.Pooling(data=stem, kernel=(2, 2), pad=(0, 0), stride=(2, 2), pool_type='max', name='pool1')

    # res2a: projection shortcut + 1x1 / 3x3 / 1x1 bottleneck
    shortcut = mx.sym.Convolution(data=stem, kernel=(1, 1), pad=(0, 0), stride=(1, 1), num_filter=256, name='res2a_branch1')
    body = conv_act(data=stem, kernel=(1, 1), pad=(0, 0), stride=(1, 1), num_filter=64, name='res2a_branch2a')
    body = conv_act(data=body, kernel=(3, 3), pad=(1, 1), stride=(1, 1), num_filter=64, name='res2a_branch2b')
    body = mx.sym.Convolution(data=body, kernel=(1, 1), pad=(0, 0), stride=(1, 1), num_filter=256, name='res2a_branch2c')
    merged = mx.sym.broadcast_add(shortcut, body, name='res2a')
    merged = mx.sym.Activation(data=merged, act_type='relu', name='res2a_relu')

    # res2b: identity shortcut around the same bottleneck after a 2x downsample
    pooled = mx.sym.Pooling(data=merged, kernel=(2, 2), pad=(0, 0), stride=(2, 2), pool_type='max', name='pool2')
    body = conv_act(data=pooled, kernel=(1, 1), pad=(0, 0), stride=(1, 1), num_filter=64, name='res2b_branch2a')
    body = conv_act(data=body, kernel=(3, 3), pad=(1, 1), stride=(1, 1), num_filter=64, name='res2b_branch2b')
    body = mx.sym.Convolution(data=body, kernel=(1, 1), pad=(0, 0), stride=(1, 1), num_filter=256, name='res2b_branch2c')
    return mx.sym.broadcast_add(lhs=pooled, rhs=body, name='res2b')

def rsa_symbol(data, name):
    """Transfer ("RSA") stage: four conv-bn-relu 3x3 layers, the second of
    which downsamples by 2, followed by a plain 1x1 projection convolution.

    :param data: input feature symbol
    :param name: prefix for all generated symbol names
    :return: the final 1x1 convolution symbol (no BN/ReLU)
    """
    x = data
    for idx in range(1, 5):
        # only the second transfer layer is strided
        layer_stride = (2, 2) if idx == 2 else (1, 1)
        x = conv_act(data=x, kernel=(3, 3), pad=(1, 1), stride=layer_stride,
                     num_filter=256, name='%s_conv_transfer_%d' % (name, idx))
    return mx.sym.Convolution(data=x, kernel=(1, 1), pad=(0, 0), stride=(1, 1),
                              num_filter=256, name=name + '_conv_transfer_5')

def _identity_bottleneck(data, prefix, mid_filter, out_filter):
    """One residual bottleneck unit with an identity shortcut.

    Builds 1x1 -> 3x3 -> 1x1 convolutions named "<prefix>_branch2a/2b/2c",
    adds the input back in ("<prefix>") and applies ReLU ("<prefix>_relu").

    :param data: input (already activated) feature symbol
    :param prefix: full symbol-name prefix for this unit
    :param mid_filter: channel count of the 1x1/3x3 reduction convs
    :param out_filter: channel count of the expanding 1x1 conv
    :return: the post-ReLU output symbol
    """
    body = conv_act(data=data, kernel=(1, 1), pad=(0, 0), stride=(1, 1), num_filter=mid_filter, name=prefix + '_branch2a')
    body = conv_act(data=body, kernel=(3, 3), pad=(1, 1), stride=(1, 1), num_filter=mid_filter, name=prefix + '_branch2b')
    body = mx.sym.Convolution(data=body, kernel=(1, 1), pad=(0, 0), stride=(1, 1), num_filter=out_filter, name=prefix + '_branch2c')
    # broadcast_add acts as a plain elementwise add here (shapes match).
    total = mx.sym.broadcast_add(data, body, name=prefix)
    return mx.sym.Activation(data=total, act_type='relu', name=prefix + '_relu')


def lrn_symbol(data, name):
    """Detection-head feature tower: one 256-channel identity unit (res2c),
    a strided projection unit widening to 512 channels (res3a), then three
    512-channel identity units (res3b1..res3b3).

    :param data: pre-activation input symbol (ReLU is applied first)
    :param name: prefix for all generated symbol names
    :return: the res3b3 post-ReLU symbol
    """
    res2b_act = mx.sym.Activation(data=data, act_type='relu', name=name + '_relu')
    res2c_act = _identity_bottleneck(res2b_act, name + '_res2c', 64, 256)

    # res3a: projection unit — 1x1 strided shortcut, downsamples 2x and
    # widens to 512 channels
    res3a_branch1 = mx.sym.Convolution(data=res2c_act, kernel=(1, 1), pad=(0, 0), stride=(2, 2), num_filter=512, name=name + '_res3a_branch1')
    res3a_branch2a = conv_act(data=res2c_act, kernel=(1, 1), pad=(0, 0), stride=(2, 2), num_filter=128, name=name + '_res3a_branch2a')
    res3a_branch2b = conv_act(data=res3a_branch2a, kernel=(3, 3), pad=(1, 1), stride=(1, 1), num_filter=128, name=name + '_res3a_branch2b')
    res3a_branch2c = mx.sym.Convolution(data=res3a_branch2b, kernel=(1, 1), pad=(0, 0), stride=(1, 1), num_filter=512, name=name + '_res3a_branch2c')
    res3a = mx.sym.broadcast_add(res3a_branch1, res3a_branch2c, name=name + '_res3a')
    act = mx.sym.Activation(data=res3a, act_type='relu', name=name + '_res3a_relu')

    # res3b1..res3b3: three identical identity units (previously duplicated
    # inline; factored into _identity_bottleneck — symbol names unchanged)
    for unit in ('_res3b1', '_res3b2', '_res3b3'):
        act = _identity_bottleneck(act, name + unit, 128, 512)
    return act

def get_ssh_train():
    """Build the training symbol for the SSH/RSA-style detector.

    Assembles the shared trunk (sfn_symbol plus chained rsa_symbol
    transfers), attaches an lrn_symbol feature tower per stride
    (16 / 32 / 64), and for every stride in config.RPN_FEAT_STRIDE wires:
      * a softmax face/background classification loss,
      * a smooth-L1 bbox regression loss, and
      * a smooth-L1 keypoint (landmark) regression loss.

    (Docstring previously claimed a VGG backbone; the trunk built here is
    the residual sfn/rsa/lrn stack above.)

    :return: mx.sym.Group of per-stride losses and blocked-gradient symbols
    """
    data = mx.symbol.Variable(name="data")

    # shared convolutional trunk; each rsa_symbol stage halves resolution
    base1 = sfn_symbol(data=data)
    out1 = lrn_symbol(data=base1, name="out1")
    base2 = rsa_symbol(data=base1, name="base2")
    out2 = lrn_symbol(data=base2, name="out2")
    base3 = rsa_symbol(data=base2, name="base3")
    out3 = lrn_symbol(data=base3, name="out3")

    # feature map keyed by anchor stride
    conv_fpn_feat = {16: out1, 32: out2, 64: out3}

    # NOTE(review): these two lists are filled only in the OHEM<0 branch and
    # never consumed afterwards; kept to preserve the original behavior.
    rpn_cls_score_list = []
    rpn_bbox_pred_list = []
    ret_group = []
    for stride in config.RPN_FEAT_STRIDE:
        num_anchors = config.RPN_ANCHOR_CFG[str(stride)]['NUM_ANCHORS']
        label = mx.symbol.Variable(name='label_stride%d' % stride)
        bbox_target = mx.symbol.Variable(name='bbox_target_stride%d' % stride)
        bbox_weight = mx.symbol.Variable(name='bbox_weight_stride%d' % stride)
        kpoint_target = mx.symbol.Variable(name='kpoint_target_stride%d' % stride)
        kpoint_weight = mx.symbol.Variable(name='kpoint_weight_stride%d' % stride)
        rpn_relu = conv_fpn_feat[stride]

        # NOTE(review): conv_act_nobn applies a ReLU to every head output,
        # clamping cls scores and bbox/kpoint regression outputs to >= 0 —
        # confirm this is intentional (a plain Convolution head is usual).
        rpn_cls_score = conv_act_nobn(rpn_relu, (3, 3), (1, 1), (1, 1), 2 * num_anchors,
                                      'rpn_cls_score_stride%d' % stride)
        rpn_bbox_pred = conv_act_nobn(rpn_relu, (3, 3), (1, 1), (1, 1), 4 * num_anchors,
                                      'rpn_bbox_pred_stride%d' % stride)
        rpn_kpoint_pred = conv_act_nobn(rpn_relu, (3, 3), (1, 1), (1, 1), 10 * num_anchors,
                                        'rpn_kpoint_pred_stride%d' % stride)

        # flatten spatial dims; cls is split into 2 softmax channels
        rpn_cls_score_reshape = mx.symbol.Reshape(data=rpn_cls_score,
                                                  shape=(0, 2, -1),
                                                  name="rpn_cls_score_reshape_stride%d" % stride)
        rpn_bbox_pred_reshape = mx.symbol.Reshape(data=rpn_bbox_pred,
                                                  shape=(0, 0, -1),
                                                  name="rpn_bbox_pred_reshape_stride%d" % stride)
        rpn_kpoint_pred_reshape = mx.symbol.Reshape(data=rpn_kpoint_pred,
                                                    shape=(0, 0, -1),
                                                    name="rpn_kpoint_pred_reshape_stride%d" % stride)

        if config.TRAIN.RPN_ENABLE_OHEM < 0:
            # no losses: just collect the raw per-stride predictions
            rpn_bbox_pred_list.append(rpn_bbox_pred_reshape)
            rpn_cls_score_list.append(rpn_cls_score_reshape)
        else:
            if config.TRAIN.RPN_ENABLE_OHEM == 2:
                # online hard example mining rewrites labels/weights
                label, kpoint_weight, bbox_weight = mx.sym.Custom(
                    op_type='rpn_fpn_ohem', stride=int(stride),
                    cls_score=rpn_cls_score_reshape, bbox_weight=bbox_weight,
                    kpoint_weight=kpoint_weight, labels=label)

            # face/background classification; label -1 marks ignored anchors
            rpn_cls_prob = mx.symbol.SoftmaxOutput(data=rpn_cls_score_reshape,
                                                   label=label,
                                                   multi_output=True,
                                                   normalization='valid', use_ignore=True, ignore_label=-1,
                                                   name='rpn_cls_prob_stride%d' % stride)

            # bbox regression: smooth-L1 on weighted residuals
            bbox_diff = rpn_bbox_pred_reshape - bbox_target
            bbox_diff = bbox_diff * bbox_weight
            rpn_bbox_loss_ = mx.symbol.smooth_l1(name='rpn_bbox_loss_stride%d_' % stride, scalar=3.0, data=bbox_diff)
            rpn_bbox_loss = mx.sym.MakeLoss(name='rpn_bbox_loss_stride%d' % stride, data=rpn_bbox_loss_,
                                            grad_scale=1.0 / (config.TRAIN.RPN_BATCH_SIZE))

            # keypoint regression: same scheme, different smooth-L1 scalar
            kpoint_diff = rpn_kpoint_pred_reshape - kpoint_target
            kpoint_diff = kpoint_diff * kpoint_weight
            rpn_kpoint_loss_ = mx.symbol.smooth_l1(name='rpn_kpoint_loss_stride%d_' % stride, scalar=5.0, data=kpoint_diff)
            rpn_kpoint_loss = mx.sym.MakeLoss(name='rpn_kpoint_loss_stride%d' % stride, data=rpn_kpoint_loss_,
                                              grad_scale=1.0 / (config.TRAIN.RPN_BATCH_SIZE))

            ret_group.append(rpn_cls_prob)
            ret_group.append(mx.sym.BlockGrad(label))
            ret_group.append(rpn_bbox_loss)
            ret_group.append(mx.sym.BlockGrad(bbox_weight))
            ret_group.append(rpn_kpoint_loss)
            ret_group.append(mx.sym.BlockGrad(kpoint_weight))

    return mx.sym.Group(ret_group)