import mxnet as mx
import numpy as np
from rcnn.config import config
from rcnn.PY_OP import fpn_roi_pooling, proposal_fpn, mask_roi, mask_output, proposal_fpn_out, rpn_fpn_ohem
FPN = False
USE_DCN=False

def conv_act_layer(from_layer, name, num_filter, kernel=(1,1), pad=(0,0), \
				   stride=(1,1), act_type="relu", bias_wd_mult=0.0, dcn=False):
	"""Convolution (plain or deformable) followed by an optional activation.

	The weight is initialised with Normal(0.01); the bias gets a doubled
	learning rate and a configurable weight-decay multiplier.  When ``dcn``
	is True a 3x3 deformable convolution is built instead, with its offset
	branch initialised to zero so it starts out as a regular convolution.
	An empty ``act_type`` skips the activation entirely.
	"""
	weight = mx.symbol.Variable(name="{}_weight".format(name),
								init=mx.init.Normal(0.01), attr={'__lr_mult__': '1.0'})
	bias = mx.symbol.Variable(name="{}_bias".format(name),
							  init=mx.init.Constant(0.0), attr={'__lr_mult__': '2.0', '__wd_mult__': str(bias_wd_mult)})
	if dcn:
		# Deformable path is only wired for 3x3 kernels.
		assert kernel[0] == 3 and kernel[1] == 3
		num_group = 1
		# 18 offset channels per deformable group: 2 (x, y) * 9 kernel taps.
		offset_channels = num_group * 18
		offset_weight = mx.symbol.Variable(name="{}_offset_weight".format(name),
										   init=mx.init.Constant(0.0), attr={'__lr_mult__': '1.0'})
		offset_bias = mx.symbol.Variable(name="{}_offset_bias".format(name),
										 init=mx.init.Constant(0.0), attr={'__lr_mult__': '2.0', '__wd_mult__': str(bias_wd_mult)})
		conv_offset = mx.symbol.Convolution(name=name + '_offset', data=from_layer,
											weight=offset_weight, bias=offset_bias,
											num_filter=offset_channels, pad=(1, 1), kernel=(3, 3), stride=(1, 1))
		conv = mx.contrib.symbol.DeformableConvolution(name=name, data=from_layer, offset=conv_offset,
													   weight=weight, bias=bias, num_filter=num_filter,
													   pad=(1, 1), kernel=(3, 3), num_deformable_group=num_group,
													   stride=(1, 1), no_bias=False)
	else:
		conv = mx.symbol.Convolution(data=from_layer, kernel=kernel, pad=pad,
									 stride=stride, num_filter=num_filter,
									 name="{}".format(name), weight=weight, bias=bias)
	if not act_type:
		return conv
	return mx.symbol.Activation(data=conv, act_type=act_type,
								name="{}_{}".format(name, act_type))

def ssh_context_module(body, num_filters, name):
	"""SSH context module.

	A shared 3x3 reduction feeds two branches: one extra 3x3 conv
	(effective 5x5 receptive field) and a stack of two 3x3 convs
	(effective 7x7).  Returns the pair (branch_5x5, branch_7x7).
	"""
	shared = conv_act_layer(body, name + '_conv1', num_filters,
							kernel=(3, 3), pad=(1, 1), stride=(1, 1), act_type='relu', dcn=False)
	branch_5x5 = conv_act_layer(shared, name + '_conv2', num_filters,
								kernel=(3, 3), pad=(1, 1), stride=(1, 1), act_type='relu', dcn=USE_DCN)
	mid_7x7 = conv_act_layer(shared, name + '_conv3_1', num_filters,
							 kernel=(3, 3), pad=(1, 1), stride=(1, 1), act_type='relu', dcn=False)
	branch_7x7 = conv_act_layer(mid_7x7, name + '_conv3_2', num_filters,
								kernel=(3, 3), pad=(1, 1), stride=(1, 1), act_type='relu', dcn=USE_DCN)
	return (branch_5x5, branch_7x7)

def ssh_detection_module(body, num_filters, name):
	"""SSH detection module.

	Concatenates (along channels) a direct 3x3 branch with the two
	context-module branches, each of which carries num_filters // 2
	channels.
	"""
	direct = conv_act_layer(body, name + '_conv1', num_filters,
							kernel=(3, 3), pad=(1, 1), stride=(1, 1), act_type='relu', dcn=USE_DCN)
	ctx_5x5, ctx_7x7 = ssh_context_module(body, num_filters // 2, name + '_context')
	return mx.sym.concat(direct, ctx_5x5, ctx_7x7, dim=1, name=name + '_concat')

def get_feat_down(conv_feat):
	"""Top-down FPN merge.

	``conv_feat`` is ordered deepest-first: [C5, C4, C3].  Each level gets a
	1x1 lateral conv to 256 channels; the coarser map is 2x nearest-neighbour
	upsampled, cropped to the lateral's spatial size, summed in, and (for P4
	and P3) smoothed with a 3x3 aggregate conv.  Returns (P3, P4, P5),
	finest-first.
	"""
	top = conv_act_layer(conv_feat[0], 'P5_lateral',
						 256, kernel=(1,1), pad=(0,0), stride=(1, 1), act_type='relu')
	pyramid = [top]  # collects P5, P4, P3 in build order
	prev = 'P5'
	for src_idx, level in ((1, 'P4'), (2, 'P3')):
		upsampled = mx.symbol.UpSampling(top, scale=2, sample_type='nearest',
										 workspace=512, name=prev + '_upsampling', num_args=1)
		lateral = conv_act_layer(conv_feat[src_idx], level + '_lateral',
								 256, kernel=(1,1), pad=(0,0), stride=(1, 1), act_type='relu')
		# Crop the upsampled map so its spatial dims match the lateral's.
		clipped = mx.symbol.Crop(*[upsampled, lateral], name=level + '_clip')
		merged = mx.sym.ElementWiseSum(*[clipped, lateral], name=level + '_sum')
		top = conv_act_layer(merged, level + '_aggregate',
							 256, kernel=(3,3), pad=(1,1), stride=(1, 1), act_type='relu')
		pyramid.append(top)
		prev = level

	return pyramid[2], pyramid[1], pyramid[0]


def Act(data, act_type, name):
	"""Activation helper: 'prelu' maps to LeakyReLU in prelu mode, every
	other act_type goes through the standard Activation operator."""
	if act_type != 'prelu':
		return mx.sym.Activation(data=data, act_type=act_type, name=name)
	return mx.sym.LeakyReLU(data=data, act_type='prelu', name=name)


def Conv(data, num_filter=1, kernel=(1, 1), stride=(1, 1), pad=(0, 0), num_group=1, name=None, suffix=''):
	"""Bias-free convolution -> BatchNorm -> PReLU.

	NOTE(review): BN momentum is hard-coded to 0.9 here while Linear() uses
	config.bn_mom — confirm the discrepancy is intentional.
	"""
	prefix = '%s%s' % (name, suffix)
	out = mx.sym.Convolution(data=data, num_filter=num_filter, kernel=kernel,
							 num_group=num_group, stride=stride, pad=pad,
							 no_bias=True, name=prefix + '_conv2d')
	out = mx.sym.BatchNorm(data=out, name=prefix + '_batchnorm', fix_gamma=False, momentum=0.9)
	return Act(data=out, act_type='prelu', name=prefix + '_relu')

def Linear(data, num_filter=1, kernel=(1, 1), stride=(1, 1), pad=(0, 0), num_group=1, name=None, suffix=''):
	"""Bias-free convolution -> BatchNorm with NO activation (a 'linear
	bottleneck' projection)."""
	prefix = '%s%s' % (name, suffix)
	out = mx.sym.Convolution(data=data, num_filter=num_filter, kernel=kernel,
							 num_group=num_group, stride=stride, pad=pad,
							 no_bias=True, name=prefix + '_conv2d')
	return mx.sym.BatchNorm(data=out, name=prefix + '_batchnorm', fix_gamma=False, momentum=config.bn_mom)

def ConvOnly(data, num_filter=1, kernel=(1, 1), stride=(1, 1), pad=(0, 0), num_group=1, name=None, suffix=''):
	"""Bare bias-free convolution: no BatchNorm, no activation."""
	return mx.sym.Convolution(data=data, num_filter=num_filter, kernel=kernel,
							  num_group=num_group, stride=stride, pad=pad,
							  no_bias=True, name='%s%s_conv2d' % (name, suffix))


def ConvDW(data, kernel=(3, 3), stride=(2, 2), pad=(1, 1), num_group=1, name=None, suffix=''):
	"""Depthwise-separable unit: a 1x1 pointwise Conv to num_group channels,
	then a grouped (depthwise) Conv with the given kernel/stride/pad."""
	pointwise = Conv(data=data, num_filter=num_group, kernel=(1, 1), pad=(0, 0), stride=(1, 1),
					 name='%s%s_conv_sep' % (name, suffix))
	return Conv(data=pointwise, num_filter=num_group, num_group=num_group,
				kernel=kernel, pad=pad, stride=stride,
				name='%s%s_conv_dw' % (name, suffix))

def DResidual(data, num_out=1, kernel=(3, 3), stride=(2, 2), pad=(1, 1), num_group=1, name=None, suffix=''):
	"""Separable block: 1x1 expand -> depthwise conv -> 1x1 linear projection
	to num_out channels.

	NOTE(review): despite the name, no residual/shortcut addition happens in
	this function — verify whether callers are expected to add one.
	"""
	expanded = Conv(data=data, num_filter=num_group, kernel=(1, 1), pad=(0, 0), stride=(1, 1),
					name='%s%s_conv_sep' % (name, suffix))
	depthwise = Conv(data=expanded, num_filter=num_group, num_group=num_group,
					 kernel=kernel, pad=pad, stride=stride,
					 name='%s%s_conv_dw' % (name, suffix))
	return Linear(data=depthwise, num_filter=num_out, kernel=(1, 1), pad=(0, 0), stride=(1, 1),
				  name='%s%s_conv_proj' % (name, suffix))

def get_ssh_conv(data):
	"""Build the lightweight backbone plus SSH detection heads.

	Returns a dict mapping feature stride -> SSH detection-module symbol:
	{8: m1, 16: m2, 32: m3}.  When FPN is enabled the three feature maps are
	first refined top-down by get_feat_down (deepest map passed first).
	"""
	c1 = Conv(data=data, num_filter=24, kernel=(3, 3), pad=(0,0), stride=(2,2), name='conv1')
	c2 = Conv(c1, num_filter=24, num_group=24, kernel=(3,3), pad=(0,0), stride=(2,2), name='conv2_dw')
	c3 = ConvDW(c2, kernel=(3,3), stride=(2,2), pad=(1,1), num_group=48, name='conv3')
	c4 = ConvDW(c3, kernel=(3,3), stride=(1,1), pad=(1,1), num_group=64, name='conv4')    # stride 8
	c5 = ConvDW(c4, kernel=(3, 3), stride=(2, 2), pad=(1, 1), num_group=128, name='conv5')  # stride 16
	c6 = ConvDW(c5, kernel=(3, 3), stride=(2, 2), pad=(1, 1), num_group=128, name='conv6')  # stride 32

	if FPN:
		# get_feat_down takes [deepest, ..., shallowest] and returns
		# (P3, P4, P5) finest-first, matching (c4, c5, c6).
		c4, c5, c6 = get_feat_down([c6, c5, c4])

	# Head widths: 64 filters on the stride-8 head, 32 on the coarser two.
	return {
		8: ssh_detection_module(c4, 64, 'ssh_m1_det'),
		16: ssh_detection_module(c5, 32, 'ssh_m2_det'),
		32: ssh_detection_module(c6, 32, 'ssh_m3_det'),
	}


def get_ssh_train():
	"""Build the SSH training symbol.

	For every stride in config.RPN_FEAT_STRIDE this attaches to the shared
	backbone feature a 2-way classification head, a 4-value bbox-regression
	head and a 10-value keypoint head (5 landmarks), computes the softmax /
	smooth-L1 losses, and groups all per-stride loss symbols into one
	mx.sym.Group.

	:return: mx.sym.Group of [cls_prob, label, bbox_loss, bbox_weight,
		kpoint_loss, kpoint_weight] per stride (OHEM-enabled path).
	"""
	data = mx.symbol.Variable(name="data")

	# shared convolutional layers: dict of stride -> feature symbol
	conv_fpn_feat = get_ssh_conv(data)
	# NOTE(review): these two lists are only filled on the RPN_ENABLE_OHEM<0
	# path and are never added to ret_group — confirm whether that path is
	# still used, since it would return an empty Group.
	rpn_cls_score_list = []
	rpn_bbox_pred_list = []
	ret_group = []
	for stride in config.RPN_FEAT_STRIDE:
		num_anchors = config.RPN_ANCHOR_CFG[str(stride)]['NUM_ANCHORS']
		# Per-stride ground-truth inputs supplied by the data iterator.
		label = mx.symbol.Variable(name='label_stride%d'%stride)
		bbox_target = mx.symbol.Variable(name='bbox_target_stride%d'%stride)
		bbox_weight = mx.symbol.Variable(name='bbox_weight_stride%d'%stride)
		kpoint_target = mx.symbol.Variable(name='kpoint_target_stride%d'%stride)
		kpoint_weight = mx.symbol.Variable(name='kpoint_weight_stride%d'%stride)
		rpn_relu = conv_fpn_feat[stride]
		if not config.USE_MAXOUT or stride!=config.RPN_FEAT_STRIDE[-1]:
			# Plain head: 2 scores (bg, fg) per anchor.
			rpn_cls_score = conv_act_layer(rpn_relu, 'rpn_cls_score_stride%d'%stride, 2*num_anchors,
										   kernel=(1,1), pad=(0,0), stride=(1, 1), act_type='')
		else:
			# Maxout background on the last stride: per anchor, predict 3
			# background scores and keep channel-wise max, plus 1 foreground
			# score; concat keeps the (bg, fg) channel interleaving.
			cls_list = []
			for a in range(num_anchors):
				rpn_cls_score_bg = conv_act_layer(rpn_relu, 'rpn_cls_score_stride%d_anchor%d_bg'%(stride,a), 3,
												  kernel=(1,1), pad=(0,0), stride=(1, 1), act_type='')
				rpn_cls_score_bg = mx.sym.max(rpn_cls_score_bg, axis=1, keepdims=True)
				cls_list.append(rpn_cls_score_bg)
				rpn_cls_score_fg = conv_act_layer(rpn_relu, 'rpn_cls_score_stride%d_anchor%d_fg'%(stride,a), 1,
												  kernel=(1,1), pad=(0,0), stride=(1, 1), act_type='')
				cls_list.append(rpn_cls_score_fg)
			rpn_cls_score = mx.sym.concat(*cls_list, dim=1)
		rpn_bbox_pred = conv_act_layer(rpn_relu, 'rpn_bbox_pred_stride%d'%stride, 4*num_anchors,
									   kernel=(1,1), pad=(0,0), stride=(1, 1), act_type='')
		rpn_kpoint_pred = conv_act_layer(rpn_relu, 'rpn_kpoint_pred_stride%d'%stride, 10*num_anchors,
										 kernel=(1,1), pad=(0,0), stride=(1,1), act_type='')

		# Flatten spatial dims so the heads line up with the flat label/target
		# layout; cls additionally exposes the 2-way axis for softmax.
		rpn_cls_score_reshape = mx.symbol.Reshape(data=rpn_cls_score,
												  shape=(0, 2, -1),
												  name="rpn_cls_score_reshape_stride%s" % stride)
		rpn_bbox_pred_reshape = mx.symbol.Reshape(data=rpn_bbox_pred,
												  shape=(0, 0, -1),
												  name="rpn_bbox_pred_reshape_stride%s" % stride)
		rpn_kpoint_pred_reshape = mx.symbol.Reshape(data=rpn_kpoint_pred,
													shape=(0, 0, -1),
													name="rpn_kpoint_pred_reshape_stride%s" % stride)

		if config.TRAIN.RPN_ENABLE_OHEM<0:
			rpn_bbox_pred_list.append(rpn_bbox_pred_reshape)
			rpn_cls_score_list.append(rpn_cls_score_reshape)
		else:
			if config.TRAIN.RPN_ENABLE_OHEM==2:
				# Online hard example mining: the custom op rewrites the label
				# and regression weights based on the current cls scores.
				label, kpoint_weight, bbox_weight = mx.sym.Custom(op_type='rpn_fpn_ohem', stride=int(stride), cls_score=rpn_cls_score_reshape, bbox_weight = bbox_weight , kpoint_weight = kpoint_weight, labels = label)
			rpn_cls_prob = mx.symbol.SoftmaxOutput(data=rpn_cls_score_reshape,
												   label=label,
												   multi_output=True,
												   normalization='valid', use_ignore=True, ignore_label=-1,
												   name='rpn_cls_prob_stride%d'%stride)

			# Smooth-L1 bbox loss, masked by bbox_weight (zero for ignored anchors).
			bbox_diff = rpn_bbox_pred_reshape-bbox_target
			bbox_diff = bbox_diff * bbox_weight
			rpn_bbox_loss_ = mx.symbol.smooth_l1(name='rpn_bbox_loss_stride%d_'%stride, scalar=3.0, data=bbox_diff)
			rpn_bbox_loss = mx.sym.MakeLoss(name='rpn_bbox_loss_stride%d'%stride, data=rpn_bbox_loss_, grad_scale=1.0 / (config.TRAIN.RPN_BATCH_SIZE))

			# Smooth-L1 keypoint loss, masked by kpoint_weight.
			kpoint_diff = rpn_kpoint_pred_reshape-kpoint_target
			kpoint_diff = kpoint_diff * kpoint_weight
			rpn_kpoint_loss_ = mx.symbol.smooth_l1(name='rpn_kpoint_loss_stride%d_'%stride, scalar=5.0, data=kpoint_diff)
			rpn_kpoint_loss = mx.sym.MakeLoss(name='rpn_kpoint_loss_stride%d'%stride, data=rpn_kpoint_loss_, grad_scale=1.0 / (config.TRAIN.RPN_BATCH_SIZE))

			# BlockGrad outputs expose label/weights to the metric code
			# without contributing gradients.
			ret_group.append(rpn_cls_prob)
			ret_group.append(mx.sym.BlockGrad(label))
			ret_group.append(rpn_bbox_loss)
			ret_group.append(mx.sym.BlockGrad(bbox_weight))
			ret_group.append(rpn_kpoint_loss)
			ret_group.append(mx.sym.BlockGrad(kpoint_weight))

	return mx.sym.Group(ret_group)


