# -*- coding: utf-8 -*-

from __future__ import absolute_import, print_function, division
import tensorflow.contrib.slim as slim
import tensorflow as tf

from libs.networks.mobilenet import mobilenet_v2
from libs.networks.mobilenet.mobilenet import training_scope
from libs.networks.mobilenet.mobilenet_v2 import op
from libs.networks.mobilenet.mobilenet_v2 import ops
import tfplot as tfp
from libs.configs import cfgs
from libs.core.relation_module import RelationModule


# Alias for the inverted-residual expansion-ratio helper used throughout
# the conv_defs below (expansion_size=expand_input(6) -> 6x expansion).
expand_input = ops.expand_input_by_factor

# Full MobilenetV2 trunk definition (stem conv through expanded_conv_12).
# Only referenced by the commented-out single-shot path in
# mobilenetv2_base; the active code uses the split variants
# V2_BASE_DEF_C2/C3/C4 further down instead.
V2_BASE_DEF = dict(
    defaults={
        # Note: these parameters of batch norm affect the architecture
        # that's why they are here and not in training_scope.
        (slim.batch_norm,): {'center': True, 'scale': True},
        (slim.conv2d, slim.fully_connected, slim.separable_conv2d): {
            'normalizer_fn': slim.batch_norm, 'activation_fn': tf.nn.relu6
        },
        (ops.expanded_conv,): {
            'expansion_size': expand_input(6),
            'split_expansion': 1,
            'normalizer_fn': slim.batch_norm,
            'residual': True
        },
        (slim.conv2d, slim.separable_conv2d): {'padding': 'SAME'}
    },
    spec=[
        op(slim.conv2d, stride=2, num_outputs=32, kernel_size=[3, 3]),
        op(ops.expanded_conv,
           expansion_size=expand_input(1, divisible_by=1),
           num_outputs=16, scope='expanded_conv'),
        op(ops.expanded_conv, stride=2, num_outputs=24, scope='expanded_conv_1'),
        op(ops.expanded_conv, stride=1, num_outputs=24, scope='expanded_conv_2'),
        op(ops.expanded_conv, stride=2, num_outputs=32, scope='expanded_conv_3'),
        op(ops.expanded_conv, stride=1, num_outputs=32, scope='expanded_conv_4'),
        op(ops.expanded_conv, stride=1, num_outputs=32, scope='expanded_conv_5'),
        op(ops.expanded_conv, stride=2, num_outputs=64, scope='expanded_conv_6'),
        op(ops.expanded_conv, stride=1, num_outputs=64, scope='expanded_conv_7'),
        op(ops.expanded_conv, stride=1, num_outputs=64, scope='expanded_conv_8'),
        op(ops.expanded_conv, stride=1, num_outputs=64, scope='expanded_conv_9'),
        op(ops.expanded_conv, stride=1, num_outputs=96, scope='expanded_conv_10'),
        op(ops.expanded_conv, stride=1, num_outputs=96, scope='expanded_conv_11'),
        op(ops.expanded_conv, stride=1, num_outputs=96, scope='expanded_conv_12')
    ],
)


def fusion_two_layer(C_i, P_j, scope):
    '''
    FPN lateral fusion: upsample the coarser pyramid map P_j to C_i's
    spatial size, project C_i down to 256 channels with a 1x1 conv, and
    average the two branches with equal weights.

    i = j-1
    :param C_i: backbone feature, shape [1, h, w, c]
    :param P_j: pyramid feature from the level above, shape [1, h/2, w/2, 256]
    :return: fused P_i, shape [1, h, w, 256]
    '''
    with tf.variable_scope(scope):
        level_name = scope.split('_')[1]
        target_h = tf.shape(C_i)[1]
        target_w = tf.shape(C_i)[2]

        # Upsample the p feature map to the same size as the C feature.
        p_upsampled = tf.image.resize_bilinear(P_j,
                                               size=[target_h, target_w],
                                               name='up_sample_'+level_name)

        # Project the backbone feature to the pyramid channel depth (256).
        c_reduced = slim.conv2d(C_i,
                                num_outputs=256,
                                kernel_size=[1, 1], stride=1,
                                scope='reduce_dim_'+level_name)

        # Equal-weight blend of the two branches.
        return 0.5 * p_upsampled + 0.5 * c_reduced

def add_heatmap(feature_maps, name):
    '''
    Write a channel-summed heatmap of a feature map to TensorBoard.

    :param feature_maps: tensor of shape [B, H, W, C]; B is squeezed away,
        so presumably B == 1 — confirm with callers.
    :param name: summary tag for the plot.
    :return: None (registers a tfplot summary as a side effect).
    '''

    def _render(activation):
        # Draw the 2-D activation as a jet-colormapped image with a colorbar.
        fig, ax = tfp.subplots()
        image = ax.imshow(activation, cmap='jet')
        fig.colorbar(image)
        return fig

    collapsed = tf.reduce_sum(feature_maps, axis=-1)
    collapsed = tf.squeeze(collapsed, axis=0)
    tfp.summary.plot(name, _render, [collapsed])


# First trunk chunk: stem conv (stride 2) through expanded_conv_5, with
# further stride-2 steps at expanded_conv_1 and expanded_conv_3.
# mobilenetv2_base runs this on the input image to produce the C2 feature.
V2_BASE_DEF_C2 = dict(
    defaults={
        # Note: these parameters of batch norm affect the architecture
        # that's why they are here and not in training_scope.
        (slim.batch_norm,): {'center': True, 'scale': True},
        (slim.conv2d, slim.fully_connected, slim.separable_conv2d): {
            'normalizer_fn': slim.batch_norm, 'activation_fn': tf.nn.relu6
        },
        (ops.expanded_conv,): {
            'expansion_size': expand_input(6),
            'split_expansion': 1,
            'normalizer_fn': slim.batch_norm,
            'residual': True
        },
        (slim.conv2d, slim.separable_conv2d): {'padding': 'SAME'}
    },
    spec=[
        op(slim.conv2d, stride=2, num_outputs=32, kernel_size=[3, 3]),
        op(ops.expanded_conv,
           expansion_size=expand_input(1, divisible_by=1),
           num_outputs=16, scope='expanded_conv'),
        op(ops.expanded_conv, stride=2, num_outputs=24, scope='expanded_conv_1'),
        op(ops.expanded_conv, stride=1, num_outputs=24, scope='expanded_conv_2'),
        op(ops.expanded_conv, stride=2, num_outputs=32, scope='expanded_conv_3'),
        op(ops.expanded_conv, stride=1, num_outputs=32, scope='expanded_conv_4'),
        op(ops.expanded_conv, stride=1, num_outputs=32, scope='expanded_conv_5')
    ],
)

# Second trunk chunk: expanded_conv_6 (stride 2) through expanded_conv_9.
# mobilenetv2_base runs this on C2 to produce the C3 feature.
V2_BASE_DEF_C3 = dict(
    defaults={
        # Note: these parameters of batch norm affect the architecture
        # that's why they are here and not in training_scope.
        (slim.batch_norm,): {'center': True, 'scale': True},
        (slim.conv2d, slim.fully_connected, slim.separable_conv2d): {
            'normalizer_fn': slim.batch_norm, 'activation_fn': tf.nn.relu6
        },
        (ops.expanded_conv,): {
            'expansion_size': expand_input(6),
            'split_expansion': 1,
            'normalizer_fn': slim.batch_norm,
            'residual': True
        },
        (slim.conv2d, slim.separable_conv2d): {'padding': 'SAME'}
    },
    spec=[
        op(ops.expanded_conv, stride=2, num_outputs=64, scope='expanded_conv_6'),
        op(ops.expanded_conv, stride=1, num_outputs=64, scope='expanded_conv_7'),
        op(ops.expanded_conv, stride=1, num_outputs=64, scope='expanded_conv_8'),
        op(ops.expanded_conv, stride=1, num_outputs=64, scope='expanded_conv_9')
    ],
)
# Third trunk chunk: expanded_conv_10..12, all stride 1 (same spatial
# resolution as C3). mobilenetv2_base runs this on C3 to produce C4.
V2_BASE_DEF_C4 = dict(
    defaults={
        # Note: these parameters of batch norm affect the architecture
        # that's why they are here and not in training_scope.
        (slim.batch_norm,): {'center': True, 'scale': True},
        (slim.conv2d, slim.fully_connected, slim.separable_conv2d): {
            'normalizer_fn': slim.batch_norm, 'activation_fn': tf.nn.relu6
        },
        (ops.expanded_conv,): {
            'expansion_size': expand_input(6),
            'split_expansion': 1,
            'normalizer_fn': slim.batch_norm,
            'residual': True
        },
        (slim.conv2d, slim.separable_conv2d): {'padding': 'SAME'}
    },
    spec=[
        op(ops.expanded_conv, stride=1, num_outputs=96, scope='expanded_conv_10'),
        op(ops.expanded_conv, stride=1, num_outputs=96, scope='expanded_conv_11'),
        op(ops.expanded_conv, stride=1, num_outputs=96, scope='expanded_conv_12')
    ],
)

# Detector head definition: expanded_conv_13 (stride 2) through
# expanded_conv_16 plus a final 1x1 conv to 1024 channels. Used by
# mobilenetv2_head / mobilenetv2_head_with_relation_module on ROI features.
V2_HEAD_DEF = dict(
    defaults={
        # Note: these parameters of batch norm affect the architecture
        # that's why they are here and not in training_scope.
        (slim.batch_norm,): {'center': True, 'scale': True},
        (slim.conv2d, slim.fully_connected, slim.separable_conv2d): {
            'normalizer_fn': slim.batch_norm, 'activation_fn': tf.nn.relu6
        },
        (ops.expanded_conv,): {
            'expansion_size': expand_input(6),
            'split_expansion': 1,
            'normalizer_fn': slim.batch_norm,
            'residual': True
        },
        (slim.conv2d, slim.separable_conv2d): {'padding': 'SAME'}
    },
    spec=[
        op(ops.expanded_conv, stride=2, num_outputs=160, scope='expanded_conv_13'),
        op(ops.expanded_conv, stride=1, num_outputs=160, scope='expanded_conv_14'),
        op(ops.expanded_conv, stride=1, num_outputs=160, scope='expanded_conv_15'),
        op(ops.expanded_conv, stride=1, num_outputs=320, scope='expanded_conv_16'),
        op(slim.conv2d, stride=1, kernel_size=[1, 1], num_outputs=1024, scope='Conv_1')
    ],
)



def mobilenetv2_scope(is_training=True,
                      trainable=True,
                      weight_decay=0.00004,
                      stddev=0.09,
                      dropout_keep_prob=0.8,
                      bn_decay=0.997):
    """Define the MobilenetV2 arg scope used by this backbone.

    Batch norm is always frozen here: ``is_training`` and ``trainable``
    are forced to False for ``slim.batch_norm`` regardless of the
    arguments, so pretrained BN statistics are kept during fine-tuning.
    The ``is_training`` argument only affects the underlying
    ``training_scope`` (regularization/initializer setup).

    Args:
        is_training: forwarded to ``training_scope``.
        trainable: whether conv / fc / separable-conv weights are trainable.
        weight_decay: L2 regularization strength for ``training_scope``.
        stddev: unused; kept for interface compatibility.
        dropout_keep_prob: unused; kept for interface compatibility.
        bn_decay: moving-average decay for the (frozen) batch norm.

    Returns:
        A slim arg_scope to wrap network construction with.
    """
    # Freeze BN: neither update moving statistics nor train beta/gamma.
    batch_norm_params = {
        'is_training': False,
        'trainable': False,
        'decay': bn_decay,
    }
    with slim.arg_scope(training_scope(is_training=is_training, weight_decay=weight_decay)):
        with slim.arg_scope([slim.conv2d, slim.fully_connected, slim.separable_conv2d],
                            trainable=trainable):
            with slim.arg_scope([slim.batch_norm], **batch_norm_params) as sc:
                return sc



def mobilenetv2_base(img_batch,scope_name, is_training=True):
    '''
    Build the MobilenetV2 backbone features C2/C3/C4 by running the three
    split conv_defs (V2_BASE_DEF_C2/C3/C4) back to back, then — when
    cfgs.BUILD_PYRAMID is set — an FPN-style pyramid on top of them.

    :param img_batch: input image tensor [B, H, W, 3]; presumably B == 1
        (add_heatmap squeezes axis 0) — confirm with the caller.
    :param scope_name: unused in the active code path (only referenced by
        the commented-out feature_dict variant below).
    :param is_training: forwarded to mobilenetv2_scope; note the
        mobilenet_base calls themselves pass is_training=False.
    :return: list of pyramid feature maps, ordered per cfgs.LEVLES.
    '''

    with slim.arg_scope(mobilenetv2_scope(is_training=is_training, trainable=True)):

        # feature_to_crop, endpoints = mobilenet_v2.mobilenet_base(input_tensor=img_batch,
        #                                               num_classes=None,
        #                                               is_training=False,
        #                                               depth_multiplier=1.0,
        #                                               scope='MobilenetV2',
        #                                               conv_defs=V2_BASE_DEF,
        #                                               finegrain_classification_mode=False)

        # feature_to_crop = tf.Print(feature_to_crop, [tf.shape(feature_to_crop)], summarize=10, message='rpn_shape')

        C2, end_points_C2 = mobilenet_v2.mobilenet_base(input_tensor=img_batch,
                                                      num_classes=None,
                                                      is_training=False,
                                                      depth_multiplier=1.0,
                                                      scope='MobilenetV2',
                                                      conv_defs=V2_BASE_DEF_C2,
                                                      finegrain_classification_mode=False)
        # C2 = tf.Print(C2, [tf.shape(C2)], summarize=10, message='C2_shape')
        add_heatmap(C2, name='Layer2/C2_heat')  # add a C2 heatmap to tensorboard

        C3, end_points_C3 = mobilenet_v2.mobilenet_base(input_tensor=C2,
                                                        num_classes=None,
                                                        is_training=False,
                                                        depth_multiplier=1.0,
                                                        scope='MobilenetV2',
                                                        conv_defs=V2_BASE_DEF_C3,
                                                        finegrain_classification_mode=False)
        # C3 = tf.Print(C3, [tf.shape(C3)], summarize=10, message='C3_shape')
        add_heatmap(C3, name='Layer3/C3_heat')  # add a C3 heatmap to tensorboard
        C4, end_points_C4 = mobilenet_v2.mobilenet_base(input_tensor=C3,
                                                        num_classes=None,
                                                        is_training=False,
                                                        depth_multiplier=1.0,
                                                        scope='MobilenetV2',
                                                        conv_defs=V2_BASE_DEF_C4,
                                                        finegrain_classification_mode=False)
        # C4 = tf.Print(C4, [tf.shape(C4)], summarize=10, message='C4_shape')
        add_heatmap(C4, name='Layer4/C4_heat')  # add a C4 heatmap to tensorboard
        # # featuremap
        # feature_dict = {'C2': end_points_C2['{}/block1/unit_2/bottleneck_v1'.format(scope_name)],
        #                 'C3': end_points_C3['{}/block2/unit_3/bottleneck_v1'.format(scope_name)],
        #                 'C4': end_points_C4['{}/block3/unit_{}/bottleneck_v1'.format(scope_name, middle_num_units - 1)],
        #                 'C5': end_points_C5['{}/block4/unit_3/bottleneck_v1'.format(scope_name)],
        #                 # 'C5': end_points_C5['{}/block4'.format(scope_name)],
        #                 }

        feature_dict = {'C2': C2,
                        'C3': C3,
                        'C4': C4,
                        }

        pyramid_dict = {}
        if cfgs.BUILD_PYRAMID:
            with tf.variable_scope('build_pyramid'):
                with slim.arg_scope([slim.conv2d], weights_regularizer=slim.l2_regularizer(cfgs.WEIGHT_DECAY),
                                    activation_fn=None, normalizer_fn=None):
                    # NOTE: this does not quite match the original paper —
                    # P5 should additionally pass through a 3x3 conv before
                    # becoming P5 (here the top level is P4).
                    P4 = slim.conv2d(C4,
                                     num_outputs=256,
                                     kernel_size=[1, 1],
                                     stride=1, scope='build_P4')

                    pyramid_dict['P4'] = P4

                    for level in range(3, 1, -1):  # build [ P3, P2]

                        # Top-down pathway: fuse the coarser pyramid level
                        # with the matching backbone feature.
                        pyramid_dict['P%d' % level] = fusion_two_layer(C_i=feature_dict["C%d" % level],
                                                                       P_j=pyramid_dict["P%d" % (level + 1)],
                                                                       scope='build_P%d' % level)
                    for level in range(3, 1, -1):
                        # 3x3 smoothing conv to reduce upsampling aliasing.
                        pyramid_dict['P%d' % level] = slim.conv2d(pyramid_dict['P%d' % level],
                                                                  num_outputs=256, kernel_size=[3, 3], padding="SAME",
                                                                  stride=1, scope="fuse_P%d" % level)
            for level in range(4, 1, -1):
                add_heatmap(pyramid_dict['P%d' % level], name='Layer%d/P%d_heat' % (level, level))

            # return [P2, P3, P4,]
            print("we are in Pyramid::-======>>>>")
            print(cfgs.LEVLES)
            print("base_anchor_size are: ", cfgs.BASE_ANCHOR_SIZE_LIST)
            print(20 * "__")
        else:
            # No pyramid: expose the top backbone feature directly as P4.
            pyramid_dict['P4'] = C4
        # return feature_to_crop
        return [pyramid_dict[level_name] for level_name in cfgs.LEVLES]


def mobilenetv2_head(inputs, is_training=True):
    '''
    Run the MobilenetV2 head (V2_HEAD_DEF) on cropped ROI features and
    return a flat per-ROI feature vector.

    :param inputs: per-ROI feature tensor fed to the head conv_defs.
    :param is_training: forwarded to mobilenetv2_scope (the mobilenet call
        itself uses is_training=False).
    :return: head output with the 1x1 spatial dims squeezed away.
    '''
    with slim.arg_scope(mobilenetv2_scope(is_training=is_training, trainable=True)):
        head_out, _ = mobilenet_v2.mobilenet(input_tensor=inputs,
                                             num_classes=None,
                                             is_training=False,
                                             depth_multiplier=1.0,
                                             scope='MobilenetV2',
                                             conv_defs=V2_HEAD_DEF,
                                             finegrain_classification_mode=False)
        # Collapse the height/width axes (both size 1 after the head).
        return tf.squeeze(head_out, [1, 2])

def mobilenetv2_head_with_relation_module(inputs,rois, is_training=True):
    '''
    Same head as mobilenetv2_head, but the per-ROI features are passed
    through a RelationModule before being returned.

    :param inputs: per-ROI feature tensor fed to the head conv_defs.
    :param rois: ROI boxes consumed by RelationModule.
    :param is_training: forwarded to mobilenetv2_scope.
    :return: relation-augmented features reshaped to [-1, 1024].
    '''
    with slim.arg_scope(mobilenetv2_scope(is_training=is_training, trainable=True)):
        head_out, _ = mobilenet_v2.mobilenet(input_tensor=inputs,
                                             num_classes=None,
                                             is_training=False,
                                             depth_multiplier=1.0,
                                             scope='MobilenetV2',
                                             conv_defs=V2_HEAD_DEF,
                                             finegrain_classification_mode=False)

        # Collapse the 1x1 spatial axes left by the head.
        head_out = tf.squeeze(head_out, [1, 2])

        # Augment appearance features with geometric relations between ROIs.
        relation_feat = RelationModule(head_out, rois, 1024).result
        return tf.reshape(relation_feat, [-1, 1024])