import tensorflow as tf
from tensorflow.keras import layers
from tensorflow.keras import backend


# ---------- MHSA (multi-head self-attention) block ---------- #
class MHSA(layers.Layer):
    """Self-attention layer with relative position encodings (BoTNet-style MHSA).

    Expects NHWC input of shape (batch, w, h, c) and returns a tensor of the
    same shape.  Q/K/V are produced by 1x1 convolutions; learnable per-row and
    per-column position embeddings contribute a content-position term to the
    attention logits.

    NOTE(review): `build` bakes the static batch size into a reshape and a
    `tf.tile`, so the layer appears to require a fixed (non-None) batch
    dimension — confirm callers always provide one.
    """

    def __init__(self, **kwargs):
        super(MHSA, self).__init__(**kwargs)

    def _get_process_layer(self, batch, channels):
        """Return a closure mapping (batch, w, h, c) -> (batch, c, w*h)."""

        def input_process_layer(input):
            input = tf.transpose(input, [0, 3, 1, 2])          # (batch, c, w, h)
            input = tf.reshape(input, [batch, channels, -1])   # (batch, c, w*h)
            return input

        return input_process_layer

    def build(self, input_shape):
        n_batch, width, height, C = input_shape
        self.batch = n_batch
        # Learnable relative position embeddings; their broadcast sum forms a
        # full (1, w, h, c) position map.
        self.rel_h = self.add_weight("rel_h", [1, width, 1, C], initializer="random_normal", trainable=True)
        self.rel_w = self.add_weight("rel_w", [1, 1, height, C], initializer="random_normal", trainable=True)

        # 1x1 projections for query / key / value.
        self.q_conv = layers.Conv2D(filters=C, kernel_size=(1, 1))
        self.k_conv = layers.Conv2D(filters=C, kernel_size=(1, 1))
        self.v_conv = layers.Conv2D(filters=C, kernel_size=(1, 1))
        self.input_process_layer = self._get_process_layer(n_batch, C)

        self.content_position_reshape = layers.Reshape(target_shape=[-1, C])
        self.out_reshape = layers.Reshape(target_shape=[width, height, C])

        self.softmax = layers.Activation(tf.nn.softmax)
        super(MHSA, self).build(input_shape)

    def call(self, inputs, **kwargs):
        """Apply self-attention; input and output shapes are (b, w, h, c)."""
        q = self.input_process_layer(self.q_conv(inputs))  # (b, c, w*h)
        k = self.input_process_layer(self.k_conv(inputs))  # (b, c, w*h)
        # BUG FIX: the original computed v with self.k_conv, which made V
        # identical to K and left self.v_conv unused; v must come from its
        # own projection.
        v = self.input_process_layer(self.v_conv(inputs))  # (b, c, w*h)

        # Content-content term: q^T k -> (b, w*h, w*h).
        content_content = tf.transpose(q, [0, 2, 1])  # (b, w*h, c)
        content_content = tf.matmul(content_content, k)

        # Content-position term from the relative embeddings.
        content_position = self.rel_h + self.rel_w                           # (1, w, h, c)
        content_position = self.content_position_reshape(content_position)  # (1, w*h, c)
        content_position = tf.tile(content_position, [self.batch, 1, 1])    # (b, w*h, c)
        content_position = tf.matmul(content_position, q)                   # (b, w*h, w*h)

        energy = layers.add([content_content, content_position])
        attention = self.softmax(energy)  # softmax over the last axis

        out = tf.matmul(v, attention)       # (b, c, w*h)
        out = tf.transpose(out, [0, 2, 1])  # (b, w*h, c)
        out = self.out_reshape(out)         # (b, w, h, c)
        return out

    def get_config(self):
        """No constructor arguments beyond Layer's own; base config suffices."""
        config = super(MHSA, self).get_config()
        return config

    # This is the default behavior of `from_config`; kept for explicitness.
    @classmethod
    def from_config(cls, config):
        return cls(**config)


def identity_block_MHSA(input_tensor, kernel_size, filters, stage, block):
    """Residual identity block with the middle 3x3 conv replaced by MHSA.

    `kernel_size` and the middle filter count are unused here because the
    spatial convolution was swapped for the attention layer; `filters3`
    must equal the channel count of `input_tensor` so the residual add is
    shape-compatible.  An SE block re-weights channels before the merge.
    """
    filters1, filters2, filters3 = filters
    bn_axis = 3 if backend.image_data_format() == 'channels_last' else 1
    conv_name_base = f'res{stage}{block}_branch'
    bn_name_base = f'bn{stage}{block}_branch'

    x = layers.Conv2D(filters1, (1, 1),
                      name=f'{conv_name_base}2a')(input_tensor)
    x = layers.BatchNormalization(axis=bn_axis, name=f'{bn_name_base}2a')(x)
    x = layers.Activation('relu')(x)

    # Self-attention stands in for the usual 3x3 spatial convolution.
    x = MHSA()(x)
    x = layers.BatchNormalization(axis=bn_axis, name=f'{bn_name_base}2b')(x)
    x = layers.Activation('relu')(x)

    x = layers.Conv2D(filters3, (1, 1), name=f'{conv_name_base}2c')(x)
    x = layers.BatchNormalization(axis=bn_axis, name=f'{bn_name_base}2c')(x)

    # Channel re-weighting before the residual merge.
    x = SE_block(x)

    x = layers.add([x, input_tensor])
    return layers.Activation('relu')(x)


def conv_block_MHSA(input_tensor,
                    kernel_size,
                    filters,
                    stage,
                    block,
                    strides=(2, 2)):
    """Residual conv block with the middle 3x3 conv replaced by MHSA.

    The projection shortcut (1x1 conv + BN) matches the main path's strides
    and output channels so the residual add is shape-compatible.
    `kernel_size` and the middle filter count are unused because the spatial
    convolution was swapped for the attention layer.
    """
    filters1, filters2, filters3 = filters
    bn_axis = 3 if backend.image_data_format() == 'channels_last' else 1
    conv_name_base = f'res{stage}{block}_branch'
    bn_name_base = f'bn{stage}{block}_branch'

    x = layers.Conv2D(filters1, (1, 1), strides=strides,
                      name=f'{conv_name_base}2a')(input_tensor)
    x = layers.BatchNormalization(axis=bn_axis, name=f'{bn_name_base}2a')(x)
    x = layers.Activation('relu')(x)

    # Self-attention stands in for the usual 3x3 spatial convolution.
    x = MHSA()(x)
    x = layers.BatchNormalization(axis=bn_axis, name=f'{bn_name_base}2b')(x)
    x = layers.Activation('relu')(x)

    x = layers.Conv2D(filters3, (1, 1), name=f'{conv_name_base}2c')(x)
    x = layers.BatchNormalization(axis=bn_axis, name=f'{bn_name_base}2c')(x)

    # Projection shortcut so channels/strides match the main path.
    shortcut = layers.Conv2D(filters3, (1, 1), strides=strides,
                             name=f'{conv_name_base}1')(input_tensor)
    shortcut = layers.BatchNormalization(axis=bn_axis,
                                         name=f'{bn_name_base}1')(shortcut)

    x = layers.add([x, shortcut])
    return layers.Activation('relu')(x)


# ----------MHSA end--------------------------#


# ---------- SE (Squeeze-and-Excitation) block ---------- #
def SE_expand_dims(input):
    """Insert two singleton axes after the batch dim: (b, c) -> (b, 1, 1, c).

    Used by SE_block so the pooled channel weights broadcast over the
    spatial dimensions during the final multiply.
    """
    return tf.expand_dims(tf.expand_dims(input, 1), 1)
def SE_block(input_tensor, r=16):
    """Squeeze-and-Excitation: re-scale each channel of `input_tensor`.

    Global-average-pools to a channel descriptor, passes it through a
    bottleneck MLP (reduction ratio `r`) with a sigmoid gate, and multiplies
    the input by the resulting per-channel weights.
    """
    channels = input_tensor.shape[-1]
    squeezed = layers.GlobalAveragePooling2D()(input_tensor)
    gate = layers.Dense(channels // r)(squeezed)
    gate = layers.Activation(activation=tf.nn.relu)(gate)
    gate = layers.Dense(channels)(gate)
    gate = layers.Activation(activation=tf.nn.sigmoid)(gate)
    # Reshape to (batch, 1, 1, channels) so the multiply broadcasts spatially.
    gate = layers.Lambda(function=SE_expand_dims)(gate)
    return layers.Multiply()([input_tensor, gate])


def conv_block_SE(input_tensor,
                  kernel_size,
                  filters,
                  stage,
                  block,
                  strides=(2, 2)):
    """SE-ResNet conv block: bottleneck conv path + SE + projection shortcut.

    The main path is 1x1 (strided) -> kernel_size -> 1x1 convolutions, each
    followed by BN (+ ReLU on the first two), then SE channel re-weighting.
    The shortcut is a strided 1x1 conv + BN so shapes match for the add.
    """
    filters1, filters2, filters3 = filters
    bn_axis = 3 if backend.image_data_format() == 'channels_last' else 1
    conv_name_base = f'res{stage}{block}_branch'
    bn_name_base = f'bn{stage}{block}_branch'

    x = layers.Conv2D(filters1, (1, 1), strides=strides,
                      name=f'{conv_name_base}2a')(input_tensor)
    x = layers.BatchNormalization(axis=bn_axis, name=f'{bn_name_base}2a')(x)
    x = layers.Activation('relu')(x)

    x = layers.Conv2D(filters2, kernel_size, padding='same',
                      name=f'{conv_name_base}2b')(x)
    x = layers.BatchNormalization(axis=bn_axis, name=f'{bn_name_base}2b')(x)
    x = layers.Activation('relu')(x)

    x = layers.Conv2D(filters3, (1, 1), name=f'{conv_name_base}2c')(x)
    x = layers.BatchNormalization(axis=bn_axis, name=f'{bn_name_base}2c')(x)

    # Channel re-weighting before the residual merge.
    x = SE_block(x)

    # Projection shortcut so channels/strides match the main path.
    shortcut = layers.Conv2D(filters3, (1, 1), strides=strides,
                             name=f'{conv_name_base}1')(input_tensor)
    shortcut = layers.BatchNormalization(axis=bn_axis,
                                         name=f'{bn_name_base}1')(shortcut)

    x = layers.add([x, shortcut])
    return layers.Activation('relu')(x)


def identity_block_SE(input_tensor, kernel_size, filters, stage, block):
    """SE-ResNet identity block: bottleneck conv path + SE, identity shortcut.

    `filters3` must equal the channel count of `input_tensor` so the
    residual add is shape-compatible.
    """
    filters1, filters2, filters3 = filters
    bn_axis = 3 if backend.image_data_format() == 'channels_last' else 1
    conv_name_base = f'res{stage}{block}_branch'
    bn_name_base = f'bn{stage}{block}_branch'

    x = layers.Conv2D(filters1, (1, 1),
                      name=f'{conv_name_base}2a')(input_tensor)
    x = layers.BatchNormalization(axis=bn_axis, name=f'{bn_name_base}2a')(x)
    x = layers.Activation('relu')(x)

    x = layers.Conv2D(filters2, kernel_size,
                      padding='same', name=f'{conv_name_base}2b')(x)
    x = layers.BatchNormalization(axis=bn_axis, name=f'{bn_name_base}2b')(x)
    x = layers.Activation('relu')(x)

    x = layers.Conv2D(filters3, (1, 1), name=f'{conv_name_base}2c')(x)
    x = layers.BatchNormalization(axis=bn_axis, name=f'{bn_name_base}2c')(x)

    # Channel re-weighting before the residual merge.
    x = SE_block(x)

    x = layers.add([x, input_tensor])
    return layers.Activation('relu')(x)


# -------------SE end-----------------------#

# ---------- plain ResNet blocks ---------- #

def conv_block(input_tensor,
               kernel_size,
               filters,
               stage,
               block,
               strides=(2, 2)):
    """ResNet conv block: bottleneck conv path with a projection shortcut.

    The main path is 1x1 (strided) -> kernel_size -> 1x1 convolutions, each
    followed by BN (+ ReLU on the first two).  The shortcut is a strided
    1x1 conv + BN so shapes match for the residual add.
    """
    filters1, filters2, filters3 = filters
    bn_axis = 3 if backend.image_data_format() == 'channels_last' else 1
    conv_name_base = f'res{stage}{block}_branch'
    bn_name_base = f'bn{stage}{block}_branch'

    x = layers.Conv2D(filters1, (1, 1), strides=strides,
                      name=f'{conv_name_base}2a')(input_tensor)
    x = layers.BatchNormalization(axis=bn_axis, name=f'{bn_name_base}2a')(x)
    x = layers.Activation('relu')(x)

    x = layers.Conv2D(filters2, kernel_size, padding='same',
                      name=f'{conv_name_base}2b')(x)
    x = layers.BatchNormalization(axis=bn_axis, name=f'{bn_name_base}2b')(x)
    x = layers.Activation('relu')(x)

    x = layers.Conv2D(filters3, (1, 1), name=f'{conv_name_base}2c')(x)
    x = layers.BatchNormalization(axis=bn_axis, name=f'{bn_name_base}2c')(x)

    # Projection shortcut so channels/strides match the main path.
    shortcut = layers.Conv2D(filters3, (1, 1), strides=strides,
                             name=f'{conv_name_base}1')(input_tensor)
    shortcut = layers.BatchNormalization(axis=bn_axis,
                                         name=f'{bn_name_base}1')(shortcut)

    x = layers.add([x, shortcut])
    return layers.Activation('relu')(x)


def identity_block(input_tensor, kernel_size, filters, stage, block):
    """ResNet identity block: bottleneck conv path, no conv on the shortcut.

    # Arguments
        input_tensor: input tensor
        kernel_size: kernel size of the middle conv layer on the main path
        filters: list of integers, the filters of the 3 conv layers on the
            main path; the last must match `input_tensor`'s channel count
        stage: integer, current stage label, used for generating layer names
        block: 'a','b'..., current block label, used for generating layer names

    # Returns
        Output tensor for the block.
    """
    filters1, filters2, filters3 = filters
    bn_axis = 3 if backend.image_data_format() == 'channels_last' else 1
    conv_name_base = f'res{stage}{block}_branch'
    bn_name_base = f'bn{stage}{block}_branch'

    x = layers.Conv2D(filters1, (1, 1),
                      name=f'{conv_name_base}2a')(input_tensor)
    x = layers.BatchNormalization(axis=bn_axis, name=f'{bn_name_base}2a')(x)
    x = layers.Activation('relu')(x)

    x = layers.Conv2D(filters2, kernel_size,
                      padding='same', name=f'{conv_name_base}2b')(x)
    x = layers.BatchNormalization(axis=bn_axis, name=f'{bn_name_base}2b')(x)
    x = layers.Activation('relu')(x)

    x = layers.Conv2D(filters3, (1, 1), name=f'{conv_name_base}2c')(x)
    x = layers.BatchNormalization(axis=bn_axis, name=f'{bn_name_base}2c')(x)

    # Identity shortcut: input added back unchanged.
    x = layers.add([x, input_tensor])
    return layers.Activation('relu')(x)
# ------------------resnet end-----------------------------------#
