import tensorflow as tf


def h_sigmoid(x):
    """Hard sigmoid: ``relu6(x + 3) / 6``, a piecewise-linear sigmoid approximation.

    Maps x <= -3 to 0, x >= 3 to 1, and is linear in between (MobileNetV3 style).

    Args:
        x: Input tensor.

    Returns:
        Tensor of the same shape as ``x`` with values in [0, 1].
    """
    # Divide by 6 exactly instead of multiplying by the truncated constant
    # 0.16667, which introduced a small systematic error (0.16667 != 1/6).
    return tf.nn.relu6(x + 3) / 6


def h_swish(x):
    """Hard swish activation: ``x * h_sigmoid(x)`` (MobileNetV3).

    Args:
        x: Input tensor.

    Returns:
        Tensor of the same shape as ``x``.
    """
    return h_sigmoid(x) * x


class Identify(tf.keras.layers.Layer):
    """No-op layer that returns its input unchanged.

    Used elsewhere in this file as a drop-in placeholder when an optional
    component (SE block, activation, batch norm) is disabled.

    NOTE(review): the name looks like a misspelling of "Identity", but it is
    kept as-is because other code in the project may reference this class.
    """

    def __init__(self):
        super().__init__(name="Identify")

    def call(self, input, training=None):
        # Pure pass-through; 'training' is accepted only for Keras API
        # compatibility and is ignored.
        return input

"""
class SEBlock(tf.keras.layers.Layer):
    def __init__(self, input_channels, r=4):
        super(SEBlock, self).__init__()
        self.pool = tf.keras.layers.GlobalAveragePooling2D()
        self.fc1 = tf.keras.layers.Dense(units=input_channels // r)
        self.fc2 = tf.keras.layers.Dense(units=input_channels)

    def call(self, inputs, training=None, mask=None):
        branch = self.pool(inputs)
        branch = self.fc1(branch)
        branch = tf.nn.relu(branch)
        branch = self.fc2(branch)
        branch = h_sigmoid(branch)
        branch = tf.expand_dims(input=branch, axis=1)
        branch = tf.expand_dims(input=branch, axis=1)
        output = inputs * branch
        return output
"""


class SEBlock(tf.keras.layers.Layer):
    """Squeeze-and-Excitation block built from 1x1 convolutions.

    Global-average-pools the spatial dimensions, reduces channels by a factor
    of ``r`` through a 1x1 conv + ReLU ("squeeze"), restores them with a
    second 1x1 conv gated by a hard sigmoid ("excite"), and rescales the
    input channel-wise with the resulting gate.

    Args:
        input_channels: Number of channels of the input tensor.
        r: Channel reduction ratio for the squeeze conv. Default 4.
        l2_reg: L2 kernel regularization factor. Default 1e-5.
    """

    def __init__(self, input_channels, r=4, l2_reg=1e-5):
        super().__init__()
        self.input_channels = input_channels
        regularizer = tf.keras.regularizers.l2(l2_reg)
        self.dconv = tf.keras.layers.Conv2D(
            filters=input_channels // r,
            kernel_size=(1, 1),
            strides=1,
            padding="same",
            use_bias=False,
            kernel_regularizer=regularizer,
            name='Squeeze',
        )
        self.uconv = tf.keras.layers.Conv2D(
            filters=input_channels,
            kernel_size=(1, 1),
            strides=1,
            padding="same",
            use_bias=False,
            kernel_regularizer=regularizer,
            name='Excite',
        )

    def call(self, inputs, training=None, **kwargs):
        # Squeeze: average over the spatial axes, keeping (B, 1, 1, C) so the
        # gate broadcasts against 'inputs' below.
        gate = tf.reduce_mean(inputs, axis=(1, 2), keepdims=True)
        gate = tf.nn.relu(self.dconv(gate))
        # Excite: per-channel gate in [0, 1] via the hard sigmoid.
        gate = h_sigmoid(self.uconv(gate))
        return inputs * gate
#"""


class BottleNeck(tf.keras.layers.Layer):
    """MobileNetV3-style inverted-residual bottleneck block.

    Structure: 1x1 expand conv (in -> exp) -> depthwise kxk conv (stride s)
    -> optional squeeze-and-excite -> 1x1 projection conv (exp -> out), each
    followed by batch normalization. A residual shortcut is added when the
    spatial resolution and channel count are preserved (s == 1 and
    in_size == out_size).

    NOTE(review): the nonlinearity is also applied after the projection conv
    (bn3); the MobileNetV3 design uses a *linear* bottleneck there. Left
    unchanged to preserve trained-model behavior — confirm intent.

    Args:
        in_size: Input channel count (used only for the shortcut test).
        exp_size: Expanded (hidden) channel count.
        out_size: Output channel count.
        s: Stride of the depthwise convolution.
        is_se_existing: Whether to insert an SEBlock after the depthwise conv.
        NL: Nonlinearity selector: "HS" (hard swish) or "RE" (ReLU).
        k: Depthwise kernel size (k x k).
        l2_reg: L2 regularization factor for the regularized conv kernels.
        **kwargs: Forwarded to ``tf.keras.layers.Layer``.

    Raises:
        ValueError: If ``NL`` is not "HS" or "RE".
    """

    def __init__(self, in_size, exp_size, out_size, s, is_se_existing, NL, k, l2_reg=1e-5, **kwargs):
        super(BottleNeck, self).__init__(**kwargs)
        self.stride = s
        self.in_size = in_size
        self.out_size = out_size
        self.is_se_existing = is_se_existing

        # 1x1 expansion conv (note: no regularizer here, unlike conv2/dwconv).
        self.conv1 = tf.keras.layers.Conv2D(filters=exp_size,
                                            kernel_size=(1, 1),
                                            strides=1,
                                            padding="same",
                                            use_bias=False)
        self.bn1 = tf.keras.layers.BatchNormalization()

        self.dwconv = tf.keras.layers.DepthwiseConv2D(kernel_size=(k, k),
                                                      strides=s,
                                                      padding="same",
                                                      use_bias=False,
                                                      depthwise_regularizer=tf.keras.regularizers.l2(l2_reg),
                                                      name='depthwise')
        self.bn2 = tf.keras.layers.BatchNormalization()
        if is_se_existing:
            self.se = SEBlock(input_channels=exp_size)
        else:
            self.se = Identify()
        self.conv2 = tf.keras.layers.Conv2D(filters=out_size,
                                            kernel_size=(1, 1),
                                            strides=1,
                                            padding="same",
                                            use_bias=False,
                                            kernel_regularizer=tf.keras.regularizers.l2(l2_reg))
        self.bn3 = tf.keras.layers.BatchNormalization()

        if NL == "HS":
            self.act = h_swish
        elif NL == "RE":
            self.act = tf.nn.relu
        else:
            # Fail fast with a clear message: the previous code stored None
            # here, which crashed later in call() with an opaque TypeError.
            raise ValueError(f"Unknown nonlinearity NL={NL!r}; expected 'HS' or 'RE'")

    def call(self, inputs, training=None, mask=None):
        # 1x1 conv2d expand conv [in -> exp]
        x = self.conv1(inputs)
        x = self.bn1(x, training=training)
        x = self.act(x)

        # depth wise [exp -> exp]
        x = self.dwconv(x)
        x = self.bn2(x, training=training)

        # opt Squeeze and Excite [exp -> exp]; self.se is Identify() when
        # is_se_existing is False, so this is always safe to call.
        x = self.se(x, training=training)

        # 1x1 conv2d point wise [exp -> out]
        x = self.conv2(x)
        x = self.bn3(x, training=training)
        x = self.act(x)

        # opt short cut: only valid when shape is fully preserved.
        if self.stride == 1 and self.in_size == self.out_size:
            return tf.keras.layers.add([x, inputs])
        else:
            return x


class ConvBlock(tf.keras.layers.Layer):
    """Conv2D followed by optional batch normalization and activation.

    # Arguments
        # init
            filters: Integer, the dimensionality of the output space.
            kernel_size: An integer or tuple/list of 2 integers, the conv
                window size. Default=(3, 3)
            strides: An integer or tuple/list of 2 integers, the conv
                strides. Default=1
            padding: Padding scheme for the convolution. Default='same'
            use_bias: Whether the conv layer adds a bias term. Default=False
            activation: Callable activation (e.g. tf.nn.relu), or a falsy
                value to disable activation. Default=tf.nn.relu
            use_bn: Whether to apply batch normalization after the conv.
                Default=True
            l2_reg: L2 kernel regularization factor. Default=1e-5
        # call
            x: Tensor, input tensor of conv layer.
            training: Mode for training-aware layers (batch norm).
    # Returns
        Output tensor.
    """

    def __init__(self, filters, kernel_size=(3, 3), strides=1, padding='same',
                 use_bias=False, activation=tf.nn.relu, use_bn=True,
                 l2_reg=1e-5, **kwargs):
        super(ConvBlock, self).__init__(**kwargs)
        self.conv = tf.keras.layers.Conv2D(
            filters,
            kernel_size,
            padding=padding,
            strides=strides,
            use_bias=use_bias,
            kernel_regularizer=tf.keras.regularizers.l2(l2_reg),
        )
        # Disabled pieces fall back to the no-op Identify layer so call()
        # stays branch-free.
        self.act = activation if activation else Identify()
        self.bn = tf.keras.layers.BatchNormalization(momentum=0.99) if use_bn else Identify()

    def call(self, x, training=None, mask=None):
        out = self.conv(x)
        # Remove 'training' argument to convert to TFLite
        out = self.bn(out, training=training)
        return self.act(out)


class AttentionConvolution(tf.keras.layers.Layer):
    """Builds a spatial attention mask from features and applies it to images.

    A 1x1 conv produces a single-channel mask from ``inputs``; after L2
    normalization, values below ``theta`` are zeroed; the mask multiplies the
    resized ``images``, which are then concatenated to ``inputs`` channel-wise.

    NOTE(review): ``activation=tf.nn.softmax`` acts over the channel axis,
    which has size 1 here — softmax of a single logit is identically 1.0, so
    the raw mask is constant. Likewise ``l2_normalize(..., axis=0)``
    normalizes across the *batch* axis. Both look unintended but are
    preserved as-is; confirm against the model's intent.

    Args:
        size: (height, width) to resize ``images`` to; both entries must be > 0.
        theta: Threshold below which normalized mask values are zeroed.
            Default 0.5.
    """

    def __init__(self, size, theta=0.5, *args, **kwargs):
        super(AttentionConvolution, self).__init__(*args, **kwargs)
        # NOTE(review): assert is stripped under -O; consider raising ValueError.
        assert len(size) == 2 and size[0] > 0 and size[1] > 0
        self.aconv = tf.keras.layers.Conv2D(filters=1, kernel_size=(1, 1), strides=1, padding='same',
                                            use_bias=False, activation=tf.nn.softmax)
        self.size = size
        self.theta = theta

    def call(self, inputs, images, training=None, **kwargs):
        # Bug fix: 'training' was previously referenced here without being a
        # parameter, raising NameError on every call; it is now accepted
        # (default None, backward compatible) and forwarded.
        mask = self.aconv(inputs, training=training)
        mask = tf.math.l2_normalize(mask, axis=0)

        # Zero out weak attention values; keep the rest unscaled.
        imask = tf.where(mask < self.theta, tf.zeros_like(mask), mask)

        images = tf.image.resize(images, self.size)
        images = images * imask
        return tf.concat([images, inputs], axis=-1)