# -*- coding: UTF-8 -*-
from keras.models import Model
from keras.layers import Input, Conv2D, BatchNormalization, Activation, GlobalAveragePooling2D
from keras.layers import Reshape, add, Dropout, DepthwiseConv2D, ReLU
import tensorflow as tf
from keras import backend as K
from keras.utils.vis_utils import plot_model

def _conv_block(inputs, filters, kernel, strides):
    """Apply Conv2D -> BatchNormalization -> ReLU6 to `inputs`.

    # Arguments
        inputs: Tensor, input tensor of the convolution.
        filters: Integer, number of output channels.
        kernel: Integer or tuple/list of 2 integers, convolution window size.
        strides: Integer or tuple/list of 2 integers, convolution strides
            along width and height.

    # Returns
        Output tensor after convolution, batch normalization and ReLU6.
    """
    # BatchNorm must normalize over the channel axis, whose position
    # depends on the backend image data format.
    bn_axis = 1 if K.image_data_format() == "channels_first" else -1

    y = Conv2D(filters, kernel, padding="same", strides=strides)(inputs)
    y = BatchNormalization(axis=bn_axis)(y)
    return ReLU(max_value=6)(y)

def _bottleneck(inputs, filters, kernel, t, s, residual=False):
    """Single MobileNetV2 bottleneck: expand -> depthwise -> linear project.

    # Arguments
        inputs: Tensor, input tensor.
        filters: Integer, number of output channels of the projection.
        kernel: Integer or tuple/list of 2 integers, depthwise window size.
        t: Integer, channel expansion factor applied to the input depth.
        s: Integer, stride of the depthwise convolution.
        residual: Boolean, whether to add a shortcut from `inputs` to the
            output (requires matching shapes, i.e. s == 1 and the input
            depth equal to `filters`).

    # Returns
        Output tensor.
    """
    bn_axis = 1 if K.image_data_format() == "channels_first" else -1
    expanded = K.int_shape(inputs)[bn_axis] * t

    # 1x1 pointwise convolution expands the channel dimension by factor t.
    y = _conv_block(inputs, expanded, (1, 1), (1, 1))

    # Depthwise convolution filters each expanded channel independently.
    y = DepthwiseConv2D(kernel, strides=(s, s), depth_multiplier=1, padding="same")(y)
    y = BatchNormalization(axis=bn_axis)(y)
    y = ReLU(max_value=6)(y)

    # Linear 1x1 projection back down to `filters` channels — deliberately
    # no activation here (the "linear bottleneck" of MobileNetV2).
    y = Conv2D(filters, (1, 1), strides=(1, 1), padding="same")(y)
    y = BatchNormalization(axis=bn_axis)(y)

    return add([y, inputs]) if residual else y

def _inverted_residual_block(inputs, filters, k, t, s, n):
    """Stack `n` identical bottlenecks; only the first may downsample.

    # Arguments
        inputs: Tensor, input tensor.
        filters: Integer, number of output channels of each bottleneck.
        k: Integer or tuple/list of 2 integers, depthwise kernel size.
        t: Integer, channel expansion factor for every bottleneck.
        s: Integer, stride of the first bottleneck (spatial downsampling).
        n: Integer, how many bottlenecks to stack.

    # Returns
        Output tensor.
    """
    # The first bottleneck carries the requested stride and may change the
    # channel count, so no residual shortcut is used.
    x = _bottleneck(inputs, filters, k, t, s)

    # The remaining n-1 bottlenecks keep stride 1 and identical shapes,
    # which makes the residual shortcut valid.
    for _ in range(n - 1):
        x = _bottleneck(x, filters, k, t, 1, residual=True)

    return x

def MobileNet_v2(input_shape, k):
    """Build a MobileNetV2-style classifier.

    # Arguments
        input_shape: Tuple, shape of the input image, e.g. (224, 224, 3).
        k: Integer, number of output classes.

    # Returns
        A `keras.models.Model` mapping an image batch to a
        (batch, k) softmax distribution over classes.
    """
    inputs = Input(shape=input_shape)
    x = _conv_block(inputs, 32, (3, 3), strides=(2, 2))

    # NOTE(review): the original MobileNetV2 repeats some stages
    # (n = 1, 2, 3, 4, 3, 3, 1); every stage here uses n=1 — confirm this
    # simplification is intentional before training.
    x = _inverted_residual_block(x, 16, (3, 3), t=1, s=1, n=1)
    x = _inverted_residual_block(x, 24, (3, 3), t=6, s=2, n=1)
    x = _inverted_residual_block(x, 32, (3, 3), t=6, s=2, n=1)
    x = _inverted_residual_block(x, 64, (3, 3), t=6, s=2, n=1)
    x = _inverted_residual_block(x, 96, (3, 3), t=6, s=1, n=1)
    x = _inverted_residual_block(x, 160, (3, 3), t=6, s=2, n=1)
    x = _inverted_residual_block(x, 320, (3, 3), t=6, s=1, n=1)

    # Final 1x1 expansion, global pooling, then a 1x1 conv acting as the
    # fully-connected classifier head.
    x = _conv_block(x, 1280, (1, 1), strides=(1, 1))
    x = GlobalAveragePooling2D()(x)
    x = Reshape((1, 1, 1280))(x)
    x = Dropout(0.5, name="dropout")(x)
    x = Conv2D(k, (1, 1), padding="same")(x)

    x = Activation("softmax", name="softmax")(x)
    y = Reshape((k,))(x)

    net = Model(inputs, y)

    # Best-effort visualization: plot_model needs the optional pydot and
    # graphviz packages; their absence must not break model construction.
    try:
        plot_model(net, to_file="MobileNet_v2.png", show_shapes=False)
    except (ImportError, OSError) as e:
        print("Skipping model plot: %s" % e)

    return net

if __name__ == "__main__":
    # Smoke test: build an ImageNet-sized head and list the layer names.
    model = MobileNet_v2((114, 114, 3), 1000)
    for layer in model.layers:
        print(layer.name)