from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from keras.layers import Conv2D, BatchNormalization, PReLU, Dropout, MaxPooling2D, GlobalAveragePooling2D
from keras.layers import Activation, Flatten, Dense, AveragePooling2D, SeparableConv2D, DepthwiseConv2D,concatenate
from keras import Input
from keras import layers
import keras.backend as k
from keras.models import Model


class Multi_models(object):
    """A collection of CNN classifiers built with the Keras functional API.

    Provides four architectures -- a plain stacked CNN, MobileNet v1,
    DenseNet, and Xception -- all taking single-channel (grayscale) square
    images of side ``size`` and ending in a softmax over ``CLASS`` labels.
    Every builder returns an *uncompiled* ``keras.models.Model``.
    """

    def __init__(self, CLASS, size):
        """
        Args:
            CLASS: number of output classes for the final softmax layer.
            size: input image side length; models expect (size, size, 1) input.
        """
        self._NUM_CLASS = CLASS
        # NOTE(review): not referenced by any builder below; kept so the
        # attribute stays available to any external code that reads it.
        self._num_istrainning = 81
        self.img_size = size
        # Two kernel initializers (He-style normal for conv layers, scaled
        # uniform for dense layers); exposed for callers that want them.
        self.CONV_KERNEL_INITIALIZER = {'class_name': 'VarianceScaling',
                                        'config': {'scale': 2.0, 'mode': 'fan_out', 'distribution': 'normal'}}

        self.DENSE_KERNEL_INITIALIZER = {'class_name': 'VarianceScaling',
                                         'config': {'scale': 1. / 3., 'mode': 'fan_out', 'distribution': 'uniform'}}

    def BN_prelu(self, x):
        """Apply BatchNormalization followed by a PReLU activation."""
        x = BatchNormalization()(x)
        x = PReLU()(x)
        return x

    def cnn_model(self):
        """Simple stacked-convolution baseline.

        Three conv stages (32 -> 64 -> 128 filters) with BN+PReLU and
        pooling, then a 512-unit dense head with heavy dropout.

        Returns:
            Uncompiled Model mapping (img_size, img_size, 1) inputs to a
            softmax over ``self._NUM_CLASS`` classes.
        """
        input_shape = (self.img_size, self.img_size, 1)
        inputs_dim = Input(input_shape)

        x = Conv2D(32, (3, 3), strides=(2, 2), padding='valid')(inputs_dim)
        x = self.BN_prelu(x)
        x = Conv2D(32, (3, 3), strides=(1, 1), padding='valid')(x)
        x = self.BN_prelu(x)
        x = MaxPooling2D(pool_size=(2, 2))(x)

        x = Conv2D(64, (3, 3), strides=(1, 1), padding='valid')(x)
        x = self.BN_prelu(x)
        x = Conv2D(64, (3, 3), strides=(1, 1), padding='valid')(x)
        x = self.BN_prelu(x)
        x = MaxPooling2D(pool_size=(2, 2))(x)

        x = Conv2D(128, (3, 3), strides=(1, 1), padding='valid')(x)
        x = self.BN_prelu(x)
        x = MaxPooling2D(pool_size=(2, 2))(x)
        x = Conv2D(128, (3, 3), strides=(1, 1), padding='valid')(x)
        x = self.BN_prelu(x)
        x = AveragePooling2D(pool_size=(2, 2))(x)

        x = Flatten()(x)

        x = Dense(512)(x)
        x = self.BN_prelu(x)
        # Aggressive dropout (keep-rate 0.3) to fight overfitting in the head.
        x = Dropout(0.7)(x)

        x = Dense(self._NUM_CLASS, activation="softmax")(x)

        model = Model(inputs=inputs_dim, outputs=x)
        # model.summary()
        return model

    # ------------------------------ MobileNet ------------------------------

    def relu6(self, x):
        """ReLU capped at 6, the activation used throughout MobileNet."""
        return k.relu(x, max_value=6)

    def _depthwise_conv_block(self, inputs, pointwise_conv_filters,
                              depth_multiplier=1, strides=(1, 1), block_id=1):
        """MobileNet depthwise-separable block.

        Splits a standard convolution into a 3x3 depthwise conv followed by
        a 1x1 pointwise conv, each with BN + relu6 -- roughly 8-9x cheaper
        than a standard 3x3 convolution of the same output width.

        Args:
            inputs: input tensor.
            pointwise_conv_filters: output channels of the 1x1 conv.
            depth_multiplier: channel multiplier for the depthwise conv.
            strides: strides of the depthwise conv (use 2 to downsample).
            block_id: integer used to build unique layer names.
        """
        x = DepthwiseConv2D((3, 3),
                            padding='same',
                            depth_multiplier=depth_multiplier,
                            strides=strides,
                            use_bias=False,
                            name='conv_dw_%d' % block_id)(inputs)

        x = BatchNormalization(name='conv_dw_%d_bn' % block_id)(x)
        x = Activation(self.relu6, name='conv_dw_%d_relu' % block_id)(x)

        x = Conv2D(pointwise_conv_filters, (1, 1),
                   padding='same',
                   use_bias=False,
                   strides=(1, 1),
                   name='conv_pw_%d' % block_id)(x)
        x = BatchNormalization(name='conv_pw_%d_bn' % block_id)(x)
        return Activation(self.relu6, name='conv_pw_%d_relu' % block_id)(x)

    def mobile(self, is_save=True, depth_multiplier=1, dropout=1e-2):
        """MobileNet v1, slimmed down: several repeated depthwise blocks are
        commented out to fit limited GPU memory.

        Args:
            is_save: unused; kept for backward compatibility with callers.
            depth_multiplier: depthwise channel multiplier for all blocks.
            dropout: dropout rate applied before the classification conv.

        Returns:
            Uncompiled Model over (img_size, img_size, 1) inputs.
        """
        input_dims = Input(shape=[self.img_size, self.img_size, 1])
        x = Conv2D(32, kernel_size=(3, 3), strides=(1, 1),
                   padding="same", use_bias=False, name="conv1")(input_dims)
        # Fix: the stem conv previously fed the first depthwise block with no
        # normalization or nonlinearity (a purely linear stem); the reference
        # MobileNet v1 applies BN + relu6 here.
        x = BatchNormalization(name="conv1_bn")(x)
        x = Activation(self.relu6, name="conv1_relu")(x)
        x = self._depthwise_conv_block(x, 64, depth_multiplier, block_id=1)
        x = self._depthwise_conv_block(x, 128, depth_multiplier, strides=2, block_id=2)
        # x = self._depthwise_conv_block(x, 128, depth_multiplier, block_id=3)
        x = self._depthwise_conv_block(x, 256, depth_multiplier, strides=2, block_id=4)
        # x = self._depthwise_conv_block(x, 256, depth_multiplier, block_id=5)
        x = self._depthwise_conv_block(x, 512, depth_multiplier, strides=2, block_id=6)

        for i in range(2):
            x = self._depthwise_conv_block(x, 512, depth_multiplier, block_id=7+i)

        x = self._depthwise_conv_block(x, 1024, depth_multiplier, strides=2, block_id=12)
        # x = self._depthwise_conv_block(x, 1024, depth_multiplier, block_id=13)

        # Classification head: GAP -> 1x1 conv acting as a dense layer.
        x = GlobalAveragePooling2D()(x)
        x = layers.Reshape((1, 1, 1024), name="reshape_1")(x)
        x = Dropout(dropout, name="dropout")(x)
        x = Conv2D(self._NUM_CLASS, (1, 1), padding="same", name="conv_preb")(x)
        x = Activation("softmax", name="act_softmax")(x)
        x = layers.Reshape((self._NUM_CLASS,), name="reshape_2")(x)

        model = Model(input_dims, x, name="mobilenet_1_0_128_tf")
        return model

    # ------------------------------ DenseNet -------------------------------

    def BN_act_conv_drop(self, x, num_out, ksize):
        """BN -> PReLU -> conv, the composite layer used inside dense blocks."""
        x = self.BN_prelu(x)
        x = Conv2D(num_out, kernel_size=ksize, strides=(1, 1), padding='same')(x)
        return x

    def dense_block(self, x, layers, grouth):
        """DenseNet dense block: ``layers`` bottleneck (1x1 then 3x3) conv
        pairs, each concatenated onto the running feature map so every layer
        sees all previous feature maps.

        Note: the ``layers`` parameter shadows the ``keras.layers`` module
        inside this method; the module is not used here, so this is safe.
        """
        for i in range(layers):
            _1x1 = self.BN_act_conv_drop(x, 4 * grouth, (1, 1))
            _3x3 = self.BN_act_conv_drop(_1x1, grouth, (3, 3))
            x = concatenate([x, _3x3], axis=3)
        return x

    def transition(self, x, num_out):
        """DenseNet transition layer: 1x1 conv then 2x2 average pooling."""
        x = self.BN_act_conv_drop(x, num_out, (1, 1))
        x = AveragePooling2D(pool_size=(2, 2))(x)
        return x

    def reduce_dim(self, x):
        """Return half the channel count of ``x`` (DenseNet compression 0.5)."""
        compression_rate = 0.5
        return int(int(x.shape[-1]) * compression_rate)

    def Dense_mdoel(self, input_shape=(128, 128, 1)):
        """DenseNet classifier, loosely following DenseNet-121 but with the
        dense blocks shortened (3/3/6/4 layers) to fit limited GPU memory.

        Args:
            input_shape: input tensor shape. NOTE(review): defaults to
                (128, 128, 1) rather than using ``self.img_size`` like the
                other builders -- confirm this is intentional.
        """
        input_dims = Input(input_shape)
        grouth = 24  # growth rate: channels added by each dense layer
        x = Conv2D(2 * grouth, kernel_size=(7, 7), strides=(2, 2), padding='same')(input_dims)
        x = MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(x)

        # Dense block 1 + transition
        x = self.dense_block(x, 3, grouth)
        x = self.transition(x, self.reduce_dim(x))

        # Dense block 2 + transition
        x = self.dense_block(x, 3, grouth)
        x = self.transition(x, self.reduce_dim(x))

        # Dense block 3 + transition
        x = self.dense_block(x, 6, grouth)
        x = self.transition(x, self.reduce_dim(x))

        # Dense block 4 + classification head
        x = self.dense_block(x, 4, grouth)
        x = self.BN_prelu(x)
        x = Flatten()(x)
        x = Dense(self._NUM_CLASS)(x)
        x = Activation('softmax')(x)

        model = Model(inputs=input_dims, outputs=x)
        return model

    # ------------------------------ Xception -------------------------------

    def x_inception(self):
        """Xception built from scratch (entry / middle / exit flow), adapted
        to single-channel inputs. Originally intended for transfer learning;
        the weight-loading code is left commented out below.
        """
        input_shape = (self.img_size, self.img_size, 1)
        input_dims = Input(input_shape)

        # ---- Entry flow ----
        # block1: two plain convs
        x = Conv2D(32, (3, 3), strides=(2, 2), use_bias=False, name='block1_conv1')(input_dims)
        x = BatchNormalization(name="block1_conv1_bn")(x)
        # Fix: layer name typo 'bock1_conv1_act' -> 'block1_conv1_act' for
        # consistency with every other layer name (and by-name weight loading).
        x = Activation('relu', name='block1_conv1_act')(x)
        x = Conv2D(64, (3, 3), use_bias=False, name="block1_conv2")(x)
        x = BatchNormalization(name="block1_conv2_bn")(x)
        x = Activation('relu', name="block1_conv2_act")(x)

        # block2: separable convs with a 1x1 projection shortcut (64 -> 128)
        residual = Conv2D(128, (1, 1), strides=(2, 2), padding="same", use_bias=False)(x)
        residual = BatchNormalization()(residual)

        x = SeparableConv2D(128, (3, 3), padding="same", use_bias=False, name="block2_separe1")(x)
        x = BatchNormalization(name="block2_separe1_bn")(x)
        x = Activation('relu', name="block2_separe1_act")(x)
        x = SeparableConv2D(128, (3, 3), padding="same", use_bias=False, name="block2_separe2")(x)
        x = BatchNormalization(name="block2_separe2_bn")(x)
        x = MaxPooling2D((3, 3), strides=(2, 2), padding="same", name="block2_pool")(x)
        x = layers.add([x, residual])

        # block3 (128 -> 256)
        residual = Conv2D(256, (1, 1), strides=(2, 2), padding="same", use_bias=False)(x)
        residual = BatchNormalization()(residual)

        x = Activation('relu', name="block3_act")(x)
        x = SeparableConv2D(256, (3, 3), padding="same", use_bias=False, name="block3_separe1")(x)
        x = BatchNormalization(name="block3_separe1_bn")(x)
        x = Activation('relu', name="block3_separe1_act")(x)
        x = SeparableConv2D(256, (3, 3), padding="same", use_bias=False, name="block3_separe2")(x)
        # Fix: this BatchNormalization was missing, unlike block2/block4 and
        # the reference Xception (block3_sepconv2_bn).
        x = BatchNormalization(name="block3_separe2_bn")(x)
        x = MaxPooling2D((3, 3), strides=(2, 2), padding="same", name="block3_pool")(x)
        x = layers.add([x, residual])

        # block4 (256 -> 728)
        residual = Conv2D(728, (1, 1), strides=(2, 2), padding="same", use_bias=False)(x)
        residual = BatchNormalization()(residual)

        x = Activation('relu', name="block4_act")(x)
        x = SeparableConv2D(728, (3, 3), padding="same", use_bias=False, name="block4_separe1")(x)
        x = BatchNormalization(name="block4_separe1_bn")(x)
        x = Activation('relu', name="block4_separe1_act")(x)
        x = SeparableConv2D(728, (3, 3), padding="same", use_bias=False, name="block4_separe2")(x)
        x = BatchNormalization(name="block4_separe2_bn")(x)
        x = MaxPooling2D((3, 3), strides=(2, 2), padding="same", name="block4_pool")(x)
        x = layers.add([x, residual])

        # ---- Middle flow: blocks 5-12, identity shortcuts ----
        for i in range(8):
            residual = x
            prefix = "block" + str(i+5)

            x = Activation('relu', name=prefix + "_act")(x)
            x = SeparableConv2D(728, (3, 3), padding="same", use_bias=False, name=prefix + "_speare1")(x)
            x = BatchNormalization(name=prefix+'_separe1_bn')(x)
            x = Activation('relu', name=prefix+'_separe1_act')(x)
            x = SeparableConv2D(728, (3, 3), padding="same", use_bias=False, name=prefix + "_speare2")(x)
            x = BatchNormalization(name=prefix + '_separe2_bn')(x)
            x = Activation('relu', name=prefix + '_separe2_act')(x)
            x = SeparableConv2D(728, (3, 3), padding="same", use_bias=False, name=prefix + "_speare3")(x)
            x = BatchNormalization(name=prefix + '_separe3_bn')(x)
            x = Activation('relu', name=prefix + '_separe3_act')(x)

            x = layers.add([x, residual])

        # ---- Exit flow ----
        # block13 (728 -> 1024) with projection shortcut
        residual = Conv2D(1024, (1, 1), strides=(2, 2), padding='same', use_bias=False)(x)
        residual = BatchNormalization()(residual)

        x = Activation('relu', name='block13_separe1_act')(x)
        x = SeparableConv2D(728, (3, 3), padding='same', use_bias=False, name='block13_separe1')(x)
        x = BatchNormalization(name='block13_separe1_bn')(x)
        x = Activation('relu', name='block13_separe2_act')(x)
        x = SeparableConv2D(1024, (3, 3), padding='same', use_bias=False, name='block13_separe2')(x)
        x = BatchNormalization(name='block13_separe2_bn')(x)

        x = MaxPooling2D((3, 3), strides=(2, 2), padding='same', name='block13_pool')(x)
        x = layers.add([x, residual])

        # block14: widen to 1536/2048 then classify
        x = SeparableConv2D(1536, (3, 3), padding='same', use_bias=False, name='block14_sepconv1')(x)
        x = BatchNormalization(name='block14_sepconv1_bn')(x)
        x = Activation('relu', name='block14_sepconv1_act')(x)

        x = SeparableConv2D(2048, (3, 3), padding='same', use_bias=False, name='block14_sepconv2')(x)
        x = BatchNormalization(name='block14_sepconv2_bn')(x)
        x = Activation('relu', name='block14_sepconv2_act')(x)

        x = GlobalAveragePooling2D(name='avg_pool')(x)
        x = Dense(self._NUM_CLASS, activation='softmax', name='predictions')(x)

        inputs = input_dims
        model = Model(inputs, x, name="xception")
        # Transfer-learning hooks, left disabled:
        # load_name = "logs/xception_weights_tf_dim_ordering_tf_kernels_notop.h5"
        # model.load_weights(load_name, by_name=True)
        # if is_save:
        #     for layer in model.layers[:-5]:
        #         layer.trainable = False
        #     model.save_weights("logs/first.h5")
        # else:
        #     for i in range(len(model.layers)):
        #         model.layers[i].trainable = True
        #     model.save_weights("logs/last.h5")

        return model


if __name__ == "__main__":
    # Smoke test: build the Xception variant and print its layer summary.
    builder = Multi_models(CLASS=100, size=128)
    net = builder.x_inception()
    net.summary()