"""
内容：多输入多输出模型与共享网络层
日期：2020年7月8日
作者：Howie
"""
from keras.models import Model
from keras.layers import Add, Concatenate, Conv2D, Dense, \
    GlobalAveragePooling2D, Input, MaxPooling2D, Multiply, Reshape
from keras.utils import plot_model

IMG_DIM = 28
N_CHANNELS = 1
GAMMA = 4

class Inception:
    """
    Inception block (GoogLeNet-style).

    Four parallel branches process the same input and are concatenated
    along the channel axis. For details see
    <Going Deeper with Convolutions>.
    """

    def __init__(self):
        # Input feature map.
        img = Input(shape=(IMG_DIM, IMG_DIM, N_CHANNELS), name='Input')

        # Branch 1: 1x1 convolution (dimensionality reduction + rectified
        # linear activation).
        branch1 = Conv2D(64, (1, 1), padding='same', activation='relu',
                         name='T1_Conv2D_1x1')(img)

        # Branch 2: 1x1 reduction followed by a 3x3 convolution.
        reduce3 = Conv2D(96, (1, 1), padding='same', activation='relu',
                         name='T2_Conv2D_1x1')(img)
        branch2 = Conv2D(128, (3, 3), padding='same', activation='relu',
                         name='Conv2D_3x3')(reduce3)

        # Branch 3: 1x1 reduction followed by a 5x5 convolution.
        reduce5 = Conv2D(16, (1, 1), padding='same', activation='relu',
                         name='T3_Conv2D_1x1')(img)
        branch3 = Conv2D(32, (5, 5), padding='same', activation='relu',
                         name='Conv2D_5x5')(reduce5)

        # Branch 4: 3x3 max-pooling followed by a 1x1 projection.
        pooled = MaxPooling2D(pool_size=(3, 3), strides=(1, 1),
                              padding='same', name='MaxPool_3x3')(img)
        branch4 = Conv2D(32, (1, 1), padding='same', activation='relu',
                         name='T4_Conv2D_1x1')(pooled)

        # Filter concatenation along the depth (channel) axis.
        merged = Concatenate(axis=3, name='Depth_Concat')(
            [branch1, branch2, branch3, branch4])

        self.model = Model(inputs=img, outputs=merged)
        plot_model(
            model=self.model,
            to_file='./logs/Demo4_Inception.pdf',
            show_shapes=True)


class ResidualBlock:
    """
    Residual block over a convolutional layer.

    Computes output = x + F(x), where F is a single 3x3 convolution.
    For the identity shortcut to be a true element-wise sum, F(x) must
    have the same shape as x, so the convolution emits N_CHANNELS
    filters. (The previous hard-coded ``filters=3`` only passed the
    ``Add`` through implicit channel broadcasting against the
    1-channel input, which is not a genuine residual connection.)

    See <Deep Residual Learning for Image Recognition> for details.
    """
    def __init__(self):
        # Input feature map of shape (IMG_DIM, IMG_DIM, N_CHANNELS).
        x = Input(shape=(IMG_DIM, IMG_DIM, N_CHANNELS), name='Input')
        # Weight layer F(x): 'same' padding keeps the spatial size and
        # filters=N_CHANNELS keeps the channel count, so shapes match.
        y = Conv2D(filters=N_CHANNELS, kernel_size=(3, 3),
                   padding='same')(x)
        # Identity shortcut: element-wise sum of input and residual.
        output = Add()([x, y])
        self.model = Model(inputs=x, outputs=output)
        plot_model(
            model=self.model,
            to_file='./logs/Demo4_ResidualBlock.pdf',
            show_shapes=True)

class SqueezeExcitationModule:
    """
    Squeeze-and-Excitation (SE) module.

    Recalibrates channel-wise feature responses: a global-average-pooled
    channel descriptor is passed through a two-layer bottleneck MLP, and
    the resulting per-channel weights (in (0, 1) via sigmoid) rescale
    the input feature map. See <Squeeze-and-Excitation Networks>.
    """
    def __init__(self):
        n_channels = 32  # channel count of the incoming feature map
        # Input: a feature map produced by a preceding convolution.
        input_img = Input(shape=(28, 28, n_channels), name='Input')
        # Squeeze: global information embedding — global average pooling
        # yields one scalar per channel, shape (batch, n_channels).
        squeeze_tensor = GlobalAveragePooling2D()(input_img)
        # Excitation: adaptive recalibration via a bottleneck of two
        # fully connected layers; GAMMA is the reduction ratio.
        fc_out1 = Dense(units=n_channels // GAMMA,
                        activation='relu')(squeeze_tensor)
        fc_out2 = Dense(units=n_channels, activation='sigmoid')(fc_out1)
        # Reshape the (batch, C) weights to (batch, 1, 1, C) so Multiply
        # broadcasts them over the spatial dimensions. Without this the
        # shapes (28, 28, 32) and (32,) are not broadcast-compatible and
        # building the model fails.
        scale_weights = Reshape((1, 1, n_channels))(fc_out2)
        scale = Multiply()([input_img, scale_weights])
        self.model = Model(inputs=input_img, outputs=scale)
        plot_model(
            model=self.model,
            to_file='./logs/Demo4_SqueezeExcitationModule.pdf',
            show_shapes=True)