# -*- coding: utf-8 -*-
"""
******* 文档说明 ******
计算机视觉中的注意力机制（Visual Attention）
https://mp.weixin.qq.com/s/KKlmYOduXWqR74W03Kl-9A

# 当前项目: Cifar10-Classification
# 创建时间: 2019/6/22 13:38
# 开发作者: vincent
# 创建平台: PyCharm Community Edition
# 版    本: V1.0
"""
from tensorflow import keras
import tensorflow as tf
# Module-level aliases for the Keras layer/model classes used throughout this
# file, so the network-building code below can reference them without the
# full `keras.layers.` / `keras.models.` prefixes.
Input = keras.layers.Input
ZeroPadding2D = keras.layers.ZeroPadding2D
Activation = keras.layers.Activation
MaxPooling2D = keras.layers.MaxPooling2D
add = keras.layers.add
Dense = keras.layers.Dense
Conv2D = keras.layers.Conv2D
BatchNormalization = keras.layers.BatchNormalization
Flatten = keras.layers.Flatten

Model = keras.models.Model


def identity_block(input_tensor, kernel_size, filters, stage, block, trainable=True):
    """ResNet block whose shortcut is the identity (no conv on the skip path).

    # Arguments
        input_tensor: input tensor
        kernel_size: kernel size of the middle conv layer on the main path
        filters: list of 3 integers, filter counts of the 3 conv layers
        stage: integer, current stage label, used for generating layer names
        block: 'a','b'..., current block label, used for generating layer names
        trainable: whether the conv layers' weights are updated during training

    # Returns
        Output tensor for the block.
    """
    nb_filter1, nb_filter2, nb_filter3 = filters
    channel_axis = 3  # channels-last data format assumed
    conv_prefix = 'res{}{}_branch'.format(stage, block)
    bn_prefix = 'bn{}{}_branch'.format(stage, block)

    # Main path: 1x1 reduce -> KxK conv -> 1x1 expand, BN + ReLU between.
    out = Conv2D(nb_filter1, (1, 1),
                 name=conv_prefix + '2a', trainable=trainable)(input_tensor)
    out = BatchNormalization(axis=channel_axis, name=bn_prefix + '2a')(out)
    out = Activation('relu')(out)

    out = Conv2D(nb_filter2, kernel_size, padding='same',
                 name=conv_prefix + '2b', trainable=trainable)(out)
    out = BatchNormalization(axis=channel_axis, name=bn_prefix + '2b')(out)
    out = Activation('relu')(out)

    out = Conv2D(nb_filter3, (1, 1), name=conv_prefix + '2c',
                 trainable=trainable)(out)
    out = BatchNormalization(axis=channel_axis, name=bn_prefix + '2c')(out)

    # Identity shortcut: add the block input directly to the main path.
    return Activation('relu')(add([out, input_tensor]))


def conv_block(input_tensor, kernel_size, filters, stage, block, strides=(2, 2), trainable=True):
    """ResNet block with a projection conv layer on the shortcut path.

    # Arguments
        input_tensor: input tensor
        kernel_size: kernel size of the middle conv layer on the main path
        filters: list of 3 integers, filter counts of the 3 conv layers
        stage: integer, current stage label, used for generating layer names
        block: 'a','b'..., current block label, used for generating layer names
        strides: strides of the first conv layer (applied to the shortcut too)
        trainable: whether the conv layers' weights are updated during training

    # Returns
        Output tensor for the block.

    Note that from stage 3 onward the first conv layer on the main path uses
    strides=(2, 2), and the shortcut projection downsamples to match.
    """
    nb_filter1, nb_filter2, nb_filter3 = filters
    channel_axis = 3  # channels-last data format assumed
    conv_prefix = 'res{}{}_branch'.format(stage, block)
    bn_prefix = 'bn{}{}_branch'.format(stage, block)

    # Main path: strided 1x1 reduce -> KxK conv -> 1x1 expand.
    out = Conv2D(nb_filter1, (1, 1), strides=strides,
                 name=conv_prefix + '2a', trainable=trainable)(input_tensor)
    out = BatchNormalization(axis=channel_axis, name=bn_prefix + '2a')(out)
    out = Activation('relu')(out)

    out = Conv2D(nb_filter2, kernel_size, padding='same',
                 name=conv_prefix + '2b', trainable=trainable)(out)
    out = BatchNormalization(axis=channel_axis, name=bn_prefix + '2b')(out)
    out = Activation('relu')(out)

    out = Conv2D(nb_filter3, (1, 1), name=conv_prefix + '2c',
                 trainable=trainable)(out)
    out = BatchNormalization(axis=channel_axis, name=bn_prefix + '2c')(out)

    # Projection shortcut: 1x1 conv matches both channel count and spatial size.
    shortcut = Conv2D(nb_filter3, (1, 1), strides=strides,
                      name=conv_prefix + '1', trainable=trainable)(input_tensor)
    shortcut = BatchNormalization(axis=channel_axis,
                                  name=bn_prefix + '1')(shortcut)

    return Activation('relu')(add([out, shortcut]))


# Channel-domain attention module
def channel_attention(inputs, squeeze=16):
    """Squeeze-and-Excitation channel attention (https://arxiv.org/abs/1709.01507).

    The core idea of SENet: let the network learn per-channel weights from the
    loss, so effective feature maps get large weights while weak or useless
    feature maps get small ones.

    :param inputs:  input 4-D tensor (channels last)
    :param squeeze: reduction ratio of the bottleneck Dense layer,
                    used to shrink the channel count and cut computation
    :return: the input tensor reweighted per channel
    """
    channels = int(inputs.shape[-1])
    # Squeeze: global spatial average -> one scalar per channel.
    pooled = keras.layers.GlobalAveragePooling2D()(inputs)
    # Excite: bottleneck MLP ending in a sigmoid gate in [0, 1] per channel.
    gate = keras.layers.Dense(channels // squeeze, use_bias=False,
                              activation='relu', name='CA_fc1')(pooled)
    gate = keras.layers.Dense(channels, use_bias=False,
                              activation='sigmoid', name='CA_fc2')(gate)
    # Scale: broadcast the per-channel weights over the feature maps.
    return keras.layers.Multiply()([inputs, gate])


# Spatial-domain attention module
def spatial_attention(inputs):
    """Reweight the input with a learned single-channel spatial mask.

    Two small convolutions produce a sigmoid map of per-pixel weights that is
    broadcast-multiplied over all channels of the input.

    :param inputs: input 4-D tensor (channels last)
    :return: the input tensor reweighted per spatial position
    """
    mask = keras.layers.Conv2D(4, 3, strides=1, padding='same',
                               activation='relu', name='SA_con1')(inputs)
    mask = keras.layers.Conv2D(1, 3, strides=1, padding='same',
                               activation='sigmoid', name='SA_con2')(mask)
    return keras.layers.Multiply()([inputs, mask])


# Build the classification model
def inference(input_shape, output_shape, trainable=False, drop_out=None):
    """Build a truncated-ResNet classifier with spatial and channel attention.

    :param input_shape:  side length of the square RGB input image
    :param output_shape: number of output classes
    :param trainable:    whether the ResNet backbone conv weights are trainable
    :param drop_out:     dropout ratio; currently ignored (kept for interface
                         compatibility)
    :return: an un-compiled keras.models.Model
    """
    del drop_out  # accepted but unused

    img_input = Input(name='img_input', shape=(input_shape, input_shape, 3))

    # ##############################  ResNet feature extractor
    # NOTE(review): `trainable` is passed only to the Conv2D layers; the
    # BatchNormalization layers stay trainable regardless — confirm whether
    # that partial freeze is intended.
    with tf.name_scope('ResNet'):
        net = ZeroPadding2D(padding=(3, 3), name='conv1_pad')(img_input)
        net = Conv2D(64, (7, 7), strides=(2, 2), padding='valid',
                     name='conv1', trainable=trainable)(net)
        net = BatchNormalization(axis=3, name='bn_conv1')(net)
        net = Activation('relu')(net)
        net = MaxPooling2D((3, 3), strides=(2, 2))(net)

        net = conv_block(net, 3, [64, 64, 256], stage=2, block='a',
                         strides=(1, 1), trainable=trainable)
        net = identity_block(net, 3, [64, 64, 256], stage=2, block='b',
                             trainable=trainable)
        net = identity_block(net, 3, [64, 64, 256], stage=2, block='c',
                             trainable=trainable)
        # Stages 3-5 of the full ResNet-50 are deliberately omitted; only the
        # stem and stage 2 serve as the feature extractor here.

    # ##############################  Attention + classifier head
    with tf.name_scope('STN'):
        net = spatial_attention(net)

    with tf.name_scope('CTN'):
        net = channel_attention(net)

    # Two strided 1x1 convs shrink spatial size and channels before flattening.
    net = Conv2D(64, (1, 1), strides=(2, 2), padding='valid', name='my_conv1')(net)
    net = Conv2D(8, (1, 1), strides=(2, 2), padding='valid', name='my_conv2')(net)

    net = Flatten()(net)
    predictions = Dense(output_shape, activation='softmax', name='my_fc2')(net)

    return Model(inputs=img_input, outputs=predictions, name='ResNet_model')


if __name__ == '__main__':
    import json

    # Build the model and print its layer-by-layer structure.
    _model = inference(224, 10, trainable=False, drop_out=None)
    _model.summary()
    # Dump the full Keras config as pretty-printed JSON.
    _config = json.loads(_model.to_json())
    print('\nJSON Model Config:\n',
          json.dumps(_config, ensure_ascii=False, sort_keys=True, indent=4))
