import tensorflow as tf
from tensorflow.keras import layers

'''
使用ResNet18网络 - 引入了层间残差跳连, 缓解了梯度消失, 使神经网络层数增加成为可能
ResNet作者认为, 单纯'堆叠'神经网络层数, 会使得神经网络退化, 以至于后边的特征丢失了前面特征的原本模样
ResNet块中的'+'是特征图对应元素值相加(两个矩阵对应元素做加法), 而Inception块的'+'是沿深度方向叠加(特征图叠加)
ResNet块中有两种情况:
    1. 特征map维度不同(经过卷积操作后改变了特征map数量): 需要调整(未经过卷积)操作的特征map, 
    借助1*1卷积结合步长调整
    2. 特征map维度相同: 直接对应元素做加法 
'''


class ResNetBlock(tf.keras.Model):
    """Basic ResNet building block: two 3x3 convs with an identity (or 1x1-projected) skip.

    The '+' in a ResNet block is an element-wise addition of feature maps
    (unlike Inception, which concatenates along the channel axis).
    """

    def __init__(self, filters, strides=1, residual_path=False):
        """
        :param filters: number of convolution kernels (output channels)
        :param strides: stride of the first conv; 2 downsamples the feature map (default 1)
        :param residual_path: True when the main path changes the feature-map
            shape (channels and/or spatial size), so the shortcut must be
            projected with a strided 1x1 conv before the addition
        """
        super(ResNetBlock, self).__init__()
        self.filters = filters
        self.strides = strides
        self.residual_path = residual_path
        # main path: conv -> BN -> ReLU -> conv -> BN
        self.cov_1 = layers.Conv2D(filters=filters, kernel_size=(3, 3),
                                   strides=strides, padding='same', use_bias=False)
        self.bn_1 = layers.BatchNormalization()
        self.ac_1 = layers.Activation('relu')
        self.cov_2 = layers.Conv2D(filters=filters, kernel_size=(3, 3),
                                   strides=1, padding='same', use_bias=False)
        self.bn_2 = layers.BatchNormalization()
        # When residual_path is True (dashed shortcut in the paper's figure),
        # downsample/project the input with a 1x1 conv so that x matches F(x)
        # in shape and the element-wise addition is valid.
        if residual_path:
            self.down_cov_1 = layers.Conv2D(filters=filters, kernel_size=(1, 1),
                                            strides=strides, padding='same', use_bias=False)
            self.down_bn_1 = layers.BatchNormalization()
        self.ac_2 = layers.Activation('relu')

    def call(self, inputs, training=None, mask=None):
        # The shortcut branch: the (possibly projected) input added to the conv output.
        residual = inputs
        # Main path. NOTE: the training flag must be forwarded to every
        # BatchNormalization layer — otherwise BN ignores train/inference mode
        # and its moving statistics are updated (or used) incorrectly.
        x = self.cov_1(inputs)
        x = self.bn_1(x, training=training)
        x = self.ac_1(x)

        x = self.cov_2(x)
        x = self.bn_2(x, training=training)

        if self.residual_path:
            residual = self.down_cov_1(inputs)
            residual = self.down_bn_1(residual, training=training)

        # Element-wise addition of feature maps, then the final ReLU.
        y = self.ac_2(x + residual)
        return y


class ResNet18(tf.keras.Model):
    """ResNet-18-style classifier: stem conv -> stacked ResNet blocks -> GAP -> softmax.

    Residual skip connections mitigate vanishing gradients and the degradation
    problem of naively stacked deep networks, enabling deeper models.
    """

    def __init__(self, block_list, initial_filters=64, num_classes=275):
        """
        :param block_list: number of ResNetBlocks per stage, e.g. [2, 2, 2, 2]
            for the classic ResNet-18 layout (8 blocks total)
        :param initial_filters: channel count of the first stage; doubled at
            each subsequent stage (default 64)
        :param num_classes: size of the softmax output layer (default 275,
            kept for backward compatibility with the original hard-coded value)
        """
        super(ResNet18, self).__init__()
        self.num_blocks = len(block_list)
        self.block_list = block_list
        self.out_filters = initial_filters
        # Stem: 3x3 conv -> BN -> ReLU (no bias before BN).
        self.cov_1 = layers.Conv2D(self.out_filters, (3, 3), strides=1, padding='same',
                                   use_bias=False, kernel_initializer='he_normal')
        self.bn_1 = layers.BatchNormalization()
        self.ac_1 = layers.Activation('relu')
        self.blocks = tf.keras.models.Sequential()
        # Build the residual stages.
        for block_id in range(self.num_blocks):
            for sub_block in range(block_list[block_id]):
                # Every stage except the first downsamples in its first block:
                # stride 2 plus a projected (dashed) shortcut.
                if block_id != 0 and sub_block == 0:
                    block = ResNetBlock(self.out_filters, strides=2, residual_path=True)
                else:
                    block = ResNetBlock(self.out_filters, residual_path=False)
                self.blocks.add(block)
            # Each stage uses twice as many filters as the previous one.
            self.out_filters *= 2
        self.avp_1 = layers.GlobalAveragePooling2D()
        self.fc_1 = layers.Dense(num_classes, activation='softmax',
                                 kernel_regularizer=tf.keras.regularizers.l2())

    def call(self, inputs, training=None, mask=None):
        # Forward the training flag so BatchNormalization (in the stem and
        # inside every ResNetBlock) behaves correctly in train vs. inference.
        x = self.cov_1(inputs)
        x = self.bn_1(x, training=training)
        x = self.ac_1(x)
        x = self.blocks(x, training=training)
        x = self.avp_1(x)
        y = self.fc_1(x)
        return y
