import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers, Model

# (1) Grouped convolution block
def group_conv(inputs, filters, stride, num_groups):
    '''
    Grouped 3x3 convolution followed by BatchNorm and ReLU.

    inputs: input feature map, shape [b, h, w, c]
    filters: number of output channels of each group's convolution
    stride: stride of the grouped convolution
    num_groups: number of groups to split the channels into

    Returns a feature map with num_groups * filters channels.
    '''
    # Split by the actual input channel count rather than by the output
    # filter count, so the split is correct even when the number of input
    # channels differs from filters * num_groups.
    in_channels = inputs.shape[-1]
    if in_channels % num_groups != 0:
        raise ValueError(
            f'input channels ({in_channels}) must be divisible by '
            f'num_groups ({num_groups})')
    channels_per_group = in_channels // num_groups

    # Collect each group's convolution output
    groupList = []

    for i in range(num_groups):  # iterate over the groups
        # Take this group's slice of the input channels
        x = inputs[:, :, :, i*channels_per_group: (i+1)*channels_per_group]
        # Apply an independent 3x3 convolution to this group;
        # no bias because BatchNorm follows.
        x = layers.Conv2D(filters, kernel_size=3, strides=stride, padding='same', use_bias=False)(x)
        # Save this group's result
        groupList.append(x)

    # Stack the per-group outputs back together along the channel axis
    x = layers.Concatenate()(groupList)

    x = layers.BatchNormalization()(x)  # batch normalization

    x = layers.Activation('relu')(x)  # activation

    return x

# (2) One residual unit
def res_block(inputs, out_channel, stride, shortcut, num_groups=32):
    '''
    One ResNeXt bottleneck residual unit.

    inputs: input feature map
    out_channel: output channels of the final 1x1 convolution
    stride=2: downsampling unit, spatial size halved, the shortcut branch
        is convolved before being added to the output
    stride=1: basic unit, size unchanged, identity residual connection
    num_groups: number of groups for the 3x3 grouped convolution
    shortcut: truthy when the shortcut needs a 1x1 conv to match the main
        branch's shape; falsy for an identity shortcut
    '''
    # Shortcut branch. Use plain truthiness instead of `is True`/`is False`
    # identity checks, so `residual` is always defined even when callers
    # pass a truthy/falsy non-bool value.
    if shortcut:
        # 1x1 conv so the shortcut matches the main branch's size/channels
        residual = layers.Conv2D(out_channel, kernel_size=1, strides=stride, padding='same', use_bias=False)(inputs)
        # With a BN layer following, the conv needs no bias
        residual = layers.BatchNormalization()(residual)
    else:
        # Identity shortcut: connect input directly to the output
        residual = inputs

    # 1x1 conv; output channels are half of the final 1x1 conv's output
    x = layers.Conv2D(filters = out_channel//2, kernel_size=1, strides=1,
                      padding = 'same', use_bias = False)(inputs)

    x = layers.BatchNormalization()(x)
    x = layers.Activation('relu')(x)

    # 3x3 grouped convolution
    group_filters = (out_channel//2) // num_groups  # output channels per group
    x = group_conv(x, filters = group_filters, stride = stride, num_groups = num_groups)

    # 1x1 conv expanding back up to out_channel
    x = layers.Conv2D(filters = out_channel, kernel_size = 1, strides = 1,
                      padding = 'same', use_bias = False)(x)

    x = layers.BatchNormalization()(x)

    # Residual add; x and the shortcut have matching shapes by construction
    x = layers.Add()([x, residual])
    x = layers.Activation('relu')(x)

    return x

# (3) One residual stage
def stage(x, num, out_channel, first_stride):
    '''
    Stack `num` residual units into one ResNeXt stage.

    The first unit uses `first_stride` (1 or 2) and a projection shortcut
    to adjust the channel count; the remaining units are identity blocks
    with stride 1.
    '''
    # Stride schedule: first_stride for unit 0, then all ones.
    strides = [first_stride] + [1] * (num - 1)

    for index, unit_stride in enumerate(strides):
        # Only the first unit needs the projection shortcut.
        x = res_block(x, out_channel, stride=unit_stride, shortcut=(index == 0))

    # Output of the whole stage
    return x

# (4) Network backbone
def resnext(input_shape, classes):
    '''
    Build the ResNeXt backbone as a keras Model.

    input_shape: shape of the input image, e.g. [224, 224, 3]
    classes: number of classification categories

    Returns a Model that outputs unactivated logits.
    '''
    # Input layer
    inputs = keras.Input(shape=input_shape)

    # Stem: 7x7 standard conv, [224,224,3] ==> [112,112,64]
    x = layers.Conv2D(filters=64, kernel_size=7, strides=2,
                      padding='same', use_bias=False)(inputs)
    x = layers.BatchNormalization()(x)
    x = layers.Activation('relu')(x)

    # Max pooling: [112,112,64] ==> [56,56,64]
    x = layers.MaxPooling2D(pool_size=(3, 3), strides=2, padding='same')(x)

    # Four stages, configured as (units, out_channels, first stride):
    # [56,56,64] ==> [56,56,256] ==> [28,28,512]
    #            ==> [14,14,1024] ==> [7,7,2048]
    stage_configs = [
        (3, 256, 1),
        (4, 512, 2),
        (6, 1024, 2),
        (3, 2048, 2),
    ]
    for num, out_channel, first_stride in stage_configs:
        x = stage(x, num=num, out_channel=out_channel, first_stride=first_stride)

    # Global average pooling: [7,7,2048] ==> [None,2048]
    x = layers.GlobalAveragePooling2D()(x)

    # Classifier head: [None,2048] ==> [None,classes]
    logits = layers.Dense(classes)(x)  # logits only, no softmax

    # Assemble and return the model
    return Model(inputs, logits)

# (5) Build and inspect the model
if __name__ == '__main__':

    # 224x224 RGB input, 1000 output classes (ImageNet-style)
    model = resnext(input_shape=[224, 224, 3],
                    classes=1000)

    # Print the network architecture
    model.summary()
