from tensorflow.keras.layers import Dense, MaxPooling2D, BatchNormalization, \
    Activation, Conv2D, Flatten, Dropout
from tensorflow.keras.models import Sequential
from tensorflow.keras import backend
from tensorflow.keras.regularizers import l1


class Self_Net:
    """A small VGG-style CNN: three Conv/BN/Pool blocks plus a dense head."""

    @staticmethod
    def build(*, width: int, height: int, depth: int, classes: int) -> Sequential:
        """Construct and return the (uncompiled) Sequential model.

        Args:
            width: Input image width in pixels.
            height: Input image height in pixels.
            depth: Number of input channels (e.g. 3 for RGB).
            classes: Number of output categories (softmax units).

        Returns:
            The assembled ``Sequential`` model; caller is expected to compile it.
        """
        # Arrange the input shape and BatchNorm axis to match the backend's
        # data layout ("channels_first" vs. the default "channels_last").
        if backend.image_data_format() == "channels_first":
            input_shape, bn_axis = (depth, height, width), 1
        else:
            input_shape, bn_axis = (height, width, depth), -1

        model = Sequential()

        # Three conv blocks as (filters, kernel_regularizer) pairs.
        # Only the middle block carries an L1 penalty.
        block_specs = [(64, None), (128, l1(0.01)), (64, None)]
        for idx, (filters, reg) in enumerate(block_specs):
            conv_kwargs = {"padding": "same"}
            if idx == 0:
                # The first layer declares the network's input shape.
                conv_kwargs["input_shape"] = input_shape
            if reg is not None:
                conv_kwargs["kernel_regularizer"] = reg
            model.add(Conv2D(filters, (3, 3), **conv_kwargs))
            model.add(Activation("relu"))
            model.add(BatchNormalization(axis=bn_axis))
            model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
            model.add(Dropout(0.25))

        # Fully-connected head.
        model.add(Flatten())
        model.add(Dense(256))
        model.add(Activation("relu"))
        model.add(BatchNormalization())
        model.add(Dropout(0.5))

        # Classifier output.
        model.add(Dense(classes))
        model.add(Activation("softmax"))

        return model

# if __name__ == '__main__':
#     s = Self_Net.build(width=32, height=32, depth=3, classes=10)
