import tensorflow as tf
from tensorflow.keras import layers

'''
Environment: TensorFlow-GPU 2.1.0, cuDNN 7.6.5, GTX 1060 (driver v462.42).
GPU -> parallel computation; CPU -> serial computation.
Keras is an API design specification; tf.keras is TensorFlow's implementation of it.
To exploit the GPU's parallel compute capability, a network typically processes
several samples at once during the forward pass — this scheme is called
(mini-)batch training.
'''


class AlexNet(tf.keras.Model):
    """AlexNet (Krizhevsky et al., 2012) as a subclassed Keras model.

    Expects inputs of shape (batch, 227, 227, 3) and returns softmax
    class probabilities of shape (batch, num_classes).
    """

    def __init__(self, *args, num_classes=275, **kwargs):
        """Build the layer stack.

        Args:
            *args: Forwarded to ``tf.keras.Model``.
            num_classes: Size of the softmax output head (default 275,
                matching the original hard-coded value — backward compatible).
            **kwargs: Forwarded to ``tf.keras.Model``.
        """
        super().__init__(*args, **kwargs)
        # Block 1: large-stride conv + overlapping max-pool.
        # NOTE: the original passed input_shape=(227, 227, 3) here, but that
        # argument is ignored for layers inside a subclassed model; the
        # expected input size is documented on the class instead.
        self.c1 = layers.Conv2D(filters=96, kernel_size=(11, 11), strides=4,
                                activation='relu')
        self.p1 = layers.MaxPool2D(pool_size=(3, 3), strides=2)

        # Block 2.
        self.c2 = layers.Conv2D(filters=256, kernel_size=(5, 5), activation='relu', padding='same')
        self.p2 = layers.MaxPool2D(pool_size=(3, 3), strides=2)

        # Block 3: three stacked 3x3 convs, then a final pool.
        self.c3 = layers.Conv2D(filters=384, kernel_size=(3, 3), padding='same', activation='relu')
        self.c4 = layers.Conv2D(filters=384, kernel_size=(3, 3), padding='same', activation='relu')
        self.c5 = layers.Conv2D(filters=256, kernel_size=(3, 3), padding='same', activation='relu')
        self.p3 = layers.MaxPool2D(pool_size=(3, 3), strides=2)

        # Classifier head: two dropout-regularized dense layers + softmax.
        self.flatten = layers.Flatten()
        self.f1 = layers.Dense(4096, activation='relu')
        self.d1 = layers.Dropout(0.5)
        self.f2 = layers.Dense(4096, activation='relu')
        self.d2 = layers.Dropout(0.5)
        self.f3 = layers.Dense(num_classes, activation='softmax')

    # FIX: removed @tf.function — TF docs advise against decorating a
    # subclassed model's call(); Keras wraps it in a tf.function itself when
    # the model is compiled, and the decorator can break training-argument
    # handling and model saving.
    def call(self, inputs, training=None, mask=None):
        """Forward pass.

        Args:
            inputs: Image batch, shape (batch, 227, 227, 3).
            training: True during training — enables dropout; False/None
                disables it (inference).
            mask: Unused; kept for Keras API compatibility.

        Returns:
            Softmax probabilities, shape (batch, num_classes).
        """
        x = self.c1(inputs)
        x = self.p1(x)

        x = self.c2(x)
        x = self.p2(x)

        x = self.c3(x)
        x = self.c4(x)
        x = self.c5(x)
        x = self.p3(x)

        x = self.flatten(x)
        x = self.f1(x)
        # FIX: pass training explicitly so dropout is active only during
        # training, instead of relying on implicit call-context propagation.
        x = self.d1(x, training=training)
        x = self.f2(x)
        x = self.d2(x, training=training)
        y = self.f3(x)
        return y
