# 1. Complete the following steps
# 2. Import the required packages
import matplotlib.pyplot as plt
from keras.datasets import mnist
import tensorflow as tf

# Fix the global random seed so weight initialization is reproducible.
tf.random.set_seed(18)

# Load the MNIST dataset, already split into train and test partitions.
(trax, tray), (tesx, tesy) = mnist.load_data()

# Preprocess: add a trailing channel axis and scale pixels into [0, 1].
trax = trax.reshape(-1, 28, 28, 1) / 255.0
tesx = tesx.reshape(-1, 28, 28, 1) / 255.0
print(trax.shape)
# 4. Define the VGG16 model
# 5. Create a model class named VGG16 that inherits from Model, following the
#    diagram below; use padding='same' and the relu activation function.
from keras import Model, layers, Sequential, optimizers, activations, losses


class VGG16(Model):
    """A small VGG-style CNN classifier for 28x28x1 MNIST digits.

    Architecture: four convolutional stages (16x2, 32x2, 64x3, 128x3
    filters, all 3x3 'same' convs with relu, each stage ending in a
    2x2 max-pool), followed by a flatten and a dense head with dropout
    and a 10-way softmax output.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        def _conv(filters):
            # 3x3 'same' convolution with relu, per the exercise spec.
            return layers.Conv2D(filters=filters, kernel_size=(3, 3),
                                 activation=activations.relu, padding='same')

        # Build the feature extractor: (filters, repeats) per stage,
        # each stage followed by a default 2x2 max-pooling layer.
        feature_layers = []
        for filters, repeats in ((16, 2), (32, 2), (64, 3), (128, 3)):
            feature_layers.extend(_conv(filters) for _ in range(repeats))
            feature_layers.append(layers.MaxPooling2D())
        self.conv = Sequential(feature_layers)

        self.flat = layers.Flatten()

        # Classification head: two 64-unit relu layers with 40% dropout,
        # then a 10-class softmax output.
        self.func = Sequential([
            layers.Dense(units=64, activation=activations.relu),
            layers.Dropout(0.4),
            layers.Dense(units=64, activation=activations.relu),
            layers.Dropout(0.4),
            layers.Dense(units=10, activation=activations.softmax),
        ])

    def call(self, inputs, training=None, mask=None):
        """Run the forward pass: conv features -> flatten -> dense head."""
        features = self.conv(inputs)
        flattened = self.flat(features)
        return self.func(flattened)


# 35. Create an instance of the VGG16 model.
model = VGG16()
# 36. Specify the model's input shape: (batch, height, width, channels).
model.build(input_shape=[None, 28, 28, 1])
# 37. Print the model's summary information.
model.summary()
# 38. Compile and train the same architecture under several optimizers and
# compare their training losses. Only the first `choose` samples are used
# to keep runtime manageable.
choose = 2000
optimizer_configs = [
    (optimizers.Adam(), 'r', 'Adam'),
    (optimizers.RMSprop(), 'b', 'RMSprop'),
    (optimizers.Adagrad(), 'g', 'Adagrad'),
]
for index, (optimizer, color, label) in enumerate(optimizer_configs):
    # The first run reuses the already-built model above; later runs get a
    # fresh instance so each optimizer starts from newly initialized
    # weights rather than continuing the previous optimizer's training.
    if index > 0:
        model = VGG16()
    model.compile(optimizer=optimizer,
                  loss=losses.sparse_categorical_crossentropy,
                  metrics=['acc'])
    history = model.fit(trax[:choose], tray[:choose], epochs=10)
    # 41. Plot the loss curve for this optimizer.
    plt.plot(history.history['loss'], c=color, label=label)

plt.xlabel('epoch')
plt.ylabel('training loss')
plt.title('Training loss by optimizer')
plt.legend()
plt.show()
