# 2.运用keras 使用leNet5处理mnist数据集
from keras.datasets.mnist import load_data
from keras import Sequential, layers, activations, optimizers, losses, utils, Model

# ①加载mnist数据集，并进行适当的数据预处理
# Step 1: load the MNIST dataset (60k train / 10k test grayscale digit images).
(x_train, y_train), (x_test, y_test) = load_data()
# Step 2: feature scaling — add a trailing channel axis (NHWC) and
# normalize pixel intensities from [0, 255] to [0, 1].
x_train = x_train.reshape(-1, 28, 28, 1) / 255
x_test = x_test.reshape(-1, 28, 28, 1) / 255
# Step 3: one-hot encode the integer class labels (10 digit classes).
y_train = utils.to_categorical(y_train)
y_test = utils.to_categorical(y_test)


# ④ 根据结构图创建LeNet模型
class LeNet(Model):
    """LeNet-5 CNN: two conv/pool feature-extraction stages followed by a
    three-layer dense classifier head (120 -> 84 -> 10, softmax output).

    Expects NHWC input of shape (batch, 28, 28, 1); returns per-class
    probabilities of shape (batch, 10).
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Convolutional feature extractor. The conv layers carry a ReLU
        # activation — without one, two stacked convolutions collapse into
        # a single linear map and the network loses most of its capacity.
        self.conv = Sequential([
            layers.Conv2D(filters=6, kernel_size=(5, 5), activation=activations.relu),
            layers.MaxPooling2D(),
            layers.Conv2D(filters=16, kernel_size=(5, 5), activation=activations.relu),
            layers.MaxPooling2D()
        ])
        # Flatten the (4, 4, 16) feature maps into a vector for the dense head.
        self.flat = layers.Flatten()
        # Classifier head; softmax on the final layer pairs with the
        # categorical cross-entropy loss used at compile time.
        self.fc = Sequential([
            layers.Dense(units=120, activation=activations.relu),
            layers.Dense(units=84, activation=activations.relu),
            layers.Dense(units=10, activation=activations.softmax)
        ])

    def call(self, inputs, training=None, mask=None):
        """Forward pass: conv features -> flatten -> dense classifier."""
        out = self.conv(inputs)
        out = self.flat(out)
        out = self.fc(out)
        return out


# ⑤  适当优化模型
# Step 5: instantiate the model and build it so summary() can report shapes.
model = LeNet()
model.build(input_shape=(None, 28, 28, 1))
model.summary()
# Step 6: configure optimizer, loss, and evaluation metric.
# `metrics` takes a list of metric names; 'acc' keeps the history key
# used below ('acc') rather than 'accuracy'.
model.compile(optimizer=optimizers.Adam(), loss=losses.categorical_crossentropy, metrics=['acc'])

# Step 7: train the model — batch size 100, 10 epochs.
log = model.fit(x_train, y_train, batch_size=100, epochs=10)
# Step 8: per-epoch training loss over the run.
loss = log.history['loss']
# Step 9: per-epoch training-set accuracy over the run.
acc = log.history['acc']
# Step 10: evaluate the trained model on the held-out test set and
# report the result instead of discarding evaluate()'s return value.
test_loss, test_acc = model.evaluate(x_test, y_test)
print(f'test loss: {test_loss:.4f}, test accuracy: {test_acc:.4f}')
