# CNN classification of the Fashion-MNIST dataset with Keras.
from keras.datasets.fashion_mnist import load_data
from keras import Sequential, layers, activations, optimizers, losses, metrics
# Public API path; `keras.src.utils` is an internal namespace and breaks across versions.
from keras.utils import to_categorical

# 1) Load the data (pre-split into train/test by the dataset loader).
(x_train, y_train), (x_test, y_test) = load_data()

# 2) Preprocess: add the channel dimension expected by Conv2D and
#    scale pixel values from [0, 255] to [0, 1].
x_train = x_train.reshape(-1, 28, 28, 1) / 255
x_test = x_test.reshape(-1, 28, 28, 1) / 255

# One-hot encode the labels (10 clothing classes).
y_train = to_categorical(y_train, num_classes=10)
y_test = to_categorical(y_test, num_classes=10)

# 3) Build the model: two Conv2D(32 filters, 3x3 kernel, relu) stages,
#    each followed by 2x2 max pooling, then flatten and two Dense layers.
model = Sequential([
    layers.Conv2D(filters=32, kernel_size=(3, 3), activation=activations.relu),
    # (28 + 2*0 - 3)/1 + 1 = 26 -> (None, 26, 26, 32)
    layers.MaxPooling2D(),
    # floor(26 / 2) = 13 -> (None, 13, 13, 32)
    layers.Conv2D(filters=32, kernel_size=(3, 3), activation=activations.relu),
    # (13 + 2*0 - 3)/1 + 1 = 11 -> (None, 11, 11, 32)
    layers.MaxPooling2D(),
    # 2x2 pooling: floor(11 / 2) = 5 -> (None, 5, 5, 32)
    # 4) Flatten the feature maps into a vector.
    layers.Flatten(),
    # 5 * 5 * 32 = 800 -> (None, 800)
    # 5) Two fully connected layers: 128 hidden units, then 10-way softmax.
    layers.Dense(units=128, activation=activations.relu),
    # (None, 128)
    layers.Dense(units=10, activation=activations.softmax),
    # (None, 10) — one probability per one-hot class
])
model.compile(optimizer=optimizers.Adam(), loss=losses.categorical_crossentropy,
              metrics=[metrics.CategoricalAccuracy(), metrics.Precision(), metrics.Recall(), metrics.F1Score()])
# 6) Train for a few epochs (enough for >80% test accuracy), then evaluate.
model.fit(x=x_train, y=y_train, batch_size=500, epochs=5)
model.evaluate(x_test, y_test)