# 1: Import libraries
import numpy as np
import pandas as pd
import tensorflow as tf
import matplotlib.pyplot as plt

# 2: Environment configuration
# Register a Chinese-capable font so figure titles render correctly.
plt.rcParams["font.family"] = "SimHei", "sans-serif"
# Enable GPU memory growth so TensorFlow allocates VRAM on demand instead
# of grabbing it all up front.
# BUG FIX: the original indexed gpus[0] unconditionally, which raises
# IndexError on CPU-only machines; only configure growth when a GPU exists.
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
    tf.config.experimental.set_memory_growth(gpus[0], True)

# 3: Load the data
# MNIST ships with Keras and comes pre-split into train and test sets.
mnist = tf.keras.datasets.mnist
(train_x, train_y), (test_x, test_y) = mnist.load_data()
# Report the shapes of every split:
# (60000, 28, 28), (60000,), (10000, 28, 28), (10000,)
for split in (train_x, train_y, test_x, test_y):
    print(split.shape)

# 4: Preprocessing
# Scale pixel values into [0, 1] to speed up convergence, and convert to
# float32 tensors; labels become 16-bit integer tensors.
X_train, X_test = tf.cast(train_x / 255.0, tf.float32), tf.cast(test_x / 255.0, tf.float32)
Y_train, Y_test = tf.cast(train_y, tf.int16), tf.cast(test_y, tf.int16)

# Conv2D's input_shape expects 4-D input (batch, height, width, channels).
# MNIST is grayscale and has no channel axis, so append one.
# BUG FIX: the original reshaped the RAW uint8 arrays (train_x / test_x),
# silently discarding the normalization done above. Reshape the normalized
# tensors instead, and use -1 for the batch axis so the code no longer
# depends on the hard-coded dataset sizes.
X_train = tf.reshape(X_train, (-1, 28, 28, 1))
X_test = tf.reshape(X_test, (-1, 28, 28, 1))

print(X_train.shape)  # (60000, 28, 28, 1)
print(X_test.shape)  # (10000, 28, 28, 1)

# 5: Build the model
# A small LeNet-style CNN: two conv/pool stages for feature extraction,
# followed by a dense classifier head.
model = tf.keras.Sequential()

# Unit 1: 16 filters of size 3x3; input_shape declares the 28x28x1 images.
model.add(tf.keras.layers.Conv2D(16, kernel_size=(3, 3), padding="same",
                                 activation=tf.nn.relu, input_shape=(28, 28, 1)))
# Max pooling with a 2x2 window halves the spatial resolution.
model.add(tf.keras.layers.MaxPool2D(pool_size=(2, 2)))

# Unit 2: 32 filters of size 3x3; the input shape is inferred from the
# previous layer, so no input_shape is needed here.
model.add(tf.keras.layers.Conv2D(32, kernel_size=(3, 3), padding="same",
                                 activation=tf.nn.relu))
model.add(tf.keras.layers.MaxPool2D(pool_size=(2, 2)))
# Feature-extraction stages end here.

# Unit 3: flatten the 3-D feature maps into a 1-D vector.
model.add(tf.keras.layers.Flatten())

# Unit 4: one hidden layer with 128 units, then a 10-way softmax output
# (one probability per digit class).
model.add(tf.keras.layers.Dense(128, activation="relu"))
model.add(tf.keras.layers.Dense(10, activation="softmax"))

# 6: Inspect the network architecture and parameter counts
model.summary()
# Expected output:
#
# Model: "sequential"
# _________________________________________________________________
# Layer (type)                 Output Shape              Param #
# =================================================================
# conv2d (Conv2D)              (None, 28, 28, 16)        160
# _________________________________________________________________
# max_pooling2d (MaxPooling2D) (None, 14, 14, 16)        0
# _________________________________________________________________
# conv2d_1 (Conv2D)            (None, 14, 14, 32)        4640
# _________________________________________________________________
# max_pooling2d_1 (MaxPooling2 (None, 7, 7, 32)          0
# _________________________________________________________________
# flatten (Flatten)            (None, 1568)              0
# _________________________________________________________________
# dense (Dense)                (None, 128)               200832
# _________________________________________________________________
# dense_1 (Dense)              (None, 10)                1290
# =================================================================
# Total params: 206,922
# Trainable params: 206,922
# Non-trainable params: 0
# _________________________________________________________________

# 7: Configure the training procedure
# Adam optimizer; sparse categorical cross-entropy matches the integer
# (non-one-hot) labels; accuracy is reported as the training metric.
model.compile(
    optimizer='adam',
    loss='sparse_categorical_crossentropy',
    metrics=['sparse_categorical_accuracy'],
)

# 8: Train the model
# 5 epochs, batches of 64, with 20% of the training data held out for
# validation; `history` records the per-epoch metrics.
history = model.fit(X_train, Y_train, batch_size=64, epochs=5, validation_split=0.2)
# Representative output:
# 750/750 [==========] - 11s 6ms/step - loss: 0.6558 - sparse_categorical_accuracy: 0.9245
#                                     - val_loss: 0.0965 - val_sparse_categorical_accuracy: 0.9707
# Epoch 2/5 ... loss: 0.0753 - acc: 0.9772 - val_loss: 0.0769 - val_acc: 0.9793
# Epoch 3/5 ... loss: 0.0484 - acc: 0.9855 - val_loss: 0.0642 - val_acc: 0.9826
# Epoch 4/5 ... loss: 0.0337 - acc: 0.9891 - val_loss: 0.0618 - val_acc: 0.9837
# Epoch 5/5 ... loss: 0.0313 - acc: 0.9901 - val_loss: 0.0790 - val_acc: 0.9808

# 9: Evaluate on the held-out test set
# Uses MNIST's own 10k-image test split; verbose=2 prints one summary line.
model.evaluate(X_test, Y_test, batch_size=64, verbose=2)
# Representative output:
# 157/157 - 2s - loss: 0.0706 - sparse_categorical_accuracy: 0.9811

# 10: Persist the training log as CSV
pd.DataFrame(history.history).to_csv("training_log.csv", index=False)

# 11: Plot the training curves
# Read the log back from disk (demonstrates the round trip).
graph = pd.read_csv("training_log.csv")

graph.plot(figsize=(8, 5))
plt.grid(True)  # grid(1) worked by truthiness; True states the intent
# Derive the x-range from the actual number of logged epochs instead of the
# hard-coded 4, so the plot stays correct if `epochs` is changed above.
plt.xlim(0, len(graph) - 1)
plt.ylim(0, 1)

# 12: Apply the model -- predict on random test samples
# Draw 10 random test images and show the true label vs. the prediction.
plt.figure()
# The figure-level title only needs to be set once, not on every iteration.
plt.suptitle("随机取出测试集中的任意10个数据进行识别", fontsize=20, color="red", backgroundcolor="yellow")
for i in range(10):
    # BUG FIX: randint(1, 10000) could never pick index 0. Sample the full
    # range [0, len(test_x)) and avoid hard-coding the test-set size.
    num = np.random.randint(0, len(test_x))

    plt.subplot(2, 5, i + 1)
    plt.axis("off")
    plt.imshow(test_x[num], cmap="gray")
    # Predict expects a batch dimension, hence the (1, 28, 28, 1) reshape;
    # argmax over the softmax outputs gives the predicted digit.
    demo = model.predict(tf.reshape(X_test[num], (1, 28, 28, 1)))
    y_pred = np.argmax(demo)
    plt.title("y= " + str(test_y[num]) + "\n" + "y_pred=" + str(y_pred))

plt.show()

# 13: Save the trained model to disk (HDF5 format) for later reuse.
model.save("mnist_model_convolutional.h5")
