# MNIST digit classification with a fully connected (dense) neural network.
from keras.utils import to_categorical
# regularizers: weight-regularization (L1/L2) utilities.
from keras import models, layers, regularizers
# optimizers: gradient-descent optimizers; RMSprop (root-mean-square propagation) is used here.
from keras.optimizers import RMSprop
# The MNIST handwritten-digit dataset.
from keras.datasets import mnist
import matplotlib.pyplot as plt

"""-------------------------------- Load the dataset --------------------------------"""
# 60000 training images and 10000 test images, each 28x28 grayscale pixels.
(train_images, train_labels), (test_images, test_labels) = mnist.load_data()

# Flatten each 28x28 image into a 784-element vector and scale pixel
# intensities from [0, 255] to [0, 1]. Normalization keeps the inputs in a
# range where RMSprop at lr=1e-3 converges quickly; float32 halves memory
# versus the default float64.
train_images = train_images.reshape((60000, 28 * 28)).astype('float32') / 255
test_images = test_images.reshape((10000, 28 * 28)).astype('float32') / 255

# One-hot encode the integer class labels (digit 0-9 -> length-10 vector)
# to match the softmax output and categorical_crossentropy loss.
train_labels = to_categorical(train_labels)
test_labels = to_categorical(test_labels)
print("train_labels[0]:", train_labels[0], "\n")

"""-------------------------------- Build the neural network model --------------------------------"""
network = models.Sequential()
# First hidden layer: 128 units, ReLU activation, with a small L1 penalty on
# the weights to curb mild overfitting.
network.add(layers.Dense(units=128, activation='relu', input_shape=(28 * 28,), kernel_regularizer=regularizers.l1(0.0001)))
# Randomly drop 1% of the activations during training (further overfitting control).
network.add(layers.Dropout(0.01))
# Second hidden layer: 32 units, same activation and regularization.
network.add(layers.Dense(units=32, activation='relu', kernel_regularizer=regularizers.l1(0.0001)))
network.add(layers.Dropout(0.01))
# Output layer: 10-way softmax, one probability per digit class.
network.add(layers.Dense(units=10, activation='softmax'))

# Compile: choose optimizer, loss and metrics.
# NOTE: `lr` is deprecated (removed in TF2.x Keras) — use `learning_rate`.
network.compile(optimizer=RMSprop(learning_rate=0.001), loss='categorical_crossentropy', metrics=['accuracy'])
# Train with fit(): `epochs` = number of passes over the data,
# `batch_size` = samples per gradient update, verbose=2 = one line per epoch.
network.fit(train_images, train_labels, epochs=20, batch_size=128, verbose=2)

# Evaluate model performance on held-out test data.
y = network.predict(test_images[:5])
print(y, test_labels[:5])
test_loss, test_accuracy = network.evaluate(test_images, test_labels)
print("test_loss:", test_loss, "    test_accuracy:", test_accuracy)
