# ------------------------------------- Load the MNIST dataset -----------------------
from keras.datasets import mnist

(train_images, train_labels), (test_images, test_labels) = mnist.load_data()

# ------------------------------------- Prepare the data -----------------------
from keras.utils import to_categorical

# NOTE: the reshape target has a trailing channel axis (28, 28, 1) because a
# convnet's input layout differs from the flat vectors a Dense-only network
# would take.
# Use -1 for the batch axis so this works for any number of samples instead
# of hard-coding 60000 train / 10000 test images.
train_images = train_images.reshape((-1, 28, 28, 1))
# Scale pixel values from [0, 255] integers to [0, 1] float32.
train_images = train_images.astype('float32') / 255
test_images = test_images.reshape((-1, 28, 28, 1))
test_images = test_images.astype('float32') / 255
# One-hot encode the integer class labels.
train_labels = to_categorical(train_labels)
test_labels = to_categorical(test_labels)

# ------------------------------------- Split off validation data -----------------------
from sklearn.model_selection import train_test_split

# Hold out 10% of the training set for validation; a fixed seed keeps the
# split reproducible between runs.
RANDOM_SEED = 3
(train_images, validation_images,
 train_labels, validation_labels) = train_test_split(
    train_images,
    train_labels,
    test_size=0.1,
    random_state=RANDOM_SEED,
)

# ------------------------------------- Model definition -----------------------
from keras import models
from keras import layers

# A small convnet: three Conv2D stages (two with 2x2 max-pooling) followed by
# a dense classifier. The network takes tensors of shape
# (image_height, image_width, image_channels) — the batch dimension is
# implicit — so the first layer is given input_shape=(28, 28, 1), which is
# exactly the MNIST image format. Spatial size shrinks as the network
# deepens, while the channel count (first Conv2D argument: 32 or 64) grows.
#
# The convolutional stack ends with a (3, 3, 64) feature map. Dense layers
# consume 1D vectors, so Flatten turns that 3D output into a 576-dim vector
# before the classifier head (64-unit relu, then 10-way softmax).
model = models.Sequential([
    layers.Conv2D(32, (3, 3), activation='relu', input_shape=(28, 28, 1)),
    layers.MaxPooling2D(pool_size=2, strides=2),
    layers.Conv2D(64, (3, 3), activation='relu'),
    layers.MaxPooling2D(pool_size=2, strides=2),
    layers.Conv2D(64, (3, 3), activation='relu'),
    layers.Flatten(),
    layers.Dense(64, activation='relu'),
    layers.Dense(10, activation='softmax'),
])

# print(model.summary()) reports, per layer (output shape / param count):
#   conv2d_1 (Conv2D)              (None, 26, 26, 32)    320
#   max_pooling2d_1 (MaxPooling2)  (None, 13, 13, 32)    0
#   conv2d_2 (Conv2D)              (None, 11, 11, 64)    18496
#   max_pooling2d_2 (MaxPooling2)  (None, 5, 5, 64)      0
#   conv2d_3 (Conv2D)              (None, 3, 3, 64)      36928
#   flatten_1 (Flatten)            (None, 576)           0
#   dense_1 (Dense)                (None, 64)            36928
#   dense_2 (Dense)                (None, 10)            650
# Total params: 93,322 (all trainable; 55,744 in the conv stack alone).


# ------------------------------------- Compile the model -----------------------
# Single-label multi-class setup: softmax output pairs with categorical
# cross-entropy; accuracy is tracked for monitoring.
model.compile(
    loss='categorical_crossentropy',
    optimizer='rmsprop',
    metrics=['accuracy'],
)

# ------------------------------------- Data augmentation -----------------------
# Random augmentation improves generalisation and helps keep the model from
# overfitting.
#   rotation_range     : max random rotation, in degrees
#   zoom_range         : random zoom factor
#   width_shift_range  : horizontal shift as a fraction of image width
#   height_shift_range : vertical shift as a fraction of image height
#   horizontal_flip / vertical_flip : disabled here — a mirrored digit is
#   generally no longer the same class.
from keras.preprocessing.image import ImageDataGenerator

data_augment = ImageDataGenerator(
    rotation_range=10,
    zoom_range=0.1,
    width_shift_range=0.1,
    height_shift_range=0.1,
    horizontal_flip=False,
    vertical_flip=False,
)

# ------------------------------------- Train the model -----------------------


from keras.callbacks import ReduceLROnPlateau
from keras.callbacks import ModelCheckpoint

# Keras callbacks track a target metric and adjust training dynamically.
# Halve the learning rate (new_lr = lr * factor) whenever the monitored
# validation accuracy has not improved for `patience` consecutive epochs,
# never going below min_lr.
#   monitor  : quantity to watch (validation accuracy)
#   patience : epochs without improvement before the rate is reduced
#   verbose  : 1 prints a message each time the rate changes
# NOTE(review): newer Keras versions name this metric 'val_accuracy' rather
# than 'val_acc' — confirm against the installed Keras release.
lr_on_plateau = ReduceLROnPlateau(
    monitor='val_acc',
    patience=3,
    verbose=1,
    factor=0.5,
    min_lr=0.00001,
)

# ModelCheckpoint saves the model each time the monitored validation
# accuracy improves (save_best_only=True); the epoch number and accuracy
# are embedded in the file name.
filepath = "weights-improvement-{epoch:02d}-{val_acc:.4f}.hdf5"
best_checkpoint = ModelCheckpoint(
    filepath,
    monitor='val_acc',
    verbose=1,
    save_best_only=True,
    mode='auto',
)

# Checkpoint first, then the LR scheduler (same order as before).
callbacks_list = [best_checkpoint, lr_on_plateau]

# Fit on augmented batches produced by the generator; steps_per_epoch makes
# one epoch cover the whole training set once. Validation runs on the
# un-augmented hold-out split.
epochs = 40
batch_size = 100
steps_per_epoch = train_images.shape[0] // batch_size

history = model.fit_generator(
    data_augment.flow(train_images, train_labels, batch_size=batch_size),
    steps_per_epoch=steps_per_epoch,
    epochs=epochs,
    validation_data=(validation_images, validation_labels),
    verbose=1,
    callbacks=callbacks_list,
)

# ------------------------------------- Evaluate the model -----------------------
# Final performance on the held-out test set.
scores = model.evaluate(test_images, test_labels)
test_loss, test_acc = scores
print('test_loss:', test_loss)
print('test_acc:', test_acc)

# -------------------------------------08 Plot training and validation loss-----------------------
# import matplotlib.pyplot as plt
#
# history_dict = history.history
# loss_values = history_dict['loss']
# val_loss_values = history_dict['val_loss']
# epochs = range(1, len(loss_values) + 1)
# plt.plot(epochs, loss_values, 'bo', label='Training loss')
# plt.plot(epochs, val_loss_values, 'b', label='Validation loss')
# plt.title('Training and validation loss')
# plt.xlabel('Epochs')
# plt.ylabel('Loss')
# plt.legend()
# # 这样matplotlib就不阻塞了
# plt.show(block=False)
# plt.show()
#
# # -------------------------------------09 Plot training and validation accuracy-----------------------
# plt.clf()
# acc = history_dict['acc']
# val_acc = history_dict['val_acc']
# plt.plot(epochs, acc, 'bo', label='Training acc')
# plt.plot(epochs, val_acc, 'b', label='Validation acc')
# plt.title('Training and validation accuracy')
# plt.xlabel('Epochs')
# plt.ylabel('Accuracy')
# plt.legend()
# # 这样matplotlib就不阻塞了
# plt.show(block=False)
# plt.show()
