"""
内容：快速开始上手Keras
日期：2020年7月6日
作者：Howie
"""
import numpy as np
import matplotlib.pyplot as plt
from keras import Sequential
from keras.layers import Dense
from keras.optimizers import SGD
from keras.losses import categorical_crossentropy
from keras.utils import to_categorical

# 预设
DATASET_PATH = '../dataset/mnist/mnist.npz'  # 数据集路径
PLT_ROW = 5
PLT_COL = 5
# 超参
N_SAMPLES_TRAIN = 60000     # 训练集样本数量
N_SAMPLES_TEST = 10000  # 测试集样本数量
IMG_SIZE = 28  # 图片尺寸
N_CLS = 10  # 类别数
EPOCHS = 50  # 周期
BATCH_SIZE = 128  # 批次数量
VAL_SET_SPLIT = 0.2  # 验证集比例


def hist_plot(hist, model_name):
    """
    Visualize the training history (loss and accuracy curves).

    :param hist: History object returned by model.fit(); must contain
                 'loss', 'val_loss', 'accuracy' and 'val_accuracy' keys
    :param model_name: name used for the plot title and the output file name
    :return: None (saves a PDF under ./logs/ and shows the figure)
    """
    import os  # local import: only needed here to ensure the output dir exists

    fig, loss_ax = plt.subplots()
    acc_ax = loss_ax.twinx()  # share x-axis; separate y-axis for accuracy
    # Training and validation loss per epoch
    loss_ax.plot(hist.history['loss'], 'y', label='Loss')
    loss_ax.plot(hist.history['val_loss'], 'r', label='Val Loss')
    loss_ax.set_ylim([0.0, 0.5])
    # Training and validation accuracy per epoch
    acc_ax.plot(hist.history['accuracy'], 'b', label='Acc')
    acc_ax.plot(hist.history['val_accuracy'], 'g', label='Val Acc')
    acc_ax.set_ylim([0.8, 1.0])
    # Axis labels
    loss_ax.set_xlabel('epoch')
    loss_ax.set_ylabel('loss')
    acc_ax.set_ylabel('accuracy')
    # Legends (placed apart so they do not overlap)
    loss_ax.legend(loc='upper left')
    acc_ax.legend(loc='lower left')
    # Title
    plt.title(model_name)
    # Save: create the output directory first, otherwise savefig raises
    # FileNotFoundError when ./logs does not exist yet
    os.makedirs('./logs', exist_ok=True)
    plt.savefig('./logs/History_Demo5-1_' + model_name + '.pdf')
    # Display
    plt.show()


def load_dataset():
    """
    Load MNIST from the local .npz file and preprocess it for an MLP.

    :return: (X_train, Y_train, X_test, Y_test) where the X arrays are
             flattened to (n_samples, IMG_SIZE * IMG_SIZE) float32 scaled
             to [0, 1] and the Y arrays are one-hot encoded with N_CLS
             classes
    """
    with np.load(DATASET_PATH) as data:
        X_train, Y_train = data['x_train'], data['y_train']
        X_test, Y_test = data['x_test'], data['y_test']
    # One-hot encode the labels; pass N_CLS explicitly so the encoding
    # width is always 10 and does not depend on which labels happen to
    # appear in the loaded data
    Y_train = to_categorical(Y_train, N_CLS)
    Y_test = to_categorical(Y_test, N_CLS)
    # Flatten each 28x28 image into a vector for the MLP and scale pixel
    # values to [0, 1]. Using -1 for the first axis instead of hard-coded
    # sample counts keeps this correct for datasets of any size.
    X_train = X_train.reshape(-1, IMG_SIZE * IMG_SIZE).astype('float32') / 255.0
    X_test = X_test.reshape(-1, IMG_SIZE * IMG_SIZE).astype('float32') / 255.0
    print('train samples: {}\n'
          'test samples: {}'.format(X_train.shape[0], X_test.shape[0]))
    return X_train, Y_train, X_test, Y_test


# Build a simple MLP: one hidden ReLU layer + softmax output
model = Sequential()
# Stack layers with .add()
model.add(Dense(units=64, activation='relu', input_dim=IMG_SIZE * IMG_SIZE))
# Use the N_CLS constant instead of a magic 10 so the output width stays
# consistent with the one-hot labels produced by load_dataset()
model.add(Dense(units=N_CLS, activation='softmax'))

# Configure the learning process with .compile(). Instead of the string
# shortcuts (loss='categorical_crossentropy', optimizer='sgd'), pass
# configured objects so the optimizer's hyper-parameters are explicit.
model.compile(
    loss=categorical_crossentropy,
    optimizer=SGD(
        lr=0.01,  # NOTE(review): `lr` is the legacy name; newer Keras uses `learning_rate`
        momentum=0.9,
        # Nesterov momentum adds a look-ahead correction factor to the
        # standard momentum update
        nesterov=True),
    metrics=['accuracy'])

# Load and preprocess the dataset
X_train, Y_train, X_test, Y_test = load_dataset()
# Train in mini-batches, holding out VAL_SET_SPLIT of the training data
# for per-epoch validation
hist = model.fit(
    X_train,
    Y_train,
    epochs=EPOCHS,
    batch_size=BATCH_SIZE,
    validation_split=VAL_SET_SPLIT)
# Visualize the training history
hist_plot(hist, 'Multi-Layer Perceptron')
# Evaluate generalization on the held-out test set
loss_and_metrics = model.evaluate(X_test, Y_test, batch_size=BATCH_SIZE)
print("Loss and metrics: {}".format(loss_and_metrics))
# Predict class probabilities for the first 5 test samples; argmax over
# the softmax output gives the predicted class index
probs = model.predict(X_test[:5], batch_size=BATCH_SIZE)
for i, y_hat in enumerate(probs):
    print("Ground truth: {} Prediction: {}".format(
        np.argmax(Y_test[i]), np.argmax(y_hat)))
