"""
内容：类似 VGG 的卷积神经网络
日期：2020年7月7日
作者：Howie
"""

import numpy as np
import matplotlib.pyplot as plt

from keras.utils import to_categorical, plot_model
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Dense, Dropout, Flatten
from keras.optimizers import SGD
from keras.losses import categorical_crossentropy

"""超参"""
# 输入
N_SAMPLES_TRAIN = 60000  # 训练集样本数量
N_SAMPLES_TEST = 10000  # 测试集样本数量
IMG_SIZE = 28  # 图片尺寸
N_CHANNELS = 1  # 信道数
N_CLS = 10  # 类别数

# 模型
FILTER_NUM_CONV1 = 64
FILTER_SIZE_CONV1 = (3, 3)
FILTER_SIZE_POOL1 = (2, 2)

FILTER_NUM_CONV2 = 128
FILTER_SIZE_CONV2 = (3, 3)
FILTER_SIZE_POOL2 = (2, 2)

FILTER_NUM_CONV3 = 256
FILTER_SIZE_CONV3 = (3, 3)
FILTER_SIZE_POOL3 = (2, 2)

FILTER_NUM_CONV4 = 512
FILTER_SIZE_CONV4 = (3, 3)
FILTER_SIZE_POOL4 = (2, 2)

UNITS_NUM_DENSE1 = 512
UNITS_NUM_DENSE2 = N_CLS

# 训练
BASE_LR = 0.01  # 学习率
DECAY_RATE = 1e-6  # 学习率衰减率
MOMENTUM = 0.9  # 动量值
BATCH_SIZE = 128  # 批次数量
EPOCHS = 5  # 周期
VAL_SIZE_RATE = 0.3  # 验证集比例
DROP_PROB = 0.25  # 漏失概率


def load_mnist():
    """
    Load the MNIST dataset from a local .npz file.

    Images are reshaped to (n, IMG_SIZE, IMG_SIZE, N_CHANNELS) and scaled
    to [0, 1] as float32; labels are one-hot encoded over N_CLS classes.

    :return: X_train, Y_train, X_test, Y_test
    """
    with np.load('../dataset/mnist/mnist.npz') as data:
        X_train, Y_train = data['x_train'], data['y_train']
        X_test, Y_test = data['x_test'], data['y_test']
    # Infer the sample count with -1 instead of hard-coding
    # N_SAMPLES_TRAIN/N_SAMPLES_TEST, so the loader also works if the
    # dataset file ever contains a different number of samples.
    X_train = X_train.reshape(
        -1,
        IMG_SIZE,
        IMG_SIZE,
        N_CHANNELS).astype('float32') / 255.0
    X_test = X_test.reshape(
        -1,
        IMG_SIZE,
        IMG_SIZE,
        N_CHANNELS).astype('float32') / 255.0
    # Pin the class count explicitly so the one-hot width does not depend
    # on which labels happen to be present in the data.
    Y_train = to_categorical(Y_train, N_CLS)
    Y_test = to_categorical(Y_test, N_CLS)

    print('train on {} samples\n'
          'test on {} samples'.format(X_train.shape[0], X_test.shape[0]))

    return X_train, Y_train, X_test, Y_test


class Model:
    """
    A VGG-like convolutional neural network for image classification.

    Architecture: four conv + max-pool stages with doubling filter counts
    (64 -> 128 -> 256 -> 512), followed by a fully connected layer with
    dropout and a softmax output over N_CLS classes.
    """

    def __init__(self):
        """
        Build the network layer by layer and save an architecture diagram.

        NOTE(review): plot_model requires pydot and graphviz to be
        installed; __init__ will raise if they are missing.
        """
        self.model = Sequential()
        # Stage 1: 'same' padding keeps the spatial size until pooling.
        self.model.add(
            Conv2D(
                FILTER_NUM_CONV1,
                FILTER_SIZE_CONV1,
                activation='relu',
                input_shape=(
                    IMG_SIZE,
                    IMG_SIZE,
                    N_CHANNELS), padding='same'))
        self.model.add(MaxPooling2D(pool_size=FILTER_SIZE_POOL1))
        # Stage 2
        self.model.add(
            Conv2D(
                FILTER_NUM_CONV2,
                FILTER_SIZE_CONV2,
                activation='relu', padding='same'))
        self.model.add(MaxPooling2D(pool_size=FILTER_SIZE_POOL2))
        # Stage 3
        self.model.add(
            Conv2D(
                FILTER_NUM_CONV3,
                FILTER_SIZE_CONV3,
                activation='relu', padding='same'))
        self.model.add(MaxPooling2D(pool_size=FILTER_SIZE_POOL3))
        # Stage 4
        self.model.add(
            Conv2D(
                FILTER_NUM_CONV4,
                FILTER_SIZE_CONV4,
                activation='relu', padding='same'))
        self.model.add(MaxPooling2D(pool_size=FILTER_SIZE_POOL4))
        # Classifier head: flatten, dense + dropout, softmax output.
        self.model.add(Flatten())
        self.model.add(Dense(UNITS_NUM_DENSE1, activation='relu'))
        self.model.add(Dropout(DROP_PROB))
        self.model.add(Dense(UNITS_NUM_DENSE2, activation='softmax'))
        plot_model(
            self.model,
            to_file="./logs/Model_Demo5-3.pdf",
            show_shapes=True)

    def train(self, X_train, Y_train, ):
        """
        Compile and train the model, then plot the training history.

        :param X_train: training samples
        :param Y_train: training labels (one-hot)
        :return: None
        """
        # SGD with Nesterov momentum and learning-rate decay.
        sgd = SGD(
            lr=BASE_LR,
            decay=DECAY_RATE,
            momentum=MOMENTUM,
            nesterov=True)
        self.model.compile(loss=categorical_crossentropy, optimizer=sgd, metrics=['accuracy'])
        hist = self.model.fit(
            X_train,
            Y_train,
            batch_size=BATCH_SIZE,
            epochs=EPOCHS,
            validation_split=VAL_SIZE_RATE)
        self.hist_plot(hist, model_name='VGG')

    def evaluate(self, X_test, Y_test):
        """
        Evaluate the model on the test set and print loss and metrics.

        :param X_test: test samples
        :param Y_test: test labels (one-hot)
        :return: None
        """
        loss_and_metrics = self.model.evaluate(
            X_test, Y_test, batch_size=BATCH_SIZE)
        print("Evaluation loss and metrics: {}".format(loss_and_metrics))

    def predict(self, samples, labels):
        """
        Predict class labels for samples and print them next to the
        ground truth.

        :param samples: samples to classify
        :param labels: corresponding ground-truth labels (one-hot)
        :return: None
        """
        predictions = self.model.predict_classes(samples)
        for i, prediction in enumerate(predictions):
            print("({}) Ground truth: {} \t Prediction: {}".format(
                i + 1, np.argmax(labels[i]), int(prediction)))

    # Backward-compatible alias: the method was originally misspelled as
    # 'predcit' and existing callers still use that name.
    predcit = predict

    def hist_plot(self, hist, model_name):
        """
        Visualize the training history: loss and accuracy per epoch on
        shared x-axis with twin y-axes, saved as a PDF and shown.

        :param hist: Keras History object returned by fit()
        :param model_name: model name used in the output file name
        :return: None
        """
        fig, loss_ax = plt.subplots()
        # Second y-axis so loss and accuracy share one x-axis (epochs).
        acc_ax = loss_ax.twinx()
        # Training and validation loss per epoch
        loss_ax.plot(hist.history['loss'], 'y', label='train loss')
        loss_ax.plot(hist.history['val_loss'], 'r', label='val loss')
        # Training and validation accuracy per epoch
        acc_ax.plot(hist.history['accuracy'], 'b', label='train acc')
        acc_ax.plot(hist.history['val_accuracy'], 'g', label='val acc')
        # Axis labels
        loss_ax.set_xlabel('epoch')
        loss_ax.set_ylabel('loss')
        acc_ax.set_ylabel('accuracy')
        # Legends
        loss_ax.legend(loc='upper left')
        acc_ax.legend(loc='lower left')
        # Save
        plt.savefig('./logs/History_Demo5-3_' + model_name + '.pdf')
        # Show
        plt.show()


def main():
    """
    Entry point: load MNIST, build the network, train and evaluate it,
    then print predictions for the first ten test samples.

    :return: None
    """
    x_tr, y_tr, x_te, y_te = load_mnist()
    net = Model()
    net.train(x_tr, y_tr)
    net.evaluate(x_te, y_te)
    net.predcit(x_te[:10], y_te[:10])


# Script entry point: run training only when executed directly, not on import.
if __name__ == '__main__':
    main()
