"""
内容：损失函数 Losses/评估标准 Metrics/优化器 Optimizers/
     回调函数 Callbacks/初始化 Initializers/正则化 Regularizers
日期：2020年7月10日
作者：Howie
"""
import numpy as np
from keras.models import Model, load_model
from keras.layers import Input, Conv2D, MaxPooling2D, Flatten, Dense
from keras.utils import plot_model, to_categorical, multi_gpu_model
from keras.losses import categorical_crossentropy
from keras.optimizers import SGD
from keras.metrics import categorical_accuracy
from keras.callbacks import ModelCheckpoint, EarlyStopping
from keras.initializers import zeros, glorot_uniform
from keras.regularizers import l1_l2
import tensorflow as tf

# Checkpoint callback: save weights after each epoch; the filename embeds the
# epoch number and validation loss. save_best_only keeps only checkpoints
# whose monitored value (val_loss by default) improved.
ckpt = ModelCheckpoint('./checkpoints/weights.{epoch:02d}-{val_loss:.2f}.hdf5',
                       save_best_only=True)
# Stop training once val_accuracy improves by less than 1e-2 for 8 epochs.
# NOTE(review): defined but never passed to any fit() call below — confirm intent.
early_stop = EarlyStopping(monitor='val_accuracy', min_delta=1e-2, patience=8)


def get_lr_metric(optimizer):
    """Build a pseudo-metric that reports the optimizer's learning rate.

    The returned function has the standard Keras metric signature but
    ignores its arguments entirely; it only surfaces ``optimizer.lr`` so
    the learning rate shows up in training logs.

    :param optimizer: optimizer instance exposing an ``lr`` attribute
    :return: metric function named ``lr`` (the name is relied upon by
             ``load_model(custom_objects={'lr': ...})``)
    """
    def lr(_y_true, _y_pred):
        return optimizer.lr
    return lr


def load_mnist(path='../dataset/mnist/mnist.npz'):
    """
    Load the MNIST dataset from a local .npz archive and preprocess it.

    :param path: path to the mnist.npz file (expects keys
                 x_train/y_train/x_test/y_test); defaults to the original
                 hard-coded location, so existing callers are unaffected
    :return: (X_train, Y_train, X_test, Y_test) with images shaped
             (N, 28, 28, 1), scaled to [0, 1], and one-hot encoded labels
    """
    with np.load(path) as data:
        X_train, Y_train = data['x_train'], data['y_train']
        X_test, Y_test = data['x_test'], data['y_test']
    # Add a channel axis and scale pixel values to [0, 1].
    # reshape(-1, ...) infers the sample count instead of hard-coding
    # 60000/10000, so subsets or alternative archives also work.
    X_train = X_train.reshape(-1, 28, 28, 1).astype('float32') / 255.0
    X_test = X_test.reshape(-1, 28, 28, 1).astype('float32') / 255.0
    # One-hot encode the integer class labels.
    Y_train = to_categorical(Y_train)
    Y_test = to_categorical(Y_test)
    # Report the split sizes.
    print('train samples: {}\n'
          'test samples: {}'.format(X_train.shape[0], X_test.shape[0]))

    return X_train, Y_train, X_test, Y_test


class LeNet:
    """
    LeNet-style CNN for MNIST: 2 Conv+Pool stages, 2 FC layers, softmax output.
    """

    def __init__(self):
        """
        Build the network graph and save an architecture diagram to ./logs/.
        """
        input_img = Input(shape=(28, 28, 1))
        conv2D_1 = Conv2D(
            filters=6,
            kernel_size=(5, 5),
            kernel_initializer=glorot_uniform(),
            bias_initializer=zeros(),
            activity_regularizer=l1_l2(),
            activation='relu')(input_img)
        maxPool_1 = MaxPooling2D(pool_size=(2, 2))(conv2D_1)
        conv2D_2 = Conv2D(
            filters=16,
            kernel_size=(5, 5),
            kernel_initializer=glorot_uniform(),
            bias_initializer=zeros(),
            activity_regularizer=l1_l2(),
            activation='relu')(maxPool_1)
        maxPool_2 = MaxPooling2D(pool_size=(2, 2))(conv2D_2)
        flatten = Flatten()(maxPool_2)
        # NOTE(review): the two hidden Dense layers have no activation
        # (linear); classic LeNet applies a non-linearity here — confirm
        # before changing, as it alters trained behavior.
        fc_1 = Dense(
            units=120,
            kernel_initializer=glorot_uniform(),
            bias_initializer=zeros(),
            activity_regularizer=l1_l2())(flatten)
        fc_2 = Dense(
            units=84,
            kernel_initializer=glorot_uniform(),
            bias_initializer=zeros(),
            activity_regularizer=l1_l2())(fc_1)
        output = Dense(units=10, activation='softmax')(fc_2)

        self.model = Model(inputs=input_img, outputs=output)
        # Visualize the architecture.
        plot_model(
            model=self.model,
            to_file='./logs/LeNet.pdf',
            show_shapes=True)

    def train(self, X_train, Y_train):
        """
        Train the model and save it to infer_model/lenet_mnist.h5.

        :param X_train: training images, shape (N, 28, 28, 1), scaled to [0, 1]
        :param Y_train: one-hot encoded training labels
        :return: None
        """
        optimizer = SGD(lr=1e-3, decay=1e-6, momentum=0.9, nesterov=True)
        self.model.compile(
            loss=categorical_crossentropy,
            optimizer=optimizer,
            # Extra metric surfaces the current learning rate in the logs.
            metrics=[categorical_accuracy, get_lr_metric(optimizer)])
        self.model.fit(
            X_train,
            Y_train,
            batch_size=128,
            epochs=1,
            validation_split=0.3,
            callbacks=[ckpt])
        self.model.save(filepath='infer_model/lenet_mnist.h5')

    def parallel_train(self, X_train, Y_train):
        """
        Train a replica of the model distributed across 2 GPUs.

        Fix: the original body read ``X_train``/``Y_train`` from module
        globals, raising NameError unless run via this file's __main__;
        they are now explicit parameters, consistent with ``train``.

        :param X_train: training images, shape (N, 28, 28, 1), scaled to [0, 1]
        :param Y_train: one-hot encoded training labels
        :return: None
        """
        # Instantiate the base ("template") model on the CPU so its weights
        # live in host memory and can be shared by the GPU replicas.
        with tf.device('/cpu:0'):
            model = LeNet().model
        # Replicate the model onto 2 GPUs.
        parallel_model = multi_gpu_model(model, gpus=2)
        optimizer = SGD(lr=1e-3, decay=1e-6, momentum=0.9, nesterov=True)
        parallel_model.compile(
            loss=categorical_crossentropy,
            optimizer=optimizer,
            metrics=[categorical_accuracy, get_lr_metric(optimizer)])
        # This `fit` call is distributed over the 2 GPUs: with a batch size
        # of 256, each GPU processes 128 samples per step.
        parallel_model.fit(
            X_train,
            Y_train,
            batch_size=256,
            epochs=1,
            validation_split=0.3,
            callbacks=[ckpt])
        # Save the template model; it shares weights with the parallel one.
        model.save(filepath='infer_model/parallel_lenet_mnist.h5')

    def evaluate(self, X_test, Y_test):
        """
        Evaluate the model on the test set (metrics are printed by Keras).

        :param X_test: test images, shape (N, 28, 28, 1), scaled to [0, 1]
        :param Y_test: one-hot encoded test labels
        :return: None
        """
        self.model.evaluate(X_test, Y_test, batch_size=64)

    def application(self, model_path, input_img, ground_truth):
        """
        Load a saved model and print prediction vs. ground truth for one image.

        Fix: the original divided the input by 255 a second time even though
        its only caller (__main__) passes images already scaled by
        ``load_mnist``, which distorted the prediction; the input is now
        expected to be preprocessed already.

        :param model_path: path to a saved .h5 model
        :param input_img: single image reshapeable to (28, 28, 1), already
                          scaled to [0, 1] as produced by load_mnist
        :param ground_truth: one-hot encoded true label
        :return: None
        """
        model_trained = load_model(
            model_path, custom_objects={
                # The learning-rate metric was registered under the name
                # 'lr' at training time, so it must be resolvable on load.
                'lr': get_lr_metric(
                    self.model.optimizer)})
        prediction = model_trained.predict(
            input_img.reshape(1, 28, 28, 1).astype('float32'))
        print("Ground truth: {} \t Prediction: {}".format(
            np.argmax(ground_truth), np.argmax(prediction)))


if __name__ == '__main__':
    # End-to-end demo: build, load data, train, evaluate, then run a
    # single-sample prediction against the saved model.
    lenet = LeNet()
    X_train, Y_train, X_test, Y_test = load_mnist()
    lenet.train(X_train, Y_train)
    lenet.evaluate(X_test, Y_test)
    lenet.application('./infer_model/lenet_mnist.h5', X_test[3], Y_test[3])
