"""
多分类问题：mnist手写数字(CNN)
"""
from keras.utils import to_categorical
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
# Conv1D：一维：时序；Conv2D：二维：图形
from keras.layers.convolutional import Conv2D
from keras.layers.pooling import MaxPooling2D
from utils.plot_graph import keras_plot_model
from utils.search_model_paramter import grid_search
from utils.model_fit import train_model


def get_data():
    """Load the MNIST dataset and prepare it for a CNN.

    Images are reshaped from (n, 28, 28) into the 4-D tensor
    (n, 28, 28, 1) expected by Conv2D, scaled to [0, 1], and the
    integer labels are converted to one-hot vectors.

    :return: (train_images, train_label, test_images, test_label)
    """
    def _prepare_images(images):
        # Add a trailing channel axis (grayscale => 1 channel) and
        # normalize pixel intensities from [0, 255] to [0, 1].
        n, h, w = images.shape[0], images.shape[1], images.shape[2]
        images = images.reshape((n, h, w, 1)).astype(float)
        return images / 255

    (train_images, train_label), (test_images, test_label) = mnist.load_data()

    train_images = _prepare_images(train_images)
    test_images = _prepare_images(test_images)

    # One-hot encode the integer class labels (0-9 -> length-10 vectors).
    train_label = to_categorical(train_label)
    test_label = to_categorical(test_label)

    return train_images, train_label, test_images, test_label


def build_fit_model(train_x, train_y, test_x, test_y,
                    unit, act, opt, loss, epochs, batch_size, verbose,
                    pic_path, model_name, workers=4,
                    use_multiprocessing=True, shuffle=True, plot=False,
                    rnn_max_pool_size=(2, 2), **options):
    """Build a 2-conv-layer CNN classifier, compile it, and train it.

    Architecture: Conv2D -> MaxPool -> Conv2D -> MaxPool -> Dropout
    -> Flatten -> Dense(relu) -> Dropout -> Dense(softmax).

    :param train_x: training images, 4-D tensor (n, h, w, channels)
    :param train_y: one-hot training labels, shape (n, num_classes)
    :param test_x: test images, same layout as train_x
    :param test_y: one-hot test labels
    :param unit: [conv_filters, dense_units]; second conv uses 2x filters
    :param act: activation list (unused here; layers are hard-coded)
    :param opt: optimizer name or instance for model.compile
    :param loss: loss name for model.compile
    :param epochs: number of training epochs
    :param batch_size: mini-batch size
    :param verbose: Keras fit verbosity level
    :param pic_path: directory for training-history plots
    :param model_name: label used for saved plots/model artifacts
    :param workers: worker count passed through to train_model
    :param use_multiprocessing: passed through to train_model
    :param shuffle: shuffle training data each epoch
    :param plot: whether train_model should plot training curves
    :param rnn_max_pool_size: pool_size for both MaxPooling2D layers
    :param options: must contain 'kernel_size' (tuple) and 'drop_p'
        (float dropout rate); returns None if either is missing
    :return: result of train_model (model and metrics), or None when
        required options are absent
    """
    # Use `is None` rather than truthiness: drop_p = 0.0 is a legal
    # dropout rate but is falsy, so `if not drop_p` would wrongly bail out.
    kernel_size = options.get('kernel_size')
    if kernel_size is None:
        print('build_fit_model: missing required option "kernel_size"')
        return None
    drop_p = options.get('drop_p')
    if drop_p is None:
        print('build_fit_model: missing required option "drop_p"')
        return None

    model = Sequential()
    # Convolution layer; input_shape is only required on the first layer.
    model.add(Conv2D(filters=unit[0],
                     kernel_size=kernel_size,
                     activation='relu',
                     input_shape=(train_x.shape[1],
                                  train_x.shape[2],
                                  train_x.shape[3])))
    """
    Pooling:
    1. A form of down-sampling.
    2. Splits the input image into rectangular regions and outputs the
       maximum of each sub-region.
    3. Usually applied per feature map, shrinking its spatial size.
    """
    model.add(MaxPooling2D(pool_size=rnn_max_pool_size))
    # Second convolution doubles the filter count. Keras infers the input
    # shape from the previous layer, so no input_shape is given here
    # (it would be ignored anyway and the halved-shape guess was fragile).
    model.add(Conv2D(filters=unit[0] * 2,
                     kernel_size=kernel_size,
                     activation='relu'))
    model.add(MaxPooling2D(pool_size=rnn_max_pool_size))

    # Random dropout (lighter rate after conv stack) to reduce overfitting.
    model.add(Dropout(drop_p / 2))

    # Fully-connected classification head.
    model.add(Flatten())
    model.add(Dense(units=unit[1], activation='relu'))
    model.add(Dropout(drop_p))
    # Output width follows the one-hot label dimension.
    model.add(Dense(units=train_y.shape[1], activation='softmax'))

    model.compile(optimizer=opt,
                  loss=loss,
                  metrics=['accuracy'])

    return train_model(model=model,
                       train_x=train_x, train_y=train_y,
                       test_x=test_x, test_y=test_y,
                       epochs=epochs,
                       model_name=model_name,
                       pic_path=pic_path,
                       shuffle=shuffle,
                       verbose=verbose,
                       plot=plot,
                       batch_size=batch_size,
                       workers=workers,
                       use_multiprocessing=use_multiprocessing)


def multi_class_cnn_run(workers):
    """Entry point: optionally grid-search hyper-parameters, then train
    the MNIST CNN with a fixed configuration.

    :param workers: worker count forwarded to Keras fit / grid_search
    :return: None
    """
    # NOTE: these flags were previously named `get_parameter` and
    # `train_model`; the latter shadowed the imported train_model()
    # function from utils.model_fit, so they are renamed here.
    run_grid_search = False
    run_training = True

    kernel_size_arr = [(4, 4), (5, 5), (6, 6)]
    # Candidate [conv_filters, dense_units] pairs.
    unit_arr = [[32, 128], [32, 256], [32, 512]]
    drop_p_arr = [0.5, 0.6, 0.65]
    act_arr = [['relu', 'softmax']]
    # Other optimizers tried: 'RMSprop', 'Adadelta', 'Nadam'.
    opt_arr = ['Adam']
    loss_arr = ['categorical_crossentropy']
    epochs = 20
    batch_size = 300

    train_images, train_label, test_images, test_label = get_data()

    # Hyper-parameter selection: exhaustive search over the grids above.
    if run_grid_search:
        for drop_p in drop_p_arr:
            for kernel_size in kernel_size_arr:
                print('-' * 100)
                print('drop_p : {}, kernel_size: {}'.format(drop_p, kernel_size))
                grid_search(func=build_fit_model,
                            train_x=train_images, train_y=train_label,
                            test_x=test_images, test_y=test_label,
                            unit_arr=unit_arr,
                            act_arr=act_arr,
                            opt_arr=opt_arr,
                            loss_arr=loss_arr,
                            epochs=epochs,
                            batch_size=batch_size,
                            workers=workers,
                            use_multiprocessing=True,
                            shuffle=True,
                            plot=False,
                            kernel_size=kernel_size,
                            drop_p=drop_p)

    # Final training run with the chosen hyper-parameters.
    if run_training:
        model, loss_metrics = \
            build_fit_model(train_x=train_images, train_y=train_label,
                            test_x=test_images, test_y=test_label,
                            unit=[32, 128],
                            act=['relu', 'softmax'],
                            opt='Nadam',
                            loss='categorical_crossentropy',
                            pic_path='../pic',
                            # NOTE(review): label says "RNN" but this is a
                            # CNN; kept as-is so saved artifact paths do
                            # not change — confirm before renaming.
                            model_name='多分类RNN',
                            epochs=epochs,
                            batch_size=batch_size,
                            verbose=1,
                            workers=workers,
                            use_multiprocessing=True,
                            shuffle=True,
                            plot=True,
                            kernel_size=(5, 5),
                            drop_p=0.5)
        print('loss_metrics: {}'.format(loss_metrics))
        # Render the model architecture diagram to disk.
        keras_plot_model(model=model, pic='../pic/多分类RNN.png')
