"""
视频 卷积LSTM 模型
"""
import numpy as np
import matplotlib.pyplot as plt
from keras.models import Sequential
from keras.layers.convolutional import Conv3D
from keras.layers.convolutional_recurrent import ConvLSTM2D
from keras.layers.normalization import BatchNormalization
from utils.plot_graph import keras_plot_model
from utils.search_model_paramter import grid_search
from utils.model_fit import train_model


def generate_movies(n_samples=1200, n_frames=15):
    """
    Generate synthetic movies of moving squares for ConvLSTM training.

    Each movie contains 3-7 squares (half-size 2-3 pixels) moving linearly
    across an 80x80 grid; frames are then cropped to the central 40x40
    window and clipped to [.., 1].

    :param n_samples: number of movies to generate
    :param n_frames: number of frames per movie
    :return: tuple (noisy_movies, shifted_movies), each of shape
             (n_samples, n_frames, 40, 40, 1); shifted_movies shows the
             same motion advanced by one time step (the prediction target)
    """
    row = 80
    col = 80
    # initialize buffers.
    # BUG FIX: np.float was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin float is the documented replacement (same float64 dtype).
    noisy_movies = np.zeros((n_samples, n_frames, row, col, 1),
                            dtype=float)
    shifted_movies = np.zeros((n_samples, n_frames, row, col, 1),
                              dtype=float)

    for sample in range(n_samples):
        # number of moving squares in this movie (3..7)
        square_count = np.random.randint(low=3, high=8)

        for _ in range(square_count):
            # initial position of the square's center
            row_start = np.random.randint(low=20, high=60)
            col_start = np.random.randint(low=20, high=60)

            # per-axis direction: -1, 0 or 1
            row_direct = np.random.randint(low=0, high=3) - 1
            col_direct = np.random.randint(low=0, high=3) - 1

            # half-size of the square (2 or 3 pixels)
            size_num = np.random.randint(low=2, high=4)

            for frame in range(n_frames):
                row_shift = row_start + row_direct * frame
                col_shift = col_start + col_direct * frame

                noisy_movies[sample,
                             frame,
                             row_shift - size_num: row_shift + size_num,
                             col_shift - size_num: col_shift + size_num,
                             0] += 1
                # with probability 0.5, add +/-0.1 noise on a one-pixel
                # ring around the square
                if np.random.randint(low=0, high=2):
                    noisy_value = np.power(-1, np.random.randint(low=0, high=2))
                    noisy_movies[sample,
                                 frame,
                                 row_shift - size_num - 1:
                                 row_shift + size_num + 1,

                                 col_shift - size_num - 1:
                                 col_shift + size_num + 1,

                                 0] += noisy_value * 0.1
                # target frame: same square advanced by one time step.
                # BUG FIX: the original slices were ordered
                # [center + size : center - size]; start >= stop yields an
                # EMPTY slice, so shifted_movies stayed all zeros and the
                # model was trained against a blank target. The bounds are
                # now low:high, matching the noisy_movies slices above.
                shift_step = 1
                shifted_movies[sample,
                               frame,
                               row_shift + row_direct * shift_step - size_num:
                               row_shift + row_direct * shift_step + size_num,

                               col_shift + col_direct * shift_step - size_num:
                               col_shift + col_direct * shift_step + size_num,

                               0] += 1
    # crop to the central 40x40 window
    noisy_movies = noisy_movies[::, ::, 20:60, 20:60, ::]
    shifted_movies = shifted_movies[::, ::, 20:60, 20:60, ::]

    # clip values accumulated by overlapping squares to 1
    noisy_movies[noisy_movies >= 1] = 1
    shifted_movies[shifted_movies >= 1] = 1

    return noisy_movies, shifted_movies


def build_fit_model(train_x, train_y, test_x, test_y,
                    unit, act, opt, loss, epochs, batch_size, verbose,
                    pic_path, model_name, workers=4,
                    use_multiprocessing=True, shuffle=True, plot=False,
                    **options):
    """
    Build, compile and train a ConvLSTM2D -> Conv3D video model.

    :param train_x: training input, shape (samples, frames, rows, cols, channels)
    :param train_y: training target, same rank as train_x
    :param test_x: validation input
    :param test_y: validation target
    :param unit: number of ConvLSTM2D filters
    :param act: activation for the final Conv3D layer
    :param opt: optimizer name or instance passed to compile()
    :param loss: loss name passed to compile()
    :param epochs: number of training epochs
    :param batch_size: training batch size
    :param verbose: Keras verbosity level
    :param pic_path: directory for training plots (forwarded to train_model)
    :param model_name: model identifier (forwarded to train_model)
    :param workers: number of data-loading workers
    :param use_multiprocessing: use process-based data loading
    :param shuffle: shuffle training data each epoch
    :param plot: whether train_model should plot training curves
    :param options: must supply 'kernel_size', a pair
        [convlstm_kernel (2-tuple), conv3d_kernel (3-tuple)]
    :return: result of train_model(...) — or None when 'kernel_size'
        is missing or falsy
    """
    kernel_size = options.get('kernel_size', None)
    if not kernel_size:
        # cannot build the layers without kernel sizes
        return
    model = Sequential()
    # recurrent convolutional layer over the frame sequence;
    # input frames may have variable count (None) but fixed spatial size
    model.add(ConvLSTM2D(filters=unit,
                         kernel_size=kernel_size[0],
                         input_shape=(None,
                                      train_x.shape[2],
                                      train_x.shape[3],
                                      train_x.shape[4]),
                         padding='same',
                         return_sequences=True))
    # batch normalization
    model.add(BatchNormalization())

    # project the ConvLSTM features back to the target channel count
    model.add(Conv3D(filters=train_y.shape[-1],
                     kernel_size=kernel_size[1],
                     activation=act,
                     padding='same',
                     data_format='channels_last'))

    model.compile(loss=loss, optimizer=opt)

    return train_model(model=model,
                       train_x=train_x, train_y=train_y,
                       test_x=test_x, test_y=test_y,
                       epochs=epochs,
                       model_name=model_name,
                       pic_path=pic_path,
                       shuffle=shuffle,
                       verbose=verbose,
                       plot=plot,
                       batch_size=batch_size,
                       workers=workers,
                       use_multiprocessing=use_multiprocessing)


def conv_lstm_run(workers):
    """
    Main program: generate data, optionally grid-search hyper-parameters,
    train the ConvLSTM model and save predicted-vs-truth frame images.

    :param workers: number of data-loading workers forwarded to training
    :return: None
    """
    get_parameter = False
    # BUG FIX: this flag used to be named `train_model`, shadowing the
    # imported utils.model_fit.train_model function within this scope
    run_training = True

    kernel_size_arr = [[(2, 2), (2, 2, 2)],
                       [(3, 3), (3, 3, 3)],
                       [(4, 4), (4, 4, 4)]]
    unit_arr = [30, 40, 50]
    act_arr = ['sigmoid']
    opt_arr = ['RMSprop', 'Adam', 'Adadelta', 'Nadam']
    loss_arr = ['binary_crossentropy']
    # padding_arr = ['valid', 'same']
    #
    # valid: no padding, the spatial size shrinks.
    # same: pads left/right/top/bottom so input and output sizes match:
    #   left/top get floor((kernel_size - 1) / 2),
    #   right/bottom get ceil((kernel_size - 1) / 2),
    #   then a valid-style sliding convolution is applied;
    #   output_shape = ceil(input_shape / stride)
    epochs = 20
    batch_size = 100

    train_data, test_data = generate_movies()
    # first 1000 samples for training, the rest for validation
    train_x = train_data[:1000]
    test_x = train_data[1000:]
    train_y = test_data[:1000]
    test_y = test_data[1000:]

    # hyper-parameter search
    if get_parameter:
        for kernel_size in kernel_size_arr:
            print('-' * 100)
            print('kernel_size: {}'.format(kernel_size))
            grid_search(func=build_fit_model,
                        train_x=train_x, train_y=train_y,
                        test_x=test_x, test_y=test_y,
                        unit_arr=unit_arr,
                        act_arr=act_arr,
                        opt_arr=opt_arr,
                        loss_arr=loss_arr,
                        epochs=epochs,
                        batch_size=batch_size,
                        workers=workers,
                        use_multiprocessing=True,
                        shuffle=True,
                        plot=False,
                        kernel_size=kernel_size)

    # model training
    if run_training:
        model, loss_metrics = \
            build_fit_model(train_x=train_x, train_y=train_y,
                            test_x=test_x, test_y=test_y,
                            unit=60,
                            act='sigmoid',
                            opt='RMSprop',
                            loss='binary_crossentropy',
                            pic_path='../pic',
                            model_name='CNN_LSTM',
                            epochs=epochs,
                            batch_size=batch_size,
                            verbose=1,
                            workers=workers,
                            use_multiprocessing=True,
                            shuffle=True,
                            plot=True,
                            kernel_size=[(4, 4), (4, 4, 4)])
        print('loss_metrics: {}'.format(loss_metrics))
        # plot model architecture
        keras_plot_model(model=model, pic='../pic/CNN_LSTM.png')

        # predict: seed with the first 7 frames of a held-out sample,
        # then roll the model forward 16 steps, appending each new frame
        idx = 1024
        track = train_data[idx][:7, ::, ::, ::]
        for _ in range(16):
            new_pos = model.predict(track[np.newaxis, ::, ::, ::, ::])
            new_frame = new_pos[::, -1, ::, ::, ::]
            track = np.concatenate((track, new_frame), axis=0)

        # side-by-side images: model trajectory (left) vs ground truth (right)
        track_true = train_data[idx][::, ::, ::, ::]
        for i in range(15):
            fig = plt.figure(figsize=(10, 5))
            ax = fig.add_subplot(121)
            if i >= 7:
                ax.text(1, 3, 'predictions', fontsize=20, color='w')
            else:
                ax.text(1, 3, 'Initial trajectory', fontsize=20)

            toplot = track[i, ::, ::, 0]

            plt.imshow(toplot)

            ax = fig.add_subplot(122)
            plt.text(1, 3, 'Ground truth', fontsize=20)

            toplot_truth = track_true[i, ::, ::, 0]
            if i >= 2:
                # ground truth target is the one-step-shifted movie
                toplot_truth = test_data[idx][i - 1, ::, ::, 0]

            plt.imshow(toplot_truth)
            plt.savefig('../pic/test/{}_animate.png'.format(i + 1))
            # BUG FIX: close each figure; the loop used to leave all 15
            # figures open, accumulating memory
            plt.close(fig)
