# coding=utf-8

from tools import *
from model.LSTM_FC import *
from model.LSTM import *
from model.FC import *
import pandas as pd


def run_grid_comb_separate(train, test, grid_num, time_chunk_size, scalar, output_path,
                           learning_rate, hidden_size, num_layers, fc_hidden_list, lstm_num_epochs, fc_num_epochs):
    """Train a per-grid LSTM, feed its representation into a per-grid FC model, and compare them.

    For each grid cell: an LSTM is trained on the six lagged-count columns,
    its output features are concatenated onto the remaining (non-lag) feature
    columns, and an FC network is trained on that combined matrix.  Both
    models' scaled predictions are validated against the scaled ground truth
    and plotted to ./data/fig/grid_comb_separate/<grid_id>.

    Prints the number of grids where the FC model beat the LSTM on MAE,
    followed by the total number of grids processed.
    """
    lag_cols = ['last1', 'last2', 'last3', 'last4', 'last5', 'last6']
    steps_train = int(len(train) / grid_num)
    steps_test = int(len(test) / grid_num)

    y_train = np.array(train['counts'], dtype=np.float32).reshape(grid_num, steps_train)
    y_test = np.array(test['counts'], dtype=np.float32).reshape(grid_num, steps_test)
    # Split the data into per-grid chunks: (grid, batch=1, time, lag) for the LSTM.
    x_train_lstm = np.array(train[lag_cols],
                            dtype=np.float32).reshape((grid_num, 1, steps_train, time_chunk_size))
    x_test_lstm = np.array(test[lag_cols],
                           dtype=np.float32).reshape((grid_num, 1, steps_test, time_chunk_size))
    # Remaining feature columns for the FC model; 7 = six lag columns + 'counts'.
    x_train_fc = np.array(train.drop(lag_cols + ['counts'], axis=1),
                          dtype=np.float32).reshape((grid_num, steps_train, train.shape[1] - 7))
    # BUG FIX: original used train.shape[1] for the test reshape; use test's own column count.
    x_test_fc = np.array(test.drop(lag_cols + ['counts'], axis=1),
                         dtype=np.float32).reshape((grid_num, steps_test, test.shape[1] - 7))

    win = 0
    grid_ids = train['grid_id'].unique()
    for index, (x_tr_ls, x_tr_fc, y_tr, x_te_ls, x_te_fc, y_te) in enumerate(
            zip(x_train_lstm, x_train_fc, y_train, x_test_lstm, x_test_fc, y_test)):
        lstm_train_out, to_fc_tr, lstm_predict, to_fc_te, lstm_loss_changes = \
            LSTM_separate.train_and_predict(x_tr_ls, y_tr, x_te_ls, y_te, grid_num, time_chunk_size,
                                            hidden_size, num_layers, learning_rate, lstm_num_epochs)

        # Append the LSTM's output features to the FC feature matrices.
        x_tr_fc = np.hstack([x_tr_fc, to_fc_tr])
        x_te_fc = np.hstack([x_te_fc, to_fc_te])

        fc_train_out, fc_test_out, loss_changes = \
            FC.train_and_predict(x_tr_fc, y_tr, x_te_fc, y_te, fc_hidden_list, learning_rate, fc_num_epochs)

        # validate returns [mae, mse, mare, r2]; compare the models on MAE (index 0).
        performance_ls = validate(y_te * scalar, lstm_predict * scalar)
        performance_fc = validate(y_te * scalar, fc_test_out * scalar)
        if performance_fc[0] < performance_ls[0]:
            win += 1

        x_axis = [i + 1 for i in range(len(y_te))]
        plt.plot(x_axis, y_te * scalar)
        plt.plot(x_axis, lstm_predict * scalar)
        plt.plot(x_axis, fc_test_out * scalar)
        plt.savefig('./data/fig/grid_comb_separate/' + str(grid_ids[index]))
        plt.close()
    print(win, grid_num)


def run_LSTM_separate(train, test, grid_num, time_chunk_size, scalar, output_path,
                      learning_rate, hidden_size, num_layers, fc_hidden_list, lstm_num_epochs, fc_num_epochs):
    """Train one independent LSTM per grid cell and plot its test predictions.

    The six lagged-count columns are reshaped into a (1, time, lag) chunk per
    grid, a separate LSTM is fitted for each grid, and the scaled prediction
    vs. ground truth is saved to ./data/fig/lstm/<grid_id>.
    (fc_hidden_list / fc_num_epochs are unused here; kept for a uniform signature.)
    """
    lag_cols = ['last1', 'last2', 'last3', 'last4', 'last5', 'last6']
    samples_train = int(len(train) / grid_num)
    samples_test = int(len(test) / grid_num)

    y_train = np.array(train['counts'], dtype=np.float32).reshape(grid_num, samples_train)
    y_test = np.array(test['counts'], dtype=np.float32).reshape(grid_num, samples_test)

    x_train_lstm = np.array(train[lag_cols],
                            dtype=np.float32).reshape((grid_num, 1, samples_train, time_chunk_size))
    x_test_lstm = np.array(test[lag_cols],
                           dtype=np.float32).reshape((grid_num, 1, samples_test, time_chunk_size))

    grid_ids = train['grid_id'].unique()
    for idx, (x_tr, x_te, y_tr, y_te) in enumerate(zip(x_train_lstm, x_test_lstm, y_train, y_test)):
        # NOTE(review): this call is unpacked into 3 values, but
        # run_grid_comb_separate unpacks 5 from the same function — confirm
        # which return signature LSTM_separate.train_and_predict actually has.
        lstm_train_out, lstm_predict, lstm_loss_changes = \
            LSTM_separate.train_and_predict(x_tr, y_tr, x_te, y_te, grid_num, time_chunk_size,
                                            hidden_size, num_layers, learning_rate, lstm_num_epochs)
        validate(y_te * scalar, lstm_predict * scalar)

        x_axis = list(range(1, len(y_te) + 1))
        plt.plot(x_axis, y_te * scalar)
        plt.plot(x_axis, lstm_predict * scalar)
        plt.savefig('./data/fig/lstm/' + str(grid_ids[idx]))
        plt.close()


def run_LSTM_FC_separate(train, test, grid_num, time_chunk_size, scalar, output_path,
                         learning_rate, hidden_size, num_layers, fc_hidden_list, lstm_num_epochs, fc_num_epochs):
    """Train one combined LSTM+FC model per grid cell and plot its test predictions.

    The lagged-count columns feed the LSTM branch and the remaining feature
    columns feed the FC branch of LSTM_FC_separate.  The scaled prediction
    vs. ground truth is saved to ./data/fig/comb/<grid_id>.

    NOTE(review): lstm_num_epochs is accepted but never passed to the model
    (only fc_num_epochs is) — confirm that is intended.
    """
    lag_cols = ['last1', 'last2', 'last3', 'last4', 'last5', 'last6']
    steps_train = int(len(train) / grid_num)
    steps_test = int(len(test) / grid_num)

    y_train = np.array(train['counts'], dtype=np.float32).reshape(grid_num, steps_train)
    y_test = np.array(test['counts'], dtype=np.float32).reshape(grid_num, steps_test)

    x_train_lstm = np.array(train[lag_cols],
                            dtype=np.float32).reshape((grid_num, 1, steps_train, time_chunk_size))
    x_test_lstm = np.array(test[lag_cols],
                           dtype=np.float32).reshape((grid_num, 1, steps_test, time_chunk_size))
    # CONSISTENCY FIX: cast to float32 like every sibling runner (was default float64).
    x_train_fc = np.array(train.drop(lag_cols + ['counts'], axis=1),
                          dtype=np.float32).reshape((grid_num, steps_train, train.shape[1] - 7))
    # BUG FIX: original reshaped the test features with train.shape[1]; use test's columns.
    x_test_fc = np.array(test.drop(lag_cols + ['counts'], axis=1),
                         dtype=np.float32).reshape((grid_num, steps_test, test.shape[1] - 7))

    grid_ids = train['grid_id'].unique()
    for index, (x_tr_ls, x_tr_fc, y_tr, x_te_ls, x_te_fc, y_te) in enumerate(
            zip(x_train_lstm, x_train_fc, y_train, x_test_lstm, x_test_fc, y_test)):
        lstm_train_out, lstm_predict, lstm_loss_changes = \
            LSTM_FC_separate.train_and_predict(x_tr_ls, x_tr_fc, y_tr, x_te_ls, x_te_fc, y_te,
                                               grid_num, time_chunk_size, hidden_size, num_layers, fc_hidden_list,
                                               learning_rate, fc_num_epochs)

        validate(y_te * scalar, lstm_predict * scalar)

        x_axis = [i + 1 for i in range(len(y_te))]
        plt.plot(x_axis, y_te * scalar)
        plt.plot(x_axis, lstm_predict * scalar)
        plt.savefig('./data/fig/comb/' + str(grid_ids[index]))
        plt.close()


def run_model(train, test, grid_num, time_chunk_size, scalar, output_path,
              learning_rate, hidden_size, num_layers, fc_hidden_list, lstm_num_epochs, fc_num_epochs):
    """Train a global LSTM and a global LSTM+FC model over all grids at once.

    Saves both loss curves to <output_path>loss.csv and the validation
    metrics ([mae, mse, mare, r2] per model) to <output_path>result.csv.
    NOTE(review): output_path is used as a bare filename prefix (no '/'
    separator is added) — confirm that is the intended naming scheme.
    """
    lag_cols = ['last1', 'last2', 'last3', 'last4', 'last5', 'last6']

    y_train = np.array(train['counts'], dtype=np.float32).reshape(len(train))
    y_test = np.array(test['counts'], dtype=np.float32).reshape(len(test))

    x_train_lstm = np.array(train[lag_cols],
                            dtype=np.float32).reshape((grid_num, 1, int(len(train) / grid_num), time_chunk_size))
    x_test_lstm = np.array(test[lag_cols],
                           dtype=np.float32).reshape((grid_num, 1, int(len(test) / grid_num), time_chunk_size))
    x_train_fc = np.array(train.drop(lag_cols + ['counts'], axis=1))
    x_test_fc = np.array(test.drop(lag_cols + ['counts'], axis=1))

    lstm_train_out, lstm_predict, lstm_loss_changes = \
        LSTM.train_and_predict(x_train_lstm, y_train, x_test_lstm, y_test, grid_num, time_chunk_size,
                               hidden_size, num_layers, learning_rate, lstm_num_epochs)
    combine_train_out, combine_predict, combine_loss_changes = \
        LSTM_FC.train_and_predict(x_train_lstm, x_train_fc, y_train, x_test_lstm, x_test_fc, y_test,
                                  grid_num, time_chunk_size, hidden_size, num_layers, fc_hidden_list,
                                  learning_rate, fc_num_epochs)

    save_excel([combine_loss_changes, lstm_loss_changes], output_path + 'loss.csv')
    results = [
        validate(y_test * scalar, lstm_predict * scalar),
        validate(y_test * scalar, combine_predict * scalar),
    ]
    save_excel(results, output_path + 'result.csv')


def main():
    """Entry point: load the selected dataset, set hyper-parameters, run the experiment."""
    place = 'xm'
    if place == 'xm':
        num = 20
        interval = 60
        time_chunk_size = 6
        data, scalar = load_xm(num, interval, time_chunk_size)
    elif place == 'cd':
        num = 100
        interval = 60
        time_chunk_size = 6
        data = pd.read_csv('./data/' + str(interval) + '_chengdu_feature_data.data')
        scalar = 1397  # NOTE(review): hard-coded rescale factor for the cd dataset — confirm
    else:
        # Fail fast instead of hitting a NameError on undefined data/scalar below.
        raise ValueError('unknown place: ' + place)

    # Optional ablation: drop POI category columns (Food, Hotel, Transport, ...)
    # via data.drop([...], axis=1) before training.

    # General training settings
    learning_rate = 0.01
    lstm_num_epochs = 400
    fc_num_epochs = 2000
    split_date = 7
    grid_num = len(data['grid_id'].unique())
    # Model settings
    hidden_size = 16  # LSTM hidden units; also the width of the LSTM->FC features
    num_layers = 4  # number of stacked LSTM layers
    fc_hidden_list = [32, 12]  # neurons per FC hidden layer
    # Embedding settings
    need_embed = False
    embedding_cols = ['month_day', 'time_chunk', 'grid_id', 'hour']
    embedding_dims = [2, 2, 2, 2]
    # Output naming
    info = str(num) + '_' + str(interval) + 'try' + place
    output_path = './data/result/' + info

    if need_embed:
        train, test = embed(data, split_date, embedding_cols, embedding_dims)
    else:
        train = data[data.month_day <= split_date]
        test = data[data.month_day > split_date]
        # NOTE(review): further restricts the test set to days after the 15th — confirm intended.
        test = test[test.month_day > 15]

    # To repeat the experiment several times, loop over run_model with a
    # per-iteration output_path ('./data/result/' + info + '/' + str(i)).
    run_grid_comb_separate(train, test, grid_num, time_chunk_size, scalar, output_path,
                           learning_rate, hidden_size, num_layers, fc_hidden_list,
                           lstm_num_epochs, fc_num_epochs)


if __name__ == '__main__':
    main()
