# -*- coding: UTF-8 -*-

import os
import json
import sys

import matplotlib.pyplot as plt
from core.data_processor import DataLoader
from core.model import Model
import pandas as pd
import xgboost as xgb
import datetime as dt
import numpy as np

# Widen the pandas console output so wide DataFrames print on one line.
desired_width = 320
pd.set_option('display.width', desired_width)
pd.set_option('display.max_columns', 20)


def plot_results(xtime, predicted_data, true_data, title):
    """Plot predicted vs. true series on one axes with date tick labels.

    :param xtime: pd.Series of timestamp strings aligned with true_data
    :param predicted_data: predicted values (sequence)
    :param true_data: true values (sequence)
    :param title: figure title
    """
    fig = plt.figure(facecolor='white')
    ax = fig.add_subplot(111)

    # Aim for ~15 x-tick labels; clamp the step to >= 1 so short series
    # (len < 15) no longer crash on a zero slice/arange step.
    draw_periods = max(1, len(true_data) // 15)
    # Keep only the date part (first 10 chars) of each sampled timestamp.
    labels = xtime[::draw_periods].str.slice(0, 10).tolist()
    ticks = np.arange(len(true_data), step=draw_periods)

    ax.plot(true_data, label='True Data')
    ax.plot(predicted_data, label='Prediction')
    # Set ticks once via the axes API (the original duplicated this through
    # both ax.set_xticks and plt.xticks).
    ax.set_xticks(ticks)
    ax.set_xticklabels(labels)
    ax.set_title(title)
    fig.autofmt_xdate()
    ax.legend()
    plt.show()


def plot_results_multiple(predicted_data, true_data, prediction_len):
    """Plot the true series plus several fixed-length prediction segments.

    Each prediction segment is left-padded with None so it lines up with
    its correct starting point in the true series.
    """
    fig = plt.figure(facecolor='white')
    ax = fig.add_subplot(111)
    ax.plot(true_data, label='True Data')
    for idx, segment in enumerate(predicted_data):
        offset = [None] * (idx * prediction_len)
        plt.plot(offset + segment, label='Prediction')
        plt.legend()
    plt.show()


def xgboost_regression(train_features, train_labels, test_features,
                       predict_type, window_size, sequence_length, training_date):
    """Train an XGBoost regressor, save it, and predict on the test set.

    :param train_features: training features, shape (samples, seq_len, n_cols)
    :param train_labels: training labels
    :param test_features: test features, shape (samples, seq_len, n_cols)
    :param predict_type: prediction target type: 'return' or 'volatility'
    :param window_size: window size: 15, 30, 60
    :param sequence_length: look-back sequence length: 72, 132
    :param training_date: date string splitting train and test data
    :return: predictions for test_features
    """
    # NOTE(review): 'reg:linear' is a deprecated alias of 'reg:squarederror'
    # and 'silent' is deprecated in recent xgboost releases — kept as-is to
    # avoid changing behavior on the pinned version; confirm before upgrading.
    xg_reg = xgb.XGBRegressor(learning_rate=0.1, n_estimators=100,
                              max_depth=5, min_child_weight=1,
                              gamma=0, subsample=0.8,
                              colsample_bytree=0.8, objective="reg:linear",
                              nthread=-1, scale_pos_weight=1, seed=27, silent=False)

    # Flatten the (samples, seq_len, n_cols) windows to 2-D for xgboost.
    train_features = train_features.reshape(train_features.shape[0],
                                            train_features[0].shape[0] * train_features[0].shape[1])
    test_features = test_features.reshape(test_features.shape[0],
                                          test_features[0].shape[0] * test_features[0].shape[1])
    xg_reg.fit(train_features, train_labels, verbose=True)

    # Ensure the save directory exists: save_model() raised when the
    # hard-coded 'saved_models_2019' folder was missing.
    save_dir = 'saved_models_2019'
    os.makedirs(save_dir, exist_ok=True)
    model_file_name = "%s-%d-%d-%s.xgb" % (predict_type, window_size, sequence_length, training_date)
    xg_reg.save_model(os.path.join(save_dir, model_file_name))

    predictions = xg_reg.predict(test_features)
    return predictions


def train_and_test(configs, x, y, x_test, predict_type, window_size, sequence_length, training_date):
    """Train the model selected by the config and return test predictions.

    :param configs: parsed config dict; configs['model']['name'] selects
        'xgboost' or 'lstm'
    :param x: training features
    :param y: training labels
    :param x_test: test features
    :param predict_type: prediction target type (forwarded to xgboost path)
    :param window_size: window size (forwarded to xgboost path)
    :param sequence_length: look-back length (forwarded to xgboost path)
    :param training_date: train/test split date string (forwarded)
    :return: predictions for x_test
    :raises SystemExit: when the configured model name is unsupported
    """
    model_name = configs['model']['name']
    if model_name == 'xgboost':
        return xgboost_regression(x, y, x_test, predict_type, window_size, sequence_length, training_date)
    if model_name == 'lstm':
        model = Model()
        model.build_model(configs, x.shape[2])
        # In-memory training on the full training set.
        model.train(
            x,
            y,
            epochs=configs['training']['epochs'],
            batch_size=configs['training']['batch_size'],
            save_dir=configs['model']['save_dir']
        )
        return model.predict_point_by_point(x_test)
    # Exit with a non-zero status and the message on stderr (the original
    # printed to stdout and exited with status 0).
    sys.exit('Unsupported model type')


def main(training_config_file_name):
    """Train a model from a JSON config, save predictions to CSV, evaluate.

    :param training_config_file_name: config file covering the prediction
        signal (return / volatility / price) and window / sequence_length
        settings
    """
    # Use a context manager so the config file handle is closed
    # deterministically (the original json.load(open(...)) leaked it).
    with open(training_config_file_name, 'r') as config_file:
        configs = json.load(config_file)
    if not os.path.exists(configs['model']['save_dir']):
        os.makedirs(configs['model']['save_dir'])
    if not os.path.exists(configs['data']['predict_save_dir']):
        os.makedirs(configs['data']['predict_save_dir'])

    data = DataLoader(configs)

    x, y = data.get_train_data(
        seq_len=configs['data']['sequence_length'],
        normalise=configs['data']['normalise'])

    x_test, y_test = data.get_test_data(
        seq_len=configs['data']['sequence_length'],
        normalise=configs['data']['normalise'])

    predict_type = configs['predict']['signal']
    window_size = int(configs['predict']['window'])
    sequence_length = int(configs['data']['sequence_length'])
    # Keep only the first 10 chars of the split date (YYYY-MM-DD) and strip
    # the dashes, giving a compact YYYYMMDD token for file names.
    train_test_split = configs['data']['train_test_split'][:10].replace('-', '')
    predictions = train_and_test(configs, x, y, x_test, predict_type, window_size, sequence_length, train_test_split)
    plot_results(pd.Series(data.get_test_time()[:, 0]), predictions, y_test, 'original predict and true data')

    real_y_column = 'real_' + configs['predict']['signal']
    predict_y_column = 'predict_' + configs['predict']['signal']
    result = pd.DataFrame(data.get_test_time(), columns=['time', real_y_column])
    result['y0'] = data.get_test_y0()
    result['predict_res'] = predictions
    result['real_res'] = y_test
    # De-normalise: predicted value = (relative result + 1) * base value y0.
    result[predict_y_column] = (result['predict_res'] + 1) * result['y0']
    if configs['predict']['signal'] == 'price':
        # For price targets also derive period-over-period returns.
        result['real_return'] = result[real_y_column] / result[real_y_column].shift(1) - 1
        result['predict_return'] = result[predict_y_column] / result[real_y_column].shift(1) - 1

    result_save_path = os.path.join(configs['data']['predict_save_dir'],
                                    'predict_result_%s_%d_%d_%s_%s.csv' % (predict_type, window_size,
                                                                           sequence_length, train_test_split,
                                                                           dt.datetime.now().strftime('%Y%m%d-%H%M%S')))
    print('result saved path: %s' % result_save_path)
    result = result.round(5)
    result.to_csv(result_save_path, index=False)
    evaluate_result(result_save_path, configs['predict']['signal'])


def _print_abs_stats(data, threshold_data, column):
    """Print mean |value| overall, over the threshold subset, and the ratio."""
    avg_all = abs(data[column]).mean()
    threshold_avg = abs(threshold_data[column]).mean()
    print('avg_all: %s' % round(avg_all, 4))
    print('avg: %s' % round(threshold_avg, 4))
    print('result: %s' % round((threshold_avg / avg_all), 4))


def evaluate_result(path, predict_signal):
    """Load a saved prediction CSV and print evaluation statistics.

    For predict_signal == 'price' the derived return columns are evaluated
    instead of the raw price columns.

    :param path: CSV file produced by main()
    :param predict_signal: 'return', 'volatility' or 'price'
    """
    real_y_column = 'real_' + predict_signal if predict_signal != 'price' else 'real_return'
    predict_y_column = 'predict_' + predict_signal if predict_signal != 'price' else 'predict_return'
    data = pd.read_csv(path)

    plot_results(data['time'], data[predict_y_column], data[real_y_column], 'predict and true ' + predict_signal)
    print(data)
    print(data.describe())
    print(data.corr())
    # The ~20% of rows with the largest |real value| form the threshold subset.
    threshold = sorted(data[real_y_column].abs(), reverse=True)[int(len(data) * 0.2)]
    threshold_data = data.loc[abs(data[real_y_column]) >= threshold]
    print('\nthreshold is %s, percentage is %s' % (threshold, (float(len(threshold_data)) / len(data))))
    signal_label = predict_signal if predict_signal != 'price' else 'return'
    print('--------- predict %s -------' % signal_label)
    _print_abs_stats(data, threshold_data, predict_y_column)
    print('---------- real %s ---------' % signal_label)
    _print_abs_stats(data, threshold_data, real_y_column)
    print('---------------------------------')
    if predict_signal in ['return', 'price']:
        print('real positive count: %s, negative count: %s, percentage: %s' % (
            len(data.loc[data[real_y_column] > 0]), len(data.loc[data[real_y_column] < 0]),
            len(data.loc[data[real_y_column] > 0]) / float(len(data))))
        print('predict positive count: %s, negative count: %s, percentage: %s' % (
            len(data.loc[data[predict_y_column] > 0]), len(data.loc[data[predict_y_column] < 0]),
            len(data.loc[data[predict_y_column] > 0]) / float(len(data))))
        # Sign agreement: positive product means real and predicted moved the
        # same direction; zero products are counted as neither.
        data['res'] = data[real_y_column] * data[predict_y_column]
        true_count = len(data.loc[data['res'] > 0])
        false_count = len(data.loc[data['res'] < 0])
        decided = true_count + false_count
        if decided:
            print('true count: %s, false count: %s, correct rate: %s' % (
                true_count, false_count, float(true_count) / decided))
        else:
            # Guard: every product was 0 — avoid the ZeroDivisionError.
            print('true count: 0, false count: 0, correct rate: n/a')


if __name__ == '__main__':
    # Require the config file path as the single command-line argument.
    args = sys.argv
    if len(args) >= 2:
        main(args[1])
    else:
        print("usage: python ./Training.py [config_file_name]")
        print("examples: python ./Training.py config/config-return-15-72.json")
        exit()
