# -*- coding: UTF-8 -*-

from __future__ import print_function

import sys
from datetime import datetime, timedelta
import pandas as pd
from pandas.plotting import scatter_matrix
from scipy.stats import pearsonr
import numpy as np
import os
import xgboost as xgb
from sklearn.metrics import mean_squared_error
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
from features.setGenerator import get_exchange_set, get_all_network_set
from plt_auc import auc_return_sigmoid
import itertools
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE

# Console display sizing for pandas / numpy output.
desired_width = 320
pd.set_option('display.width', desired_width)
pd.set_option('display.max_columns', 20)
np.set_printoptions(linewidth=desired_width)

data_start_time = '2018-01-01 00:00:00'  # start of the raw data range
data_end_time = '2018-11-19 00:00:00'  # end of the raw data range
test_start_time = '2018-10-01 00:00:00'  # first prediction timestamp
test_end_time = '2018-11-18 00:00:00'  # last prediction timestamp
training_day = 120  # length of the rolling training window, in days
test_window_count = 1  # number of prediction windows per iteration (window size = label step)
log_return = True  # if True, 'return' labels are log10 ratios instead of simple returns
evaluation_return_threshold = np.log10(1.03) if log_return else 0.03  # |return| threshold used when evaluating results

# Model parameters
features_step = 60 * 24  # feature aggregation step, in minutes (one day)
labels_step = 60 * 24  # label horizon, in minutes (one day)
predict_step = 60  # prediction stride: one prediction every N minutes
labels_column_name = 'return'  # target column: 'return' or 'volatility'
# 'okex', 'binance2bitmex', 'binance', 'block', 'bitmex_maker', 'omni'
# features_list = ['okex', 'binance2bitmex', 'binance', 'block']
features_list = ['block']  # feature sets to load and merge
out_chain_features_list = []  # off-chain feature names to add ('price', 'volume')
model_list = ['xgboost', 'gbdt']  # 'random_forest'

# Verbosity of intermediate output
model_verbose = 0  # 2
print_intermediate_results = False

# Feature-engineering parameters
big_value_threshold = 1000
maker_threshold = 10
exchange_histogram_threshold = 10
exchange_bins = 10
block_histogram_threshold = 100
block_bins = 10

# Path parameters
# features_path = '/data1/features/step-' + str(features_step) + '/'
features_path = '../data/features/step-1440-bk/'
labels_path = '../data/OkEX_BTC_USD_quarter_20180601_20181118.csv'
block_original_data_path = '../data/block.csv'
price_header = 'close'
predict_signal_dir = '../data/predict/'
predict_log_dir = '../data/predict-log/'


def read_features(name):
    """
    Load a single feature CSV, generating it first if it does not exist.

    :param name: feature-set name (e.g. 'block', 'okex')
    :return: feature DataFrame
    """
    file_path = '%s%s-feature-%s.csv' % (features_path, name, features_step)
    if not os.path.exists(file_path):
        # Missing feature file -> build it before loading.
        generate_features(name)
    features = pd.read_csv(file_path)
    print('read features', file_path, ' read shape', features.shape)
    return features


def get_out_of_chain_set():
    """
    Build off-chain (exchange) features: rolling price / volume statistics.

    Which groups are computed is controlled by ``out_chain_features_list``.
    Raw OHLCV columns are dropped so only 'time' plus derived stats remain.

    :return: DataFrame with a 'time' column and the requested rolling stats
    """
    frame = pd.read_csv(labels_path, index_col=False, header=0)
    if 'price' in out_chain_features_list:
        rolling_price = frame[price_header].rolling(features_step)
        frame['price-std'] = rolling_price.std()
        frame['price-skew'] = rolling_price.skew()
        frame['price-mean'] = rolling_price.mean()
    if 'volume' in out_chain_features_list:
        rolling_volume = frame['volume'].rolling(features_step)
        frame['volume-sum'] = rolling_volume.sum()
        frame['volume-std'] = rolling_volume.std()
        frame['volume-skew'] = rolling_volume.skew()
        frame['volume-mean'] = rolling_volume.mean()

    frame.fillna(0, inplace=True)
    # Drop the raw market-data columns; only derived features are kept.
    for raw_column in ('open', 'high', 'low', 'close', 'volume', 'volume-dup'):
        del frame[raw_column]
    return frame


def get_feature_and_labels():
    """
    Load the merged feature matrix and the aligned label series.

    Every feature set in ``features_list`` is merged on 'time'; optional
    off-chain features are merged in as well. Labels are then aligned to the
    feature timestamps and gaps are forward-filled with the last price.

    :return: (features DataFrame, labels DataFrame containing the target column)
    """
    features = read_features(features_list[0])
    for features_name in features_list[1:]:
        curr_features_df = read_features(features_name)
        features = features.merge(curr_features_df, on='time', how='left')

    if len(out_chain_features_list) > 0:
        features = features.merge(get_out_of_chain_set(), on='time', how='left')

    # Read the label (price) data.
    # labels = pd.read_csv(labels_path, index_col=False, header=0)[['time', price_header]]
    labels = pd.read_csv(labels_path, index_col=False, header=0)
    print('read labels', labels_path, ' read shape', labels.shape)
    # Features must not have missing rows; labels may (they are forward-filled below).
    if len(labels) > len(features):
        # BUG FIX: the original message had features/labels swapped. This branch
        # fires when the FEATURE series is the one with missing rows, which is fatal.
        print('The number of labels is greater than the number of features, please check!')
        exit()
    elif len(labels) < len(features):
        print('--Warning-- Label has missing values and will be populated with the last moment price.')

    # Align labels to the feature timestamps and forward-fill any gaps.
    labels = features[['time']].merge(labels, on='time', how='left')
    labels = labels.fillna(method='ffill')
    # Compute the target column: 'return' or 'volatility'.
    if labels_column_name == 'return':
        # return formula: (P(t+n) - P(t+1)) / P(t+1); log10 ratio when log_return
        if log_return:
            labels['return'] = np.log10(labels[price_header].shift(-labels_step) / labels[price_header].shift(-1))
        else:
            labels['return'] = (labels[price_header].shift(-labels_step) - labels[price_header].shift(-1)) / labels[
                price_header].shift(-1)
        labels['volatility'] = labels[price_header] \
            .rolling(labels_step, min_periods=1).std().shift(-labels_step)
    elif labels_column_name == 'volatility':
        # volatility formula: std(P(t+1)~P(t+n))
        labels['volatility'] = labels[price_header] \
            .rolling(labels_step, min_periods=1).std().shift(-labels_step)
    return features, labels


def random_forest_regression(train_features, train_labels, test_features):
    """
    Random-forest regression: fit on the training split, predict the test split.

    :param train_features: training feature matrix
    :param train_labels: training target values
    :param test_features: test feature matrix
    :return: array of predictions for test_features
    """
    model = RandomForestRegressor(n_estimators=50, random_state=42, verbose=model_verbose)
    # Fit, then score the held-out window.
    model.fit(train_features, train_labels)
    predictions = model.predict(test_features)

    if print_intermediate_results:
        print('---------------random forest result---------------------')
        print('predict labels:', predictions)
    return predictions


def xgboost_regression(train_features, train_labels, test_features):
    """
    XGBoost regression: fit on the training split, predict the test split.

    Hyper-parameters are selected by the module-level ``labels_column_name``.

    :param train_features: training feature matrix
    :param train_labels: training target values
    :param test_features: test feature matrix
    :return: array of predictions for test_features
    :raises ValueError: if ``labels_column_name`` is not a supported target
    """
    # objective alternatives considered: reg:gamma reg:tweedie reg:linear
    if labels_column_name == 'volatility':
        xg_reg = xgb.XGBRegressor(objective='reg:linear', colsample_bytree=0.3, learning_rate=0.1,
                                  max_depth=5, alpha=10, n_estimators=10)
    elif labels_column_name == 'return':
        xg_reg = xgb.XGBRegressor(learning_rate=0.1, n_estimators=100,
                                  max_depth=5, min_child_weight=1,
                                  gamma=0, subsample=0.8,
                                  colsample_bytree=0.8, objective="reg:linear",
                                  nthread=-1, scale_pos_weight=1, seed=27)
    else:
        # BUG FIX: the original used `global xg_reg`, so an unknown target would
        # raise NameError on first use or silently reuse a stale model afterwards.
        raise ValueError('unsupported labels_column_name: %s' % labels_column_name)
    xg_reg.fit(train_features, train_labels)
    predictions = xg_reg.predict(test_features)
    if print_intermediate_results:
        print('--------------------xgboost result----------------------')
        print('predict labels:', predictions)

    return predictions


def gbdt_regression(train_features, train_labels, test_features):
    """
    Gradient-boosted-tree regression with sklearn's default hyper-parameters.

    :param train_features: training feature matrix
    :param train_labels: training target values
    :param test_features: test feature matrix
    :return: array of predictions for test_features
    """
    model = GradientBoostingRegressor(verbose=model_verbose)
    # Fit, then score the held-out window.
    model.fit(train_features, train_labels)
    predictions = model.predict(test_features)

    if print_intermediate_results:
        print('--------------------gbdt result-------------------------')
        print('predict labels:', predictions)
    return predictions


def get_correlation(array_x, array_y):
    """
    Compute the Pearson correlation and the RMSE between two series.

    :param array_x: first value series (e.g. true labels)
    :param array_y: second value series (e.g. model predictions)
    :return: dict with 'personr' (rounded Pearson r; key name kept as-is for
             backward compatibility) and 'mse' (actually the RMSE, rounded)
    """
    pearson_res = pearsonr(array_x, array_y)
    # sqrt(mean squared error) == RMSE. The original variable was misleadingly
    # named `mse` and routed through sklearn for a one-line numpy computation.
    diff = np.asarray(array_x, dtype=float) - np.asarray(array_y, dtype=float)
    rmse = round(float(np.sqrt(np.mean(diff ** 2))), 3)
    if print_intermediate_results:
        print("Pearson: ", pearson_res)
        print("RMSE: %f" % rmse)

    return {'personr': round(pearson_res[0], 3), 'mse': rmse}


def training_and_test(train_features, train_labels, test_features, test_labels):
    """
    Train every model configured in ``model_list`` and collect predictions.

    :param train_features: training feature DataFrame
    :param train_labels: training label DataFrame (contains the target column)
    :param test_features: test feature DataFrame
    :param test_labels: test label DataFrame (contains the target column)
    :return: DataFrame with the real target column plus one prediction column
             per configured model
    """
    # Convert once; every model receives the same numeric arrays.
    x_train = np.array(train_features)
    y_train = np.array(train_labels[labels_column_name])
    x_test = np.array(test_features)

    # Fixed order keeps the result columns identical to the original layout.
    regressors = (
        ('xgboost', xgboost_regression),
        ('gbdt', gbdt_regression),
        ('random_forest', random_forest_regression),
    )

    result_dict = pd.DataFrame()
    result_dict[labels_column_name] = test_labels[labels_column_name]
    for model_name, regression in regressors:
        if model_name in model_list:
            result_dict[model_name] = regression(train_features=x_train,
                                                 train_labels=y_train,
                                                 test_features=x_test)

    return result_dict


def generate_features(feature_name):
    """
    Build the feature CSV for *feature_name* under ``features_path``.

    'block' features come from the whole-network block data file; any other
    name is treated as an exchange feature set.
    """
    if feature_name == 'block':
        get_all_network_set(data_name=block_original_data_path, out_path=features_path,
                            start_time=data_start_time, end_time=data_end_time,
                            step_minute=features_step,
                            histogram_threshold=block_histogram_threshold, bins=block_bins)
    else:
        get_exchange_set(exchange_name=feature_name, out_path=features_path,
                         start_time=data_start_time, end_time=data_end_time,
                         step_minute=features_step, value_threshold=big_value_threshold,
                         maker_threshold=maker_threshold,
                         histogram_threshold=exchange_histogram_threshold, bins=exchange_bins)


def get_mean(column_name, data, out_file):
    """
    Report mean |column_name| over all rows and over above-threshold rows.

    Prints (to stdout AND to *out_file*) the mean absolute value of the
    column over all rows, over rows where |return| >= the evaluation
    threshold, and the ratio of the two.

    :param column_name: column to evaluate ('return' or a model name)
    :param data: DataFrame containing 'return' and *column_name* columns
    :param out_file: open file object receiving a copy of the report
    """
    def _tee(*values):
        # Every report line goes to the console and to the log file.
        print(*values)
        print(*values, file=out_file)

    # Hoisted: the original recomputed each mean and printed each line twice.
    threshold_data = data.loc[abs(data['return']) >= evaluation_return_threshold]
    avg_all = abs(data[column_name]).mean()
    avg_threshold = abs(threshold_data[column_name]).mean()

    _tee(column_name + '-------------------')
    _tee('avg_all:', round(avg_all, 4))
    _tee('avg:', round(avg_threshold, 4))
    _tee('result:', round(avg_threshold / avg_all, 4))


def calc_label(labels):
    """
    Debug helper: report what fraction of daily labels exceed the threshold.

    Down-samples to one row per day, keeps rows from 2018-06-01 onward,
    prints the rows whose |return| exceeds ``evaluation_return_threshold``
    together with the overall proportion, then terminates the process.
    """
    daily = labels.loc[labels.index % 1440 == 0]  # one sample per day
    anchor = daily.loc[daily['time'] == '2018-06-01 00:00:00']  # fixed start date
    daily = daily.loc[anchor.index[0]:]
    above = daily.loc[abs(daily['return']) > evaluation_return_threshold]
    print(above)
    print('labels_df', len(daily), 'threshold_labels', len(above), 'proportion',
          float(len(above)) / len(daily))
    exit()


def train_and_test(result_path, predict_log):
    """
    Run the rolling-window train/predict loop and save predictions to CSV.

    Trains the models in ``model_list`` on a sliding ``training_day``-day
    window, predicts the following label window, slides the window forward
    by one label step per iteration, and accumulates real vs. predicted
    values across the whole test period.

    :param result_path: path of the CSV file receiving the final predictions
    :param predict_log: open file object that receives the run log
    :return: DataFrame with 'time', the real target column, and one
             prediction column per model
    """
    # Load all features and the aligned labels.
    features_df, labels_df = get_feature_and_labels()

    # The first training window ends exactly where the test period begins.
    end_time = datetime.strptime(test_start_time, '%Y-%m-%d %H:%M:%S')
    train_start_time = end_time - timedelta(days=training_day)

    print('======================== argument ==========================', file=predict_log)
    print('features_list', features_list, file=predict_log)
    print('features_step', features_step, 'labels_step', labels_step, file=predict_log)
    print('training_day', training_day, 'test_count', test_window_count, file=predict_log)
    print('features shape', features_df.shape, 'labels shape', labels_df.shape, file=predict_log)
    print('predict label', labels_column_name, file=predict_log)
    print('predict result save path', result_path, file=predict_log)
    print('predict start time', test_start_time, file=predict_log)
    print('predict end time', test_end_time, file=predict_log)
    print('log return', log_return, file=predict_log)

    total_real_and_predict_res = pd.DataFrame()

    # One train/predict round per day of the test period.
    number_of_iteration = (datetime.strptime(test_end_time, '%Y-%m-%d %H:%M:%S') - end_time).days
    for i in range(0, number_of_iteration - 1):
        print('=================================== Training and prediction ================================',
              file=predict_log)
        print('training time', train_start_time.strftime('%Y-%m-%d %H:%M:%S'), end_time.strftime('%Y-%m-%d %H:%M:%S'),
              file=predict_log)
        # Select training rows between train_start_time and end_time (exclusive).
        start_time_index = features_df.loc[features_df['time'] == train_start_time.strftime('%Y-%m-%d %H:%M:%S')].index[
            0]
        end_time_index = features_df.loc[features_df['time'] == end_time.strftime('%Y-%m-%d %H:%M:%S')].index[0] - 1
        curr_train_features = features_df.loc[start_time_index:end_time_index]
        # De-duplicate the training rows.
        # NOTE(review): `del` on a .loc slice mutates a view of features_df and
        # may trigger pandas SettingWithCopyWarning — confirm intended.
        del curr_train_features['time']
        curr_train_features = curr_train_features.loc[~curr_train_features.duplicated()]
        # curr_train_features = curr_train_features[::60]
        # Training labels are aligned to the features by row index.
        curr_train_labels = labels_df.loc[curr_train_features.index]
        one_test_time = end_time + timedelta(minutes=labels_step)
        # Build the list of test timestamps, one every predict_step minutes.
        test_time = []
        for j in range(0, int((test_window_count * labels_step) / predict_step)):
            test_time.append(one_test_time.strftime('%Y-%m-%d %H:%M:%S'))
            one_test_time = one_test_time + timedelta(minutes=predict_step)

        print('test time', test_time[0:5], test_time[-5:], file=predict_log)
        curr_test_features = features_df.loc[features_df['time'].isin(test_time)]
        del curr_test_features['time']
        curr_test_labels = labels_df.loc[curr_test_features.index]
        # Train the configured models and predict the test window.
        curr_result = training_and_test(train_features=curr_train_features,
                                        train_labels=curr_train_labels,
                                        test_features=curr_test_features,
                                        test_labels=curr_test_labels)
        if 'xgboost' in model_list:
            curr_test_labels['xgboost'] = curr_result['xgboost']

        if 'gbdt' in model_list:
            curr_test_labels['gbdt'] = curr_result['gbdt']

        if 'random_forest' in model_list:
            curr_test_labels['random_forest'] = curr_result['random_forest']

        # Accumulate this window's real/predicted values and log correlations.
        total_real_and_predict_res = pd.concat([total_real_and_predict_res, curr_test_labels])
        print(total_real_and_predict_res, file=predict_log)
        print('this window correlations:\n%s' % curr_result.corr(), file=predict_log)
        print('total correlations:\n%s' % total_real_and_predict_res.corr(), file=predict_log)

        # Slide the whole window forward by one predicted label window.
        train_start_time = train_start_time + timedelta(minutes=labels_step * test_window_count)
        end_time = train_start_time + timedelta(days=training_day)

    total_real_and_predict_res = total_real_and_predict_res.reset_index()

    # Output the results and drop the helper index column.
    del total_real_and_predict_res['index']
    # Persist the accumulated predictions to CSV.
    out_column_names = ['time', labels_column_name]
    out_column_names.extend(model_list)
    total_real_and_predict_res.round(6).to_csv(result_path, index=False, columns=out_column_names)

    return total_real_and_predict_res


if __name__ == '__main__':
    # NOTE(review): debug stub left in place — the exit() below terminates the
    # script immediately, so everything after it is currently dead code.
    a = 0.85
    if isinstance(a, float):
        print('xxxx')
    exit()
    # --- dead code: exploratory scatter-matrix / correlation analysis ---
    features_df, labels_df = get_feature_and_labels()
    del features_df['time']
    del labels_df['time']
    # features_df = features_df.iloc[:, 11:14]
    n_point = 60  # down-sample: keep one row every n_point rows
    features_df = features_df[::n_point][['block_total_tx_count', 'block_total_value', 'block-hist-11']]
    labels_df = labels_df[::n_point][['close', 'return', 'volume', 'volatility']]

    pca = PCA(n_components=4)  # constructed but never used below
    # x = pca.fit_transform(np.array(features_df))

    # x = TSNE(n_components=2).fit_transform(np.array(features_df))
    # x_df = pd.DataFrame(x)

    # Join selected features with labels on the (down-sampled) row index.
    x_y = features_df.merge(labels_df, left_index=True, right_index=True)
    x_y.dropna(inplace=True)
    print(x_y.shape)
    print(x_y)

    scatter_matrix(x_y, alpha=0.2, figsize=(20, 20), diagonal='kde')
    plt.show()
    exit()
    # x_y = features_df.merge(labels_df, on='time', how='left')
    print(features_df.shape)
    print(labels_df.shape)
    print(x_y.shape)
    print(x_y.head(100))
    # del x_y['time']
    # x_y = np.sin(x_y)
    print(x_y.corr()['close'].sort_values())
    print(x_y.corr()['return'].sort_values())
    exit()
    # If features at this granularity do not exist yet, create the output
    # folder first; each feature set is then generated on demand.
    if not os.path.exists(features_path):
        os.makedirs(features_path)
    predict_signal_dir = os.path.join(predict_signal_dir, '%s-%s' % (datetime.now().year, datetime.now().month))
    if not os.path.exists(predict_signal_dir):
        os.makedirs(predict_signal_dir)
    predict_log_dir = os.path.join(predict_log_dir, '%s-%s' % (datetime.now().year, datetime.now().month))
    if not os.path.exists(predict_log_dir):
        os.makedirs(predict_log_dir)
    all_features = ['okex', 'bitmex', 'binance', 'huobi',
                    'binance2bitmex', 'block', 'bitmex_maker', 'omni']
    negative_max_auc = 0
    positive_max_auc = 0
    # Exhaustively try every combination of feature sets and track best AUCs.
    for i in range(1, len(all_features) + 1):
        cur_features = list(itertools.combinations(all_features, i))
        for j in range(0, len(cur_features)):
            features_list = list(cur_features[j])
            print('******************* %s:%s ********************' % (j, features_list))
            cur_timestamp = datetime.now().strftime('%Y%m%d-%H%M%S')
            predict_result_path = os.path.join(predict_signal_dir, '%s-%s.csv' % (labels_column_name, cur_timestamp))
            predict_log_path = os.path.join(predict_log_dir, '%s-%s.log' % (labels_column_name, cur_timestamp))
            print('log path: %s, predict path: %s' % (predict_log_path, predict_result_path))
            predict_log_file = open(predict_log_path, 'a')
            predict_res = train_and_test(result_path=predict_result_path, predict_log=predict_log_file)
            # Report metrics at the above-threshold prediction points.
            if labels_column_name == 'return':
                get_mean('return', predict_res, out_file=predict_log_file)
                get_mean('xgboost', predict_res, out_file=predict_log_file)
                get_mean('gbdt', predict_res, out_file=predict_log_file)
                print('-------------------')
                print('-------------------', file=predict_log_file)
                positive_auc, positive_df = auc_return_sigmoid(predict_res, 'xgboost', True)
                negative_auc, negative_df = auc_return_sigmoid(predict_res, 'xgboost', False)
                print('positive auc: %s, negative auc: %s' % (positive_auc, negative_auc))
                print('positive auc: %s, negative auc: %s' % (positive_auc, negative_auc), file=predict_log_file)
                negative_max_auc = negative_auc if negative_auc > negative_max_auc else negative_max_auc
                positive_max_auc = positive_auc if positive_auc > positive_max_auc else positive_max_auc
                print('max positive auc: %s, max negative auc: %s' % (positive_max_auc, negative_max_auc))
                sys.stdout.flush()