# -*- coding: UTF-8 -*-

import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestRegressor
from scipy.stats import pearsonr

# Widen the pandas console output so wide DataFrames print on one line
desired_width = 320
pd.set_option('display.width', desired_width)
pd.set_option('display.max_columns', 20)


def training(features_rolling_size, labels_rolling_size, labels_name):
    """Train a RandomForest on rolling-summed transfer features, random split.

    Features are the per-minute received/sent amounts summed over a
    ``features_rolling_size``-row window; the label is either the forward
    rolling volatility or the forward return of the BTC close over
    ``labels_rolling_size`` rows.  Rows are split 75/25 at random
    (``random_state=42``) for train/test.

    :param features_rolling_size: window (rows) for the feature rolling sum
    :param labels_rolling_size: forward window (rows) used to build the label
    :param labels_name: 'volatility' or 'return' -- which label to fit
    :return: price DataFrame (index reset) with a 'predict' column holding
             the model's prediction for every remaining row
    """
    print('------------------------------------------------------------')
    print(labels_name, 'features_rolling_size:', features_rolling_size, ', labels_rolling_size:', labels_rolling_size)
    # Read feature data
    features = pd.read_csv('../data/receivedAndSend_0601_1101_minute.csv')
    del features['time']
    features = features.rolling(features_rolling_size, min_periods=1).sum()
    # Fix: drop the trailing rows whose forward-looking labels cannot be
    # computed -- shift(-labels_rolling_size + 1) below leaves NaNs there
    # that fillna(0) would otherwise turn into bogus zero labels.  This
    # mirrors the trimming trainingByMonth already performs.
    features = features.loc[:len(features) - labels_rolling_size]
    features.fillna(0, inplace=True)
    # Remove duplicated feature rows (keeps the first occurrence)
    features = features.loc[~features.duplicated()]
    # Read label data
    price_data = pd.read_csv('../data/OkEX_BTC_USD_quarter_20180601_20181101.csv', index_col=False, header=0)
    if labels_name == 'volatility':
        # Forward-looking rolling std of the close, aligned to the window start
        price_data[labels_name] = price_data['close'].rolling(labels_rolling_size, min_periods=1).std().shift(
            -labels_rolling_size + 1)
    elif labels_name == 'return':
        # Forward return over labels_rolling_size - 1 rows
        price_data[labels_name] = (price_data['close'].shift(-labels_rolling_size + 1) - price_data['close']) / \
                                  price_data['close']
    price_data.fillna(0, inplace=True)
    # Trim the tail the same way as the features, then keep only the rows
    # that survived feature de-duplication so features and labels stay aligned
    price_data = price_data.loc[:len(price_data) - labels_rolling_size]
    price_data = price_data.loc[features.index]
    print('features.shape', features.shape)
    features = np.array(features)
    labels = np.array(price_data[labels_name])
    # Split the data into training and testing sets
    train_features, test_features, train_labels, test_labels = train_test_split(features, labels, test_size=0.25,
                                                                                random_state=42)

    # Instantiate model with 100 decision trees
    rf = RandomForestRegressor(n_estimators=100, random_state=42)
    # Train the model on training data
    rf.fit(train_features, train_labels)

    # Use the forest's predict method on the test data
    predictions = rf.predict(test_features)
    print("Pearson: ", pearsonr(test_labels, predictions))
    # Calculate the absolute errors
    errors = abs(predictions - test_labels)
    # Print out the mean absolute error (mae)
    print('Mean Absolute Error:', np.mean(errors), 'degrees.')
    print('Feature important: ', rf.feature_importances_)

    # Predict every row; after reset_index both sides carry a RangeIndex,
    # so the assignment aligns positionally
    price_data = price_data.reset_index()
    price_data['predict'] = pd.DataFrame(rf.predict(features))[0]
    return price_data


def get_matric_data(labels_rolling_size,
                    csv_path='../data/OkEX_BTC_USD_quarter_20180601_20181101.csv'):
    """Load price data and attach forward-looking metric columns.

    Adds two columns to the price frame (name 'matric' kept for
    backward compatibility; presumably a typo for 'metric'):

    - 'volatility': rolling std of 'close' over ``labels_rolling_size`` rows,
      shifted so each row holds the std of its *forward* window.
    - 'return': relative change of 'close' over the next
      ``labels_rolling_size - 1`` rows.

    Trailing rows where the forward window runs off the end (and any
    division-by-zero NaNs) are filled with 0.

    :param labels_rolling_size: forward window length in rows
    :param csv_path: price CSV to read; must contain a 'close' column
                     (defaults to the project's quarter-contract file)
    :return: the price DataFrame with 'volatility' and 'return' columns added
    """
    labels = pd.read_csv(csv_path, index_col=False, header=0)
    labels['volatility'] = labels['close'].rolling(labels_rolling_size, min_periods=1).std().shift(
        -labels_rolling_size + 1)
    labels['return'] = (labels['close'].shift(-labels_rolling_size + 1) - labels['close']) / labels['close']
    labels.fillna(0, inplace=True)
    return labels


def trainingByMonth(features_rolling_size, labels_rolling_size, labels_name):
    """Train a RandomForest with a chronological split at 2018-10-01.

    Same feature/label construction as ``training``, but instead of a random
    75/25 split, rows before the timestamp 20181001000000 are used for
    training and rows from that timestamp onward for testing (out-of-sample
    by month).

    :param features_rolling_size: window (rows) for the feature rolling sum
    :param labels_rolling_size: forward window (rows) used to build the label
    :param labels_name: 'volatility' or 'return' -- which label to fit
    :return: price DataFrame (index reset) with a 'predict' column holding
             the model's prediction for every remaining row
    """
    print('------------------------------------------------------------')
    print(labels_name, 'features_rolling_size:', features_rolling_size, ', labels_rolling_size:', labels_rolling_size)
    # Read feature data
    features = pd.read_csv('../data/receivedAndSend_0601_1101_minute.csv')
    # Capture the row index of the split timestamp BEFORE the 'time' column
    # is deleted; raises IndexError if that exact timestamp is absent
    split_time_index = features.loc[features['time'] == 20181001000000].index[0]
    del features['time']
    features = features.rolling(features_rolling_size, min_periods=1).sum()
    # Drop the trailing rows whose forward-looking labels cannot be computed
    # (.loc slicing is inclusive, so this drops labels_rolling_size - 1 rows)
    features = features.loc[:len(features) - labels_rolling_size]
    # Convert to numpy array
    features.fillna(0, inplace=True)
    # Remove duplicated features
    features = features.loc[~features.duplicated()]
    # Read label data
    price_data = pd.read_csv('../data/OkEX_BTC_USD_quarter_20180601_20181101.csv', index_col=False, header=0)
    if labels_name == 'volatility':
        # Forward-looking rolling std of the close, aligned to the window start
        price_data[labels_name] = price_data['close'].rolling(labels_rolling_size, min_periods=1).std().shift(
            -labels_rolling_size + 1)
    elif labels_name == 'return':
        # Forward return over labels_rolling_size - 1 rows
        price_data[labels_name] = (price_data['close'].shift(-labels_rolling_size + 1) - price_data['close']) / \
                                  price_data['close']
    # Trim the label tail the same way, then keep only the rows that survived
    # feature de-duplication so features and labels stay aligned by index
    price_data = price_data.loc[:len(price_data) - labels_rolling_size]
    price_data = price_data.loc[features.index]
    print('features.shape', features.shape)
    # Chronological split via .loc label slicing on the (monotonic) original
    # index.  NOTE(review): both slice ends are inclusive, so the row at
    # split_time_index appears in BOTH train and test (one-row overlap) --
    # confirm this is intended.
    train_features = np.array(features.loc[:split_time_index])
    test_features = np.array(features.loc[split_time_index:])
    train_labels = np.array(price_data[labels_name].loc[:split_time_index])
    test_labels = np.array(price_data[labels_name].loc[split_time_index:])
    rf = RandomForestRegressor(n_estimators=100, random_state=42)
    # Train the model on training data
    rf.fit(train_features, train_labels)

    # Use the forest's predict method on the test data
    predictions = rf.predict(test_features)
    print("Pearson: ", pearsonr(test_labels, predictions))
    # Calculate the absolute errors
    errors = abs(predictions - test_labels)
    # Print out the mean absolute error (mae)
    print('Mean Absolute Error:', np.mean(errors), 'degrees.')
    print('Feature important: ', rf.feature_importances_)

    # Predict all features; after reset_index both sides carry a RangeIndex,
    # so the column assignment aligns positionally
    price_data = price_data.reset_index()
    price_data['predict'] = pd.DataFrame(rf.predict(features))[0]
    return price_data


if __name__ == '__main__':
    # Fit the 'return' model with a 24-hour (minute-resolution) feature
    # window and a 40-row forward label, splitting train/test by month,
    # then persist actual vs. predicted values for later analysis.
    labels_rolling_size = 40
    labels_name = 'return'
    predict_res = trainingByMonth(24 * 60, labels_rolling_size, labels_name)
    output_file = 'predict_' + labels_name + '_day.csv'
    output_columns = ['time', labels_name, 'predict']
    predict_res.to_csv(output_file, index=False, columns=output_columns)
