from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import GridSearchCV,KFold
from sklearn.metrics import mean_squared_error
import numpy as np
import pandas as pd

def get_RF():
    """Build an unfitted RandomForestRegressor with a fixed random seed.

    Only the seed, parallelism and the error criterion are pinned here;
    tree-shape hyper-parameters (depth, leaf sizes, n_estimators, ...) are
    left at their defaults because they are tuned later via GridSearchCV.

    Returns:
        RandomForestRegressor: unfitted estimator, random_state=2020.
    """
    # "squared_error" is the current name of the MSE criterion: the old
    # alias "mse" was deprecated in scikit-learn 1.0 and removed in 1.2.
    # The former `min_impurity_split=None` argument was removed in 1.0
    # and is dropped here (it was a no-op at None anyway).
    clf = RandomForestRegressor(
        criterion="squared_error",
        min_weight_fraction_leaf=0.,
        max_leaf_nodes=None,
        min_impurity_decrease=0.,
        bootstrap=True,
        oob_score=False,
        n_jobs=4,
        random_state=2020,
        verbose=0,
        warm_start=False)

    return clf

def train_predict(train, test, best_clf, feature_cols=None, label_name='label_series'):
    """Run 5-fold out-of-fold training and prediction with ``best_clf``.

    For each fold the estimator is refit on the training part, scored
    (RMSE) on the held-out part, and its predictions on ``test`` are
    accumulated and finally averaged over the folds.

    Side effects — three CSV files are written to the working directory:
      - train_randomforest.csv: out-of-fold predictions, positional order.
      - test_randomforest.csv: fold-averaged test predictions.
      - submission_randomforest.csv: (card_id, target) submission file.
    ``test`` is also mutated in place (a 'target' column is added).

    Args:
        train: DataFrame with the feature columns and the label column.
        test: DataFrame with the feature columns plus a 'card_id' column.
        best_clf: estimator exposing fit/predict; refit once per fold.
        feature_cols: feature column names. Defaults to the module-level
            ``features`` list so existing call sites keep working.
        label_name: name of the label column in ``train``.

    Returns:
        float: mean cross-validation RMSE over the folds (previously the
        function returned None; callers that ignore it are unaffected).
    """
    cols = features if feature_cols is None else feature_cols
    kf = KFold(n_splits=5, random_state=2020, shuffle=True)
    n_folds = kf.get_n_splits()  # single source of truth for the fold count
    prediction_test = 0
    cv_score = []
    # Out-of-fold pieces are collected in a list and concatenated once:
    # Series.append was deprecated in pandas 1.4 and removed in 2.0.
    oof_parts = []

    for train_part_index, eval_index in kf.split(train[cols], train[label_name]):
        best_clf.fit(train[cols].iloc[train_part_index].values,
                     train[label_name].iloc[train_part_index].values)
        prediction_test += best_clf.predict(test[cols].values)
        # Predict the held-out fold once and reuse the array for both the
        # score and the out-of-fold series (the original predicted twice).
        eval_pre = best_clf.predict(train[cols].iloc[eval_index].values)
        score = np.sqrt(mean_squared_error(train[label_name].iloc[eval_index].values, eval_pre))
        cv_score.append(score)
        print(score)
        oof_parts.append(pd.Series(eval_pre, index=eval_index))

    mean_score = sum(cv_score) / n_folds
    print(cv_score, mean_score)
    prediction_train = pd.concat(oof_parts)
    pd.Series(prediction_train.sort_index().values).to_csv("train_randomforest.csv", index=False)
    pd.Series(prediction_test / n_folds).to_csv("test_randomforest.csv", index=False)
    test['target'] = prediction_test / n_folds
    test[['card_id', 'target']].to_csv("submission_randomforest.csv", index=False)
    return mean_score

def get_grid(clf, parameter_space):
    """Wrap ``clf`` in a 2-fold GridSearchCV scored by negative MSE."""
    searcher = GridSearchCV(
        clf,
        parameter_space,
        cv=2,
        scoring="neg_mean_squared_error",
    )
    return searcher


if __name__ == '__main__':
    from logic.global_objs import D
    from engine.data.data_handler import DataHandler

    # Base K-bar feature expressions and their column names from the
    # project's data handler.
    fields, names = DataHandler().get_kbar_fields_names()

    # Regression label: next-step return, close(t+1)/close(t) - 1
    # (Ref with a negative offset looks one step ahead).
    fields.append('Ref($close,-1)/$close -1')
    names.append('label_series')

    # Decile-bucketed copy of the label; loaded but excluded from the
    # model features below.
    fields.append('QCut($label_series,10)')
    names.append('label')

    df_all = D.load(['000300.SH', '000905.SH', '399006.SZ'], start_time='20100101', fields=fields, names=names)
    print(df_all.head())

    # Time-based split. The original code sliced the test set with
    # '20151231':, which re-included the last training date in the test
    # window (look-ahead leakage); the test window now starts the day after.
    df_train = df_all[:'20151231']
    df_test = df_all['20160101':]

    parameter_space = {
        "n_estimators": [80],
        "min_samples_leaf": [30],
        "min_samples_split": [2],
        "max_depth": [9],
        # 1.0 means "consider all features" — the literal replacement for
        # the "auto" alias, which was removed for regressors in sklearn 1.3.
        "max_features": [1.0, 80]
    }
    grid = get_grid(get_RF(), parameter_space)

    # Model features: every loaded column except the two label columns.
    features = names.copy()
    features.remove('label_series')
    features.remove('label')

    train = df_train[features]
    train_y = df_train['label_series']

    grid.fit(train.values, train_y.values)

    print("best_params_:")
    print(grid.best_params_)
    means = grid.cv_results_["mean_test_score"]
    stds = grid.cv_results_["std_test_score"]
    for mean, std, params in zip(means, stds, grid.cv_results_["params"]):
        print("%0.3f (+/-%0.03f) for %r"
              % (mean, std * 2, params))
    best_clf = grid.best_estimator_

    # Refit per CV fold and write the prediction/submission CSVs.
    train_predict(df_train, df_test, best_clf)