import pandas as pd
import xgboost
# from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier, XGBRegressor

from sklearn.model_selection import GridSearchCV  # grid search

from sklearn.metrics import accuracy_score
import numpy as np

from fib_config import close_fib_file, y_column, y_scale, tezheng_v1, data_split_rate
from buy_flow import buy_flow_v2

"""
特征值放大10倍取整
3日、5日收益
准确率 0.8
收益率 20%左右
todo 打印出时间和股票来具体分析
"""


def decision():
    """Train an XGBoost regressor on the configured feature set and backtest it.

    Reads the feature CSV named by ``close_fib_file``, drops incomplete rows,
    splits the data chronologically (no shuffle) at ``data_split_rate``, fits
    an ``XGBRegressor`` with a custom objective, scores it on the held-out
    tail, runs the ``buy_flow_v2`` backtest on that tail, and finally plots
    and prints feature importances.

    Side effects: reads a CSV from disk, prints diagnostics, shows a
    matplotlib window.
    """
    df = pd.read_csv(close_fib_file)
    print(df.columns)
    print(df.shape)
    # Drop any row containing NaN so the feature matrix is well-formed.
    df = df.dropna(axis=0, how='any')
    x = df[tezheng_v1].values
    y = np.round(df[y_column], 2)
    # Chronological split: earlier rows train, later rows test — avoids
    # look-ahead bias that a random train_test_split would introduce.
    data_split = int(df.index.size * data_split_rate)
    # x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.5)
    x_train, x_test = x[: data_split], x[data_split:]
    y_train, y_test = y[: data_split], y[data_split:]
    print("X_train_shape:", x_train.shape, " y_train_shape:", y_train.shape)
    print("X_test_shape:", x_test.shape, "  y_test_shape:", y_test.shape)

    from collections import Counter

    # Rough class balance of the (rounded) test targets.
    print('Counter(data)\n', Counter(np.around(y_test)))

    def customObj1(real, predict):
        """Custom objective: squared-error-style gradient with a thresholded hessian.

        Returns ``(grad, hess)``. Hessian entries below 0.01 are zeroed so
        near-zero residuals contribute nothing to the second-order term.
        """
        grad = predict - real
        hess = np.abs(grad)
        hess[hess < 0.01] = 0
        # hess = np.power(hess, 2) * 10
        return grad, hess

    # Parameter grids previously explored via GridSearchCV; kept (commented)
    # for future tuning runs.
    cv_params = {
        # 'learning_rate': [0.06, 0.07, 0.08, 0.09, 0.1],
        # 'gamma': [0.05, 0.06, 0.08, 0.09, 0.1],
        # 'subsample': [0.6, 0.7, 0.8, 0.9],
        # 'colsample_bytree': [0.9, 1.0, 1.1, 1.2],
        # 'reg_alpha': [0.03, 0.04, 0.05, 0.06],
        # 'reg_lambda': [1.2, 1.3, 1.4, 1.5]
    }
    # Tuning log from past runs, format: "param=value  <trades> <score>".
    # max_depth=2 287 0.0079 | max_depth=3 289 0.0187 | max_depth=4 797 0.0092
    # max_depth=5 769 0.0083
    # min_child_weight=9 676 0.0065 | =8 314 0.004 | =7 797 0.0092
    # min_child_weight=6 728 0.01   | =5 1210 0.004
    # n_estimators=25 637 0.01 | =22 858 0.01 | =20 728 0.01 | =15 553 0.009
    # n_estimators=10 598 0.011 | =8 509 0.0056
    # learning_rate=0.08 561 0.007 | =0.09 507 0.013 | =0.1 858 0.01
    # learning_rate=0.11 1941 0.005 | =0.12 2671 0.003
    # gamma=0.1 416 0.0232 | =0.2 426 0.0236 | =0.25 425 0.025 | =0.3 596 0.012
    # subsample=0.9 400 0.01 | =0.8 873 0.011 | =0.7 428 0.009
    # reg_lambda=1.8 256 0.0068 | =1.6 426 0.023 | =1.5 872 0.01
    # reg_lambda=1.4 873 0.01 | =1.3 747 0.006
    # reg_alpha=0.03 386 0.021 | =0.04 426 0.023 | =0.05 370 0.019
    # colsample_bytree=1.0 425 0.02519 | =0.95 441 0.017 | =0.9 585 0.012
    # colsample_bytree=0.8 731 0.004 | =0.5 1261 0.0027
    other_params = {
        'objective': customObj1,
        'learning_rate': 0.1,
        'n_estimators': 22,
        'max_depth': 4,
        'min_child_weight': 6,
        'seed': 0,
        'subsample': 0.8,
        'colsample_bytree': 1.0,
        'gamma': 0.25,
        'reg_alpha': 0.04,
        'reg_lambda': 1.6,
        "eval_metric": "rmse",
        # Removed "max_features": "auto" — that is a scikit-learn tree
        # parameter, not an XGBoost one; recent xgboost versions reject
        # unknown constructor kwargs.
    }

    dec = XGBRegressor(**other_params)
    # gs = GridSearchCV(estimator=dec, param_grid=cv_params, scoring='r2', cv=5, verbose=1, n_jobs=8)
    # gs.fit(x_train, y_train)
    # print("Best: %f using %s" % (gs.best_score_, gs.best_params_))

    dec.fit(x_train, y_train)
    # R^2 on the held-out chronological tail.
    calc_score = dec.score(x_test, y_test)
    # Backtest the trading flow on the test slice of the original frame.
    buy_flow_v2(df[data_split:], dec, tezheng_v1)
    print("------------------end: score:" + str(calc_score))

    import matplotlib.pyplot as plt

    from xgboost import plot_importance

    fig, ax = plt.subplots(figsize=(10, 15))
    plot_importance(dec, height=0.5, max_num_features=64, ax=ax)
    plt.show()
    # Print feature importances, least important first.
    important = dict(zip(tezheng_v1, dec.feature_importances_))
    d_order = sorted(important.items(), key=lambda item: item[1], reverse=False)
    for pair in d_order:
        print(pair)
    print("end")

    # export_graphviz(dec, feature_names=tezheng, out_file="./tree.dot")
    # dot -Tpdf tree.dot -o tree.pdf


def load_decision():
    """Restore a previously trained XGBoost model from disk.

    Returns:
        XGBRegressor: model loaded from ``stcok_xgb.model``.
        (NOTE(review): the filename spells "stcok" — presumably matches the
        name used when the model was saved; verify before renaming.)
    """
    # Bug fix: the original did `dec = XGBRegressor` (no parentheses), which
    # bound the class itself, so load_model was invoked on the class rather
    # than an instance. Instantiate first, then load, and return the model.
    dec = XGBRegressor()
    dec.load_model("stcok_xgb.model")
    return dec


# Script entry point: train the model and run the backtest/plots.
if __name__ == '__main__':
    decision()
