import pandas as pd
import xgboost
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier, XGBRegressor

from sklearn.metrics import accuracy_score
import numpy as np

from fib_config import close_fib_file, y_column, y_scale, tezheng_v1, data_split_rate
from buy_flow import buy_flow_v2

"""
特征值放大10倍取整
3日、5日收益
准确率 0.8
收益率 20%左右
todo 打印出时间和股票来具体分析
"""


def decision():
    """Train an XGBoost regressor on the fib feature file and evaluate it.

    Reads ``close_fib_file``, performs a *chronological* train/test split
    (no shuffling -- this is time-series data, so the most recent rows are
    held out), fits an ``XGBRegressor`` with a custom objective, runs the
    simulated buy flow on the test rows, then prints the R^2 score and the
    per-feature importances in ascending order.

    Side effects: prints diagnostics to stdout and shows a matplotlib
    feature-importance plot (blocks until the window is closed).
    """
    df = pd.read_csv(close_fib_file)
    print(df.columns)
    print(df.shape)
    # Drop any row containing NaN so the feature matrix is clean.
    df = df.dropna(axis=0, how='any')
    x = df[tezheng_v1].values
    y = np.round(df[y_column], 2)

    # Chronological split: earlier rows train, later rows test.
    data_split = int(df.index.size * data_split_rate)
    x_train, x_test = x[:data_split], x[data_split:]
    y_train, y_test = y[:data_split], y[data_split:]
    print("X_train_shape:", x_train.shape, " y_train_shape:", y_train.shape)
    print("X_test_shape:", x_test.shape, "  y_test_shape:", y_test.shape)

    from collections import Counter

    # Distribution of (rounded) test targets, for a quick sanity check.
    print('Counter(data)\n', Counter(np.around(y_test)))

    # TODO: refine the custom loss -- ignore tiny residuals by zeroing them.
    def custom_obj(real, predict):
        """Custom objective (sklearn API signature): return (grad, hess).

        grad is the plain residual; hess is the squared residual scaled by
        10, which up-weights samples the model gets badly wrong.
        """
        grad = predict - real
        hess = np.power(np.abs(grad), 2) * 10
        return grad, hess

    # NOTE(review): the original passed ``max_features="auto"`` here -- that
    # is a sklearn GradientBoosting parameter, not an XGBoost one; old
    # xgboost silently ignored it and recent versions reject unknown kwargs,
    # so it has been removed.
    dec = XGBRegressor(objective=custom_obj,
                       eval_metric="rmse",
                       max_depth=5,
                       learning_rate=0.1)
    dec.fit(x_train, y_train)
    calc_score = dec.score(x_test, y_test)

    # Simulate trading over the held-out (most recent) rows.
    buy_flow_v2(df[data_split:], dec)
    print("------------------end: score:" + str(calc_score))

    import matplotlib.pyplot as plt
    from xgboost import plot_importance

    # Feature-importance plot, capped at 64 features.
    fig, ax = plt.subplots(figsize=(10, 15))
    plot_importance(dec, height=0.5, max_num_features=64, ax=ax)
    plt.show()

    # Print feature importances, least important first.
    important = dict(zip(tezheng_v1, dec.feature_importances_))
    for item in sorted(important.items(), key=lambda kv: kv[1]):
        print(item)
    print("end")


def load_decision(model_file="stcok_xgb.model"):
    """Load a previously saved XGBoost model and return it.

    Fixes the original bug where ``XGBRegressor`` was never instantiated
    (the class object itself was assigned, missing the ``()``), so
    ``load_model`` was invoked on the class with the filename bound to
    ``self`` and raised a TypeError.

    :param model_file: path to the saved model
        (default keeps the original hard-coded filename, typo included).
    :return: the fitted ``XGBRegressor`` ready for prediction.
    """
    dec = XGBRegressor()  # was: dec = XGBRegressor  (missing parentheses)
    dec.load_model(model_file)
    return dec


# Script entry point: train the model and run the evaluation flow.
if __name__ == '__main__':
    decision()
