import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
# import pickle
# NOTE(review): `sklearn.externals.joblib` was deprecated in scikit-learn 0.21
# and removed in 0.23 — this import fails on modern sklearn; the replacement is
# a plain `import joblib`. Only used by commented-out model-loading code below,
# so confirm the installed sklearn version before touching it.
from sklearn.externals import joblib
from sklearn.model_selection import GridSearchCV

from sklearn.metrics import accuracy_score
import numpy as np
# Divisor applied to the raw target column before it is cast to integer class
# labels (see decision()); also used to undo the scaling in buy_flow2().
y_scale = 1.8
# Name of the DataFrame column used as the prediction target.
y_column = 'up_950'
def buy_flow2(df, tezheng, dec, start=50001, scale=1.8, column='up_950'):
    """Replay the classifier's predictions over the hold-out rows as trades.

    For every row from ``start`` onward, ask ``dec`` for a class prediction;
    whenever the predicted class is positive, "buy" and compound ``money`` by
    that row's realized return (the target column un-scaled back to percent).
    Losing trades (prediction positive but realized return negative) are
    counted and their returns collected per predicted class.

    Args:
        df: DataFrame holding the feature columns, ``column``, and the
            informational columns ``symbol``/``date``/``1d_up``.
        tezheng: list of feature column names fed to ``dec.predict``.
        dec: fitted classifier exposing ``predict``.
        start: first row index of the hold-out slice (default matches the
            historical split used by decision()).
        scale: divisor that was applied to the target during training;
            used here to recover the raw percent move.
        column: name of the target column.

    Returns:
        (count, money): number of losing trades and the final bankroll.
        (Previously returned None; existing callers ignore the return.)
    """
    money = 10000
    count = 0
    count_map = {}      # predicted class (as str) -> how often it occurred
    err_count_map = {}  # predicted class (as str) -> realized returns of losing trades
    x_test = df[tezheng][start:].values
    x = df[["symbol", 'date', '1d_up']][start:].values
    y_test = (df[column] / scale)[start:].values
    for i, res in enumerate(dec.predict(x_test)):
        key = str(res)
        count_map[key] = count_map.get(key, 0) + 1
        if res > 0:
            print(res, "|||", y_test[i], x[i])
            if y_test[i] < 0:
                # A predicted-up trade that actually went down: a losing trade.
                count += 1
                err_count_map.setdefault(key, []).append(y_test[i])
            # Compound the bankroll by the realized percent move.
            money = money * (1 + y_test[i] * scale / 100)
    print(count_map)
    print(err_count_map)
    print("--交易了----", count, ":", money)
    return count, money

def decision() -> None:
    """Train a RandomForestClassifier on money-flow features and report results.

    Reads a local CSV, builds a manual train/test split at row 50000, fits a
    small random forest on integer-cast targets, prints the test accuracy,
    replays the predictions as trades via buy_flow2(), and finally prints the
    features sorted by importance. Side effects only (prints); returns None.
    """
    df = pd.read_csv("E:\\ts_data\\moneyflow\\all_3.csv")
    # print(df.head())
    df.dropna(inplace=True)
    # Drop rows with a zero 20-period active-buy ratio.
    df.drop(df[df['act_buy_m_20_r'] == 0].index, inplace=True)

    # df = df[df['up_945'] > 3]
    # print(df.dtypes)
    # Feature columns

    tezheng = [
        # 'act_buy_xl_20', 'act_buy_xl_40', 'act_buy_xl_80', 'act_buy_xl_160',
        # 'act_sell_xl_20', 'act_sell_xl_40', 'act_sell_xl_80', 'act_sell_xl_160',
        'act_buy_xl_20_r', 'act_buy_xl_40_r', 'act_buy_xl_80_r', 'act_buy_xl_160_r',
        # 'act_buy_l_20', 'act_buy_l_40', 'act_buy_l_80', 'act_buy_l_160',
        # 'act_sell_l_20', 'act_sell_l_40', 'act_sell_l_80', 'act_sell_l_160',
        'act_buy_l_20_r', 'act_buy_l_40_r', 'act_buy_l_80_r', 'act_buy_l_160_r',
        # 'act_buy_m_20', 'act_buy_m_40', 'act_buy_m_80', 'act_buy_m_160',
        # 'act_sell_m_20', 'act_sell_m_40', 'act_sell_m_80', 'act_sell_m_160',
        'act_buy_m_20_r', 'act_buy_m_40_r', 'act_buy_m_80_r', 'act_buy_m_160_r',
        'dde_20', 'dde_40', 'dde_80', 'dde_160',
        'up_20m', 'up_40m', 'up_80m', 'up_160m',
        # '1d_up', '2d_up', '3d_up', '5d_up',
        'turnover_rate', 'turnover_rate_1', 'turnover_rate_2', 'turnover_rate_3'
        # ,'turnover'
    ]

    # 'close_1d',
    print(df.columns)

    x = df[tezheng][:50000].values
    # Target values
    # y = pd.cut(df['up_945'], [-50, -10, -5, -2, 2, 5, 10, 50], labels=False)
    y = (df[y_column]/y_scale)[:50000].values
    # up_950 72.72%
    # up_945 75.47%
    # up_940 78.08% 78.15%
    # up_935        82.26%  0.813
    # x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.25)
    # Manual split: first 50000 rows train, the rest test.
    # NOTE(review): [50001:] together with [:50000] silently skips row 50000 —
    # looks like an off-by-one; [50000:] is probably intended. TODO confirm.
    x_train = df[tezheng][:50000].values
    y_train = (df[y_column]/y_scale)[:50000].values
    x_test = df[tezheng][50001:].values
    y_test = (df[y_column]/y_scale)[50001:].values
    print("X_train_shape:", x_train.shape, " y_train_shape:", y_train.shape)
    print("X_test_shape:", x_test.shape, "  y_test_shape:", y_test.shape)

    from collections import Counter

    print('Counter(data)\n', Counter(np.around(y_test)))
    # max_depth 5 or 6
    # pruning
    # random forest
    # dec = DecisionTreeClassifier(max_depth=6)
    # dec.fit(x_train, y_train.astype('int'))

    dec = RandomForestClassifier(n_estimators=5)
    # NOTE(review): astype('int') truncates toward zero, so the outer
    # np.around() is a no-op here; it is also inconsistent with the
    # np.around(y).astype('int') form used in the commented GridSearch code
    # below. Decide on one labeling rule (truncate vs round) — TODO confirm.
    dec.fit(x_train, np.around(y_train.astype('int')))
    # Save the model
    # f = open('rf_01.pickle', 'wb')
    # pickle.dump(dec, f)
    #
    # Load the model
    # dec = joblib.load('rfc_01_1.pkl')
    # f = open('rf_01.pickle', 'rb')
    # dec = pickle.load(f)
    print("随机森林准确率", dec.score(x_test, y_test.astype('int')))
    # rf = pickle.loads(byte_string)

    # rf = RandomForestClassifier(n_estimators=[5, 7, 9, 11, 15, 17, 19], max_depth=[4, 5, 6, 7, 8])
    # n_estimators: number of trees in the forest
    # max_depth: depth of each tree
    # params = {"n_estimators": [13, 14, 15, 16, 17, 18], "max_depth":[4, 5, 6, 7, 8]}
    # gc = GridSearchCV(rf, param_grid=params, cv=2)
    # gc.fit(x_train, np.around(y_train.astype('int')))
    #
    # print("----准确率： ", gc.score(x_test, np.around(y_test).astype('int')))
    # print("最优 参数模型", gc.best_params_)
    # print("--------------------------------------------------")
    # y_pred = gc.predict(x_test)
    # print("--------------------------------------------------")
    # accuracy = accuracy_score(np.around(y_test.astype('int')), y_pred)
    # print("--------------------------------------------------")
    # print("accuarcy: %.2f%%" % (accuracy * 100.0))  # accuarcy: 87.84%
    # buy_flow(x_test, y_test, dec)
    buy_flow2(df, tezheng, dec)
    print("------------------end")

    # Feature name -> importance, printed in ascending order of importance.
    important = dict(zip(tezheng, dec.feature_importances_))
    # print('dec.feature_importances_222\n', important)

    d_order = sorted(important.items(), key=lambda x: x[1], reverse=False)
    for i in d_order:
        print(i)
    print("end")


if __name__ == "__main__":
    decision()

    # df = pd.read_csv("E:\\ts_data\\moneyflow\\all.csv")
    # std =StandardScaler()
    # data = std.fit_transform(df[['dde_l', 'dde_10', 'dde_20', 'dde_40', 'dde_80']].values)
    # print(data)
    # TODO: apply feature standardization (see the commented StandardScaler sketch above)