# -*- coding: utf-8 -*-
# 作者    ：SunDuWei
# 创作时间 ：2020-02-28
# 使用泽林的model包进行期货数据测试
import pandas as pd
import datetime
import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
import warnings
from collections import Counter
from imblearn.over_sampling import SMOTE
from imblearn.combine import SMOTEENN
from imblearn.under_sampling import RandomUnderSampler
import seaborn as sns
import matplotlib
import xgboost as xgb
import calendar
import shutil
import os
from model import XGBoost, SKLearnModel, xgb_cv_precision_score, precision_binary, recall_binary, xgb_cv_recall_score

warnings.filterwarnings('ignore')

# Product to test; overwritten to '多品种' in the multi-product branch below.
kind = 'RB'
nIs_solo_kind = True
if nIs_solo_kind:
    # BUGFIX: the keyword is `parse_dates` (plural); `parse_date` was silently
    # ignored by old pandas and raises TypeError on current versions.
    df = pd.read_excel(r'E:\课堂资料\future_data\因子库\%s\factor_sheet_all.xlsx' % kind, parse_dates=True)
    savepath = r'E:\课堂资料\future_data\回测数据\%s' % kind
else:
    df = pd.read_excel(r'E:\课堂资料\future_data\因子库\factor_sheet_all.xlsx', parse_dates=True)
    kind = '多品种'
    savepath = r'E:\课堂资料\future_data\回测数据\多品种'
# Keep an independent copy of the raw sheet for the final merge/export.
# BUGFIX: the original `df_raw = df` aliased the same object, so every
# in-place edit on `df` also mutated `df_raw`.
df_raw = df.copy()

# Sort chronologically: build a real datetime column from date + time strings.
df['datetime'] = pd.to_datetime(df['date'] + ' ' + df['time'])
df = df.sort_values(by='datetime')
df_raw['datetime'] = pd.to_datetime(df_raw['date'] + ' ' + df_raw['time'])
df_raw = df_raw.sort_values(by='datetime')

# Encode time-of-day into a trading-session code:
#   1 = night session   (after 21:00 or before 02:30)
#   2 = morning session (between 09:00 and 12:00)
#   3 = anything else   (afternoon, session breaks)
df['time'] = pd.to_datetime(df['time'], format="%H:%M:%S")
time1 = datetime.datetime.strptime('21:00:00', '%H:%M:%S')
time2 = datetime.datetime.strptime('02:30:00', '%H:%M:%S')
time3 = datetime.datetime.strptime('09:00:00', '%H:%M:%S')
time4 = datetime.datetime.strptime('12:00:00', '%H:%M:%S')


def _session_code(t):
    """Map a parsed time-of-day to its session code (1/2/3)."""
    if t > time1 or t < time2:
        return 1
    if time3 < t < time4:
        return 2
    return 3


df['time'] = df['time'].apply(_session_code)
df.replace([np.inf, -np.inf], np.nan, inplace=True)

# Determine x / y columns (5-minute trading targets).
# Targets are every column prefixed y1_/y2_/y3_; features are whatever is
# left after dropping targets, text columns and the date/datetime columns.
ser_col = pd.Series(df.columns)
y1_col = [c for c in ser_col if c.startswith('y1_')]
y2_col = [c for c in ser_col if c.startswith('y2_')]
y3_col = [c for c in ser_col if c.startswith('y3_')]
y_col = y1_col + y2_col + y3_col
txt_col = list(df.dtypes[df.dtypes == 'object'].index)
non_x_col = txt_col + y_col + ['date', 'datetime']
x_col = list(set(ser_col.values).difference(non_x_col))
# Rows missing any feature value are unusable for training.
df.dropna(how='any', subset=x_col, inplace=True)

# Load the previously exported feature-importance sheet so later runs can
# restrict training to factors that proved useful. On the first run the
# file does not exist yet, so fall back to using every factor column.
try:
    df_validity = pd.read_excel(
        # BUGFIX: `parse_dates`, not `parse_date` (invalid kwarg on modern pandas).
        r'E:\课堂资料\future_data\回测数据\%s\feature_importance.xlsx' % kind,
        parse_dates=True)
    validity_factor = list(df_validity.columns)
    nIs_new = 0
# BUGFIX: the original bare `except:` swallowed every exception (including
# KeyboardInterrupt); only a missing/unreadable file means "first run".
except OSError:
    nIs_new = 1

df_pred_all = pd.DataFrame()  # accumulates out-of-sample predictions for both directions

# Run the full pipeline once per breakout direction: -1 (down) and +1 (up).
break_class = [-1, 1]
for j in break_class:
    df_temp = df[df['x2_break_signal'] == j]

    # Label: 1 when the direction-adjusted strategy yield reaches 10,
    # else 0. (Unit of y3_strategy_yield not visible here -- TODO confirm.)
    y_value = df_temp['y3_strategy_yield'] * df_temp['x2_break_signal']
    y = y_value.apply(lambda k: 1 if k >= 10 else 0)
    if nIs_new:
        x = df_temp.loc[:, x_col]            # first run: all factor columns
    else:
        x = df_temp.loc[:, validity_factor]  # later runs: pre-screened factors

    # Standardise the features.
    # NOTE(review): the scaler is fitted on the FULL sample before the
    # train/test split below, leaking test-set statistics into training --
    # consider fitting on each training window only.
    scaler = StandardScaler()
    scaler.fit(x)
    x_trains = pd.DataFrame(scaler.transform(x), columns=x.columns, index=x.index)

    # Rolling (walk-forward) test: train on an 80% window, then evaluate
    # on the following 10% slice, advancing the window by 10% each step.
    count = 0
    for i in np.arange(0, 0.2, 0.1):
        count += 1
        nline_end = int((i + 0.8) * len(x_trains))
        nline_start = int(i * len(x_trains))
        nline_end_test = int((i + 0.9) * len(x_trains))
        nline_start_test = nline_end

        # Chronological split -- no shuffling, test strictly follows train.
        x_train = x_trains[nline_start:nline_end]
        x_test = x_trains[nline_start_test:nline_end_test]
        y_train = y[nline_start:nline_end]
        y_test = y[nline_start_test:nline_end_test]

        # Undersample the majority class (0) to a 1:1 ratio with class 1.
        print(Counter(y_train))
        sample_ratio = dict(Counter(y_train))
        sample_ratio[0] = int(sample_ratio[1] * 1)
        model_Sample = RandomUnderSampler(random_state=42, sampling_strategy=sample_ratio)
        # BUGFIX: `fit_sample` was removed in imbalanced-learn 0.8;
        # `fit_resample` is the supported, identically-behaving API.
        x_mod_train, y_mod_train = model_Sample.fit_resample(x_train, y_train)
        print(Counter(y_mod_train))

        # Cross-validate a baseline XGBoost model on the resampled window.
        class_sklearn = SKLearnModel(x=x_mod_train, y=y_mod_train)
        bst = XGBoost(objective='binary:logistic',
                      gamma=0.1, learning_rate=0.1,
                      n_estimator=100, n_classes=2, max_depth=4,
                      colsample_bytree=0.9, min_child_weight=3, silent=0,
                      seed=0, num_boost_round=50
                      )
        xgb_cv = class_sklearn.cv(
            bst,
            cv_scorer=xgb_cv_recall_score,
            savepath=savepath,
            max_scorer=True,
            cv_fold=5,
            is_xgb=True
        )
        # Grid-search tree depth / child weight around the baseline.
        xgb_param_grid = {
            'max_depth': list(range(5, 12)),
            'min_child_weight': list((0.5, 1, 3, 6, 9)),
            # 'gamma': [0.1 * i for i in range(0, 5)],
            # 'subsample': [0.1 * i for i in range(6, 9)],
            # 'colsample_bytree': [0.1 * i for i in range(6, 9)],
            # 'eta': [0.5, 0.4, 0.3, 0.2, 0.1, 0.075, 0.05, 0.04, 0.03],
        }
        xgb_grid_estimator = XGBoost(objective='binary:logistic',
                                     eta=0.1, colsample_bytree=0.5,
                                     num_boost_round=50
                                     )
        grid, grid_best = class_sklearn.grid_search(
            xgb_grid_estimator,
            savepath,
            xgb_param_grid,
            scorer=recall_binary,  # this scorer only fits binary:logistic
            max_scorer=True,
            cv_fold=5,
            is_xgb=True,
            bst_name="xgb_%d" % count,  # persist the best model
        )
        # Predict on the held-out window with the best grid-search model.
        y_pred = SKLearnModel.predict(
            model=grid.best_estimator_.bst,
            savepath=savepath,
            x_test=x_test,
            y_test=y_test,
            label="binary",
            is_xgb=True
        )
        df_pred_value = pd.DataFrame({'y_pred': y_pred.tolist()}).set_index(y_test.index)
        # BUGFIX: DataFrame.append was removed in pandas 2.0 -- use pd.concat.
        df_pred_all = pd.concat([df_pred_all, df_pred_value])
        print('完成%d方向第%d次测试' % (j, count))

# Join the out-of-sample predictions back onto the raw sheet by row index
# and export the combined table for back-testing.
df_pred = df_raw.merge(df_pred_all, left_index=True, right_index=True)
df_pred.to_excel(r'E:\课堂资料\future_data\因子库\%s\factor_pre_all.xlsx' % kind, index=False)
# # 数据切分(随机)
# # x_train, x_test, y_train, y_test = train_test_split(x_trains, y, test_size=0.2, random_state=0)
#
# # 数据切分（按时间）
# nline = int(0.8*len(x_trains))
# x_train = x_trains[:nline]
# x_test = x_trains[nline:]
# y_train = y[:nline]
# y_test = y[nline:]
#
# # # 对训练集上采样
# # sample_ratio = dict(Counter(y_train))
# # sample_ratio[1] = int(sample_ratio[0] * 0.8)
# # model_Sample = SMOTE(random_state=42, sampling_strategy=sample_ratio)
# # x_mod_train, y_mod_train = model_Sample.fit_sample(x_train, y_train)
# # print(Counter(y_mod_train))
#
# # 对训练集进行下采样
# sample_ratio = dict(Counter(y_train))
# sample_ratio[0] = int(sample_ratio[1] * 1.5)
# model_Sample = RandomUnderSampler(random_state=42, sampling_strategy=sample_ratio)
# x_mod_train, y_mod_train = model_Sample.fit_sample(x_train, y_train)
# print(Counter(y_mod_train))
#
# # # 组合采样
# # smote_tomek = SMOTEENN(random_state=0)
# # x_mod_train, y_mod_train = smote_tomek.fit_sample(x_train, y_train)
#
# # 对突破进行训练
# class_sklearn = SKLearnModel(x=x_mod_train, y=y_mod_train)
# bst = XGBoost(objective='binary:logistic',
#               gamma=0.1, learning_rate=0.1,
#               n_estimator=100, n_classes=2, max_depth=4,
#               colsample_bytree=0.9, min_child_weight=3, silent=0,
#               seed=0, num_boost_round=50
#               )
# xgb_cv = class_sklearn.cv(
#     bst,
#     cv_scorer=xgb_cv_precision_score,
#     max_scorer=True,
#     cv_fold=5,
#     is_xgb=True
# )
# xgb_param_grid = {
#     'max_depth': list(range(5, 12)),
#     'min_child_weight': list((0.5, 1, 3, 6, 9)),
#     # 'gamma': [0.1 * i for i in range(0, 5)],
#     # 'subsample': [0.1 * i for i in range(6, 9)],
#     # 'colsample_bytree': [0.1 * i for i in range(6, 9)],
#     # 'eta': [0.5, 0.4, 0.3, 0.2, 0.1, 0.075, 0.05, 0.04, 0.03],
#                 }
# xgb_grid_estimator = XGBoost(objective='binary:logistic',
#                              eta=0.1, colsample_bytree=0.5,
#                              num_boost_round=50
#                              )
# grid, grid_best = class_sklearn.grid_search(
#     xgb_grid_estimator,
#     xgb_param_grid,
#     scorer=precision_binary,  # 该scorer仅适合binary logistic
#     max_scorer=True,
#     cv_fold=5,
#     is_xgb=True,
#     bst_name="xgb_001",  # 保存最优模型
# )
# y_pred = SKLearnModel.predict(
#     model=grid.best_estimator_.bst,
#     x_test=x_test,
#     y_test=y_test,
#     label="binary",
#     is_xgb=True
# )

# df_pred_value = pd.DataFrame({'y_pred': y_pred.tolist()}).set_index(y_test.index)
# df_pred = pd.merge(df_raw, df_pred_value, left_index=True, right_index=True)
# df_pred.to_excel(r'E:\课堂资料\future_data\因子库\factor_pre_all.xlsx', index=False)
