#!/usr/bin/env python
# coding: utf-8
import matplotlib.pyplot as plt
import datetime
import h2o
import numpy as np
import pandas as pd
import time
import os
import backtest_minutes, model_training, model_judgement
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor, \
    RandomForestClassifier, GradientBoostingClassifier
import warnings
from boruta_test import boruta_test

warnings.filterwarnings('ignore')

'''
V2.0.0
Ported from h2o to keras.
Moved 1-minute data fetching and initialization out of the factor classes
into the GetData class; factor classes now only compute factors from the data.
Added the OpenInterestFactor class for computing futures open-interest factors.
'''


class within_day_price_features:
    """
    Intraday price features computed from one day's minute/second bars.

    ``data`` is a DataFrame with at least a ``Time`` column (datetime.time
    values) and a ``close`` column; ``Date``/``open``/``high``/``low`` may be
    present but are not used by these methods. All timestamps passed in are
    strings such as "09:31:00", and timestamp2 must be >= timestamp1.
    """

    #   Datetime_Op=Datetime_Operation()
    #   self.data['Time']=Datetime_Op.date_to_timestamp(self.data['Time'])
    #   self.data['Date']=Datetime_Op.date_to_timestamp(self.data['Date'])
    def __init__(self, data):
        self.data = data

    def returns_without_cost(self, timestamp1, timestamp2, direction):
        """
        Return of holding from timestamp1 to timestamp2, ignoring costs.

        :param timestamp1: interval start, e.g. "09:31:00"
        :param timestamp2: interval end, >= timestamp1
        :param direction: > 0 computes a long return, otherwise a short return
        :return: fractional return over the interval
        """
        close_prices = self.get_close_price(timestamp1, timestamp2)
        price1 = close_prices[0]
        price2 = close_prices[-1]
        if direction > 0:
            self.long_return = price2 / price1 - 1
            return self.long_return
        else:
            self.short_return = (price1 - price2) / price1
            return self.short_return

    def get_close_price(self, timestamp1, timestamp2):
        """
        Close prices for the interval [timestamp1, timestamp2], inclusive on
        both ends, returned as a numpy array in row order.
        """
        close_prices = self.data['close'][
            (self.data['Time'] >= pd.Timestamp(timestamp1).time()) & (
                    self.data['Time'] <= pd.Timestamp(timestamp2).time())]
        # Series.as_matrix() was removed in pandas 1.0; to_numpy() is the
        # supported replacement with the same semantics here.
        return close_prices.to_numpy()

    def maxprice_to_open(self, timestamp1, timestamp2):
        """
        max close in the interval / first close in the interval - 1
        (inclusive interval, timestamps as in get_close_price)
        """
        prices = self.get_close_price(timestamp1, timestamp2)
        maxprice_open = prices.max() / prices[0] - 1
        return maxprice_open

    def minprice_to_open(self, timestamp1, timestamp2):
        """
        min close in the interval / first close in the interval - 1
        (inclusive interval, timestamps as in get_close_price)
        """
        prices = self.get_close_price(timestamp1, timestamp2)
        minprice_open = prices.min() / prices[0] - 1
        return minprice_open

    def close_to_settle(self):
        """Last close of the 14:00-15:00 window relative to that window's
        mean price - a proxy for close vs. settlement price."""
        prices = self.get_close_price('14:00:00', '15:00:00')
        closesettle = prices[-1] / prices.mean() - 1
        return closesettle


class GetData:
    """
    Loads raw 1-minute bar data from a local source and assembles the
    per-interval training frames ('traindata') consumed by the models.
    """

    def __init__(self, data_source, start_date, end_date=None, return_freq = 'D'):
        # data_source: 'original' (local pickle) or 'ricequant' (per-code csv).
        # return_freq: 'D' labels each interval with the return to the 15:00
        # close; '30m' labels it with the next 30-minute return.
        self.data_source = data_source
        self.start_date = start_date
        self.end_date = end_date
        self.return_freq = return_freq

    def rq_to_h2o_type(self, df_rq_data):
        """
        Convert RiceQuant-format data into the layout used for h2o model
        training.
        :param df_rq_data: raw frame with a 'YYYY-MM-DD hh:mm:ss' string
            ``datetime`` column plus price/volume columns
        :return: reordered frame with Date/Time columns and unified names
        """
        # Build Date and Time columns from the datetime strings
        df_rq_data['Date'] = df_rq_data.datetime.apply(lambda x: datetime.date(int(x[:4]), int(x[5:7]), int(x[8:10])))
        df_rq_data['Time'] = df_rq_data.datetime.apply(
            lambda x: datetime.time(int(x[-8:-6]), int(x[-5:-3]), int(x[-2:])))
        # Unify the column names
        df_rq_data = df_rq_data.rename(
            columns={'code': 'Code', 'datetime': 'DATETIME', 'vol': 'volume', 'amo': 'amount'})
        # Drop columns outside the target layout (e.g. trade_date, pre_close)
        if 'open_interest' in df_rq_data.columns:
            column_titles = ['Code', 'DATETIME', 'Date', 'Time', 'open', 'high', 'low', 'close', 'amount', 'volume', 'open_interest']
        else:
            column_titles = ['Code', 'DATETIME', 'Date', 'Time', 'open', 'high', 'low', 'close', 'amount', 'volume']
        drop_column_list = list(set(df_rq_data.columns.tolist()).difference(set(column_titles)))
        df_rq_data.drop(drop_column_list, axis=1, inplace=True)
        # Reorder the columns
        df_rq_data = df_rq_data.reindex(columns=column_titles)
        # Drop the zero-volume 14:59 bars on/after 2018-08-20 (closing call auction)
        drop_index = np.array(df_rq_data.index)[(df_rq_data.Time == datetime.time(14, 59, 00))
                                                & (df_rq_data.volume == 0) & (
                                                            df_rq_data.Date >= datetime.date(2018, 8, 20))].tolist()
        df_rq_data.drop(drop_index, axis=0, inplace=True)
        # Sort ascending by DATETIME
        df_rq_data.sort_values(by='DATETIME', inplace=True)
        # Rebuild the index
        df_rq_data.reset_index(drop=True, inplace=True)
        # Normalize dtypes
        df_rq_data.DATETIME = pd.to_datetime(df_rq_data.DATETIME)
        df_rq_data.volume = df_rq_data.volume.astype('float')
        return df_rq_data

    def total_data(self):
        """Load the full minute dataset for the configured source, sliced to
        [start_date, end_date]."""
        df = pd.DataFrame()
        if self.data_source == 'original':
            df = pd.read_pickle('./data/TimestampPriceMin.pickle')
        elif self.data_source == 'ricequant':
            # NOTE(review): `code` and `select_data` are expected to be
            # module-level names defined elsewhere in this file - confirm.
            df = pd.read_csv('./data/' + code + '_1min_pre.csv')
            df = GetData.rq_to_h2o_type(self, df_rq_data=df)
        else:
            print("Error: data_source is wrong, pls input 'ricequant' or 'original'")
        df = select_data(df, 'Date', self.start_date, end_date=self.end_date)
        return df

    def split_by_day(self, data):
        """
        Split the minute data by calendar date.
        Returns a list of single-day DataFrames, each carrying
        'Time', 'Date', 'DATETIME','preclose','open','high','low','close','amt','volumn'
        """
        # Derive Time/Date from the DATETIME column
        data['Time'], data['Date'] = data['DATETIME'].apply(lambda x: x.time()), \
                                     data['DATETIME'].apply(lambda x: x.date())
        oneday = []
        for dates in data['Date'].unique():
            oneday.append(data[data['Date'] == dates])
        return oneday

    def maketrain(self, oneday):
        """
        Build one training row from a single day's minute bars.

        Columns: interval returns ('start*'), labels ('R*': return to the
        15:00 close when return_freq == 'D', or the next 30-minute return
        when return_freq == '30m'), max/min price vs. the 09:31 open
        ('maxpT*'/'minpT*'), the close-to-settle proxy, and the day's
        open/close prices.
        """
        Date = oneday['Date'].iloc[0]
        oneday_price = within_day_price_features(
            oneday[['Date', 'Time', 'open', 'high', 'low', 'close']])

        start1000 = oneday_price.returns_without_cost("09:31:00", "10:00:00", 1)
        start1030 = oneday_price.returns_without_cost("10:00:00", "10:30:00", 1)
        start1100 = oneday_price.returns_without_cost("10:30:00", "11:00:00", 1)
        start1300 = oneday_price.returns_without_cost("11:00:00", "13:00:00", 1)
        start1330 = oneday_price.returns_without_cost("13:00:00", "13:30:00", 1)
        start1400 = oneday_price.returns_without_cost("13:30:00", "14:00:00", 1)
        # NOTE(review): this window starts at 14:01, unlike the 14:00 used
        # elsewhere - confirm intentional.
        start1430 = oneday_price.returns_without_cost("14:01:00", "14:30:00", 1)
        start1500 = oneday_price.returns_without_cost("14:30:00", "15:00:00", 1)

        if self.return_freq == 'D':
            R1000 = float(oneday_price.returns_without_cost("10:00:00", "15:00:00", 1))
            R1030 = float(oneday_price.returns_without_cost("10:30:00", "15:00:00", 1))
            R1100 = float(oneday_price.returns_without_cost("11:00:00", "15:00:00", 1))
            R1300 = float(oneday_price.returns_without_cost("13:00:00", "15:00:00", 1))
            R1330 = float(oneday_price.returns_without_cost("13:30:00", "15:00:00", 1))
            R1400 = float(oneday_price.returns_without_cost("14:00:00", "15:00:00", 1))
            R1430 = float(oneday_price.returns_without_cost("14:30:00", "15:00:00", 1))
        elif self.return_freq == '30m':
            R1000 = float(start1030)
            R1030 = float(start1100)
            R1100 = float(start1300)
            R1300 = float(start1330)
            R1330 = float(start1400)
            R1400 = float(start1430)
            R1430 = float(start1500)
        # NOTE(review): any other return_freq leaves R1000..R1430 undefined
        # and raises NameError below - confirm this is acceptable validation.

        maxpT1000 = oneday_price.maxprice_to_open("09:31:00", "10:00:00")
        maxpT1030 = oneday_price.maxprice_to_open("09:31:00", "10:30:00")
        maxpT1100 = oneday_price.maxprice_to_open("09:31:00", "11:00:00")
        maxpT1300 = oneday_price.maxprice_to_open("09:31:00", "13:00:00")
        maxpT1330 = oneday_price.maxprice_to_open("09:31:00", "13:30:00")
        maxpT1400 = oneday_price.maxprice_to_open("09:31:00", "14:00:00")
        maxpT1430 = oneday_price.maxprice_to_open("09:31:00", "14:30:00")

        minpT1000 = oneday_price.minprice_to_open("09:31:00", "10:00:00")
        minpT1030 = oneday_price.minprice_to_open("09:31:00", "10:30:00")
        minpT1100 = oneday_price.minprice_to_open("09:31:00", "11:00:00")
        minpT1300 = oneday_price.minprice_to_open("09:31:00", "13:00:00")
        minpT1330 = oneday_price.minprice_to_open("09:31:00", "13:30:00")
        minpT1400 = oneday_price.minprice_to_open("09:31:00", "14:00:00")
        minpT1430 = oneday_price.minprice_to_open("09:31:00", "14:30:00")

        prevClose_prevSettle = oneday_price.close_to_settle()
        OpenPrice = oneday_price.get_close_price("09:31:00", "15:00:00")[0]
        ClosePrice = oneday_price.get_close_price("09:31:00", "15:00:00")[-1]

        # One row, columns named later in get_traindata
        train = pd.DataFrame([Date, OpenPrice, ClosePrice, prevClose_prevSettle,
                              start1000, start1030, start1100, start1300, start1330,
                              start1400, start1430, start1500,
                              R1000, R1030, R1100, R1300, R1330, R1400, R1430,
                              maxpT1000, maxpT1030, maxpT1100, maxpT1300, maxpT1330,
                              maxpT1400, maxpT1430,
                              minpT1000, minpT1030, minpT1100, minpT1300, minpT1330,
                              minpT1400, minpT1430]).transpose()
        return train

    def get_traindata(self, TimestampPrice, csv_flag=True, df_whole=None, feature_whole_list=None,
                      df_solo=None, feature_solo_matrix=None):
        """
        Assemble the seven per-interval training frames.

        When csv_flag is True the cached data\\ATST_train.csv is loaded;
        otherwise the frame is rebuilt from TimestampPrice and re-cached.
        df_whole/feature_whole_list merge in features shared by all intervals;
        df_solo/feature_solo_matrix merge per-interval features (7 lists).
        Returns a list of 7 DataFrames (one per prediction interval), or
        None (with a printed message) if merging the extra features fails.
        """
        if csv_flag:
            ATST_train = pd.read_csv(r'data\ATST_train.csv')
            ATST_train.Date = pd.to_datetime(ATST_train.Date)
            ATST_train.Date = ATST_train.Date.apply(lambda x: x.date())
        else:
            everyday = GetData.split_by_day(self, TimestampPrice)
            ATST_train = []
            for x in everyday:
                # NOTE(review): bare except silently drops any day that fails
                # (e.g. incomplete sessions) - confirm that is intended.
                try:
                    ATST_train.append(GetData.maketrain(self, x))
                except:
                    print(str(x) + "error occured \n")

            ATST_train = pd.concat(ATST_train)
            ATST_train.columns = ['Date', 'OpenPrice', 'ClosePrice',
                                  'prevClose_prevSettle',
                                  'start1000', 'start1030', 'start1100', 'start1300',
                                  'start1330', 'start1400', 'start1430', 'start1500',
                                  'R1000', 'R1030', 'R1100', 'R1300', 'R1330', 'R1400',
                                  'R1430',
                                  'maxpT1000', 'maxpT1030', 'maxpT1100', 'maxpT1300',
                                  'maxpT1330', 'maxpT1400', 'maxpT1430',
                                  'minpT1000', 'minpT1030', 'minpT1100', 'minpT1300',
                                  'minpT1330', 'minpT1400', 'minpT1430']

            # change all the data type to float
            # NOTE(review): pd.to_numeric(errors='ignore') is deprecated in
            # recent pandas - confirm the targeted pandas version.
            ATST_train.iloc[:, range(1, np.shape(ATST_train)[1])] = ATST_train.iloc[:,
                                                                    range(1, np.shape(ATST_train)[1])].astype(float)
            ATST_train.iloc[:, range(1, np.shape(ATST_train)[1])] = ATST_train.iloc[:,
                                                                    range(1, np.shape(ATST_train)[1])].apply(
                lambda x: pd.to_numeric(x, errors='ignore'))

            # Previous-day features: shift so each row only sees yesterday
            ATST_train['prevClose'] = ATST_train['ClosePrice'].shift(1)
            ATST_train['prevClose_prevSettle'] = ATST_train['prevClose_prevSettle'].shift(1)
            ATST_train['prevClose_prevOpen'] = ATST_train['ClosePrice'] / ATST_train['OpenPrice'] - 1
            ATST_train['prevClose_prevOpen'] = ATST_train['prevClose_prevOpen'].shift(1)
            ATST_train['todayOpen_prevClose'] = ATST_train['OpenPrice'] / ATST_train['prevClose'] - 1

            ATST_train = ATST_train.dropna(axis=0)
            ATST_train.to_csv(r'data\ATST_train.csv')

        # Feature engineering: merge in externally supplied factor frames
        if df_whole is None or feature_whole_list is None:
            df_whole = pd.DataFrame(data=[0], columns=['Date'])
            feature_whole_list = []
        if df_solo is None or feature_solo_matrix is None:
            df_solo = pd.DataFrame(data=[0], columns=['Date'])
            feature_solo_matrix = [[] for x in range(7)]

        # The dummy frames above carry an int Date, so this isinstance check
        # skips the merge when no real features were supplied
        if isinstance(df_whole.Date.iloc[0], datetime.date):
            ATST_train = pd.merge(ATST_train, df_whole, on='Date')
        if isinstance(df_solo.Date.iloc[0], datetime.date):
            print(df_solo.Date.iloc[0], type(df_solo.Date.iloc[0]))
            print(ATST_train.Date.iat[0], type(ATST_train.Date.iat[0]))
            ATST_train = pd.merge(ATST_train, df_solo, on='Date')

        try:
            # One frame per interval: label column first, then the features
            # known by that time of day
            traindata = [ATST_train[['R1000', 'Date', 'start1000', 'maxpT1000', 'minpT1000',
                                     'prevClose_prevSettle', 'prevClose_prevOpen', 'todayOpen_prevClose']
                                    + feature_whole_list + feature_solo_matrix[0]],
                         ATST_train[
                             ['R1030', 'Date', 'start1000', 'start1030', 'maxpT1030', 'minpT1030',
                              'prevClose_prevSettle', 'prevClose_prevOpen', 'todayOpen_prevClose']
                             + feature_whole_list + feature_solo_matrix[1]],
                         ATST_train[
                             ['R1100', 'Date', 'start1000', 'start1030', 'start1100',
                              'maxpT1100', 'minpT1100', 'prevClose_prevSettle', 'prevClose_prevOpen',
                              'todayOpen_prevClose'] + feature_whole_list + feature_solo_matrix[2]],
                         ATST_train[
                             ['R1300', 'Date', 'start1000', 'start1030', 'start1100', 'start1300',
                              'maxpT1300', 'minpT1300', 'prevClose_prevSettle', 'prevClose_prevOpen',
                              'todayOpen_prevClose'] + feature_whole_list + feature_solo_matrix[3]],
                         ATST_train[
                             ['R1330', 'Date', 'start1000', 'start1030', 'start1100', 'start1300', 'start1330',
                              'maxpT1330', 'minpT1330', 'prevClose_prevSettle', 'prevClose_prevOpen',
                              'todayOpen_prevClose'] + feature_whole_list + feature_solo_matrix[4]],
                         ATST_train[
                             ['R1400', 'Date', 'start1000', 'start1030',
                              'start1100', 'start1300', 'start1330', 'start1400',
                              'maxpT1400', 'minpT1400',
                              'prevClose_prevSettle', 'prevClose_prevOpen',
                              'todayOpen_prevClose'] + feature_whole_list + feature_solo_matrix[5]],
                         ATST_train[
                             ['R1430', 'Date', 'start1000', 'start1030',
                              'start1100', 'start1300', 'start1330', 'start1400',
                              'start1430', 'maxpT1430', 'minpT1430',
                              'prevClose_prevSettle', 'prevClose_prevOpen',
                              'todayOpen_prevClose'] + feature_whole_list + feature_solo_matrix[6]]]
            return traindata
        except Exception as e:
            # Diagnose which of the supplied feature inputs caused the failure
            if not set(feature_whole_list).issubset(set(list(ATST_train.columns))):
                print('输入的feature_whole_list包含df_whole没有的特征')
            elif not set(np.array(feature_solo_matrix).reshape([-1, ]).tolist()).issubset(
                    set(list(ATST_train.columns))):
                print('输入的feature_whole_matrix包含df_solo没有的特征')
            else:
                print('Error: 合并新特征时出错，请确认输入的特征数据及名称'
                      '注意：输入的feature_whole_list格式须为列表；'
                      'feature_solo_matrix须为矩阵，即列表套列表')

    def data_1min_handle(self):
        """Load the per-code 1-minute csv, add date/time columns, slice to the
        configured range, and append a per-bar return column."""
        # TODO: read stock data from a database here instead (suggested: a
        # helper function), indexed by time - loading the whole file on every
        # call and slicing afterwards is inefficient.
        df = pd.read_csv(r'./data/' + code + '_1min_pre.csv')
        df['date'] = df.datetime.apply(lambda x: datetime.date(int(x[:4]), int(x[5:7]), int(x[8:10])))
        df['time'] = df.datetime.apply(lambda x: datetime.time(int(x[-8:-6]), int(x[-5:-3]), int(x[-2:])))
        df = select_data(df, 'date', self.start_date, self.end_date)
        df['return_classifier'] = df.close.pct_change()
        # First bar has no previous close; use its own open->close return.
        # NOTE(review): chained-assignment write - may not stick under pandas
        # copy-on-write; confirm.
        df['return_classifier'].iloc[0] = df.close.iloc[0] / df.open.iloc[0] - 1

        return df


class TradeSignal:
    """
    Trains one deep-learning model per intraday interval from the supplied
    training frames and merges their predictions into a single, time-sorted
    signal table.
    """

    def __init__(self, traindata, predict_start_date, train_engine='h2o', train_id=None):
        self.traindata = traindata
        self.predict_start_date = predict_start_date
        self.train_engine = train_engine
        self.train_id = train_id

        # The h2o backend needs a running cluster; connect eagerly so a
        # broken installation fails here instead of mid-training.
        if train_engine == 'h2o':
            try:
                h2o.init()

            except Exception as e:
                print("Error: check your h2o connection;"
                      "h2o is a python package for machine learning")

    def combine_datetime(self, date, time):
        """Pair dates and times positionally into a list of datetimes."""
        return [datetime.datetime.combine(d, t) for d, t in zip(list(date), list(time))]

    def add_time(self, prediction, time):
        """Stamp every prediction row with the same intraday time."""
        prediction['Time'] = [time] * prediction.shape[0]
        return prediction

    def get_TimestampSignal(self):
        """Train per-interval models, tag each prediction frame with its
        trading time, and return the concatenated, sorted signal table
        (also written to data/TimestampSignal.csv)."""
        # Trading times attached to the seven interval predictions, in order.
        signal_times = [datetime.time(10, 0), datetime.time(10, 30),
                        datetime.time(11, 0), datetime.time(11, 30),
                        datetime.time(13, 30), datetime.time(14, 0),
                        datetime.time(14, 30)]

        TimestampSignal = []
        for ATST_traindata in self.traindata:
            if self.train_engine == 'h2o':
                trainer = model_training.H2Odeeplearning(ATST_traindata)
            elif self.train_engine == 'keras':
                trainer = model_training.KerasDeeplearning(ATST_traindata, self.train_id)

            # Hidden layers shrink by two units per layer, starting from the
            # largest even number not above (n_features - 2).
            first_width = int(np.floor((ATST_traindata.shape[1] - 2) / 2).item()) * 2
            hidden_layers = list(range(first_width, 0, -2))
            hidden_dropout = [0.2] * len(hidden_layers)

            TimestampSignal.append(trainer.DL_rolling_autoencoder_ANN(
                self.predict_start_date, hidden_layers, hidden_dropout))

        for idx, stamp in enumerate(signal_times):
            TimestampSignal[idx] = self.add_time(TimestampSignal[idx], stamp)

        TimestampSignal = pd.concat(TimestampSignal)
        TimestampSignal = TimestampSignal.sort_values(by=['Date', 'Time'])
        TimestampSignal['DATETIME'] = self.combine_datetime(TimestampSignal['Date'],
                                                            TimestampSignal['Time'])

        TimestampSignal.to_csv('data/TimestampSignal.csv', index=None)

        return TimestampSignal


class Return:
    """Benchmark-return computation and backtest-result assembly."""

    def __init__(self):
        # Runtime notice (kept in Chinese): HS300 returns use forward-adjusted
        # prices baselined at 2015-01-01; the baseline choice does not affect
        # the returns.
        print("沪深300的收益计算采用以2015年1月1日为基线的前复权价格；基线的设定对收益无关")

    def hs300_pre_close_return(self, start_date, end_date=None):
        """
        Daily returns of the HS300 forward-adjusted close, baselined at
        2015-01-01. Returns (frame indexed by date, benchmark column name).
        :param start_date:
        :param end_date:
        """
        # Load the HS300 daily data
        # NOTE(review): `code` and `select_data` are expected to be
        # module-level names defined elsewhere in this file - confirm.
        df_hs300_data = pd.read_csv('data/' + code + '_day_pre.csv', usecols=['datetime', 'close'])
        df_hs300_data['datetime'] = df_hs300_data.datetime.apply(
            lambda x: datetime.date(int(x[:4]), int(x[5:7]), int(x[8:10])))
        # Slice the forward-adjusted prices to the requested date range
        df_hs300_data = select_data(df=df_hs300_data, date_column_str='datetime',
                                    start_date=start_date, end_date=end_date)
        # Compute the benchmark return curve
        return_column_name = code + '_benchmark'
        # df_hs300_data[return_column_name] = np.cumprod(1 + df_hs300_data.close.pct_change())
        # The benchmark curve is deliberately not compounded: it assumes a
        # fully-invested buy-and-hold HS300 portfolio.
        df_hs300_data[return_column_name] = np.divide(df_hs300_data.close, df_hs300_data.close[df_hs300_data.index[0]])
        df_hs300_data.set_index(['datetime'], inplace=True)
        return df_hs300_data, return_column_name

    # NOTE(review): pd.DataFrame() as a default argument is a shared mutable
    # default; harmless here since it is only tested for .empty and then
    # rebound, but worth confirming before reuse.
    def get_backtest_result(self, TimestampPrice, TimestampSignal=pd.DataFrame()):
        """Run the minute-level backtest for the given signals and return
        (datevalues, date_val, actionlist)."""
        print("如果输入的TimestamPrice来源于本地，则无需输入TimestampSignal一列")
        if TimestampSignal.empty:
            # Fall back to the last signal file written by TradeSignal
            TimestampSignal = pd.read_csv('data/TimestampSignal.csv')
            TimestampSignal.Date = pd.to_datetime(TimestampSignal.Date, format='%Y-%m-%d')
            TimestampSignal.Date = TimestampSignal.Date.apply(lambda x: x.date())
            TimestampSignal.DATETIME = pd.to_datetime(TimestampSignal.DATETIME)

        # the time period for TimestampPrice must be bigger than TimestampSignal
        backtestMins = backtest_minutes.backtest_minutes(
            TimestampSignal, TimestampPrice, opencost=0.23 / 10000, closecost=0.23 / 10000, closecost2=3.45 / 10000)
        datevalues, actionlist = backtestMins.crossday_fixedstoploss_mins_value_everyday(r=0.04)
        datevalues.to_csv('data/datevalues.csv', index=None)

        ReturnsManipulation = model_judgement.Returns_calculation(datevalues)
        # to merge same day return
        ReturnsManipulation.merge_by_date()
        # to impute zeros for days without trading
        tradedates = TimestampSignal['Date'].unique()
        ReturnsManipulation.impute_zero_returns(tradedates)
        # calculate the simple returns
        ReturnsManipulation.returns_to_Simplevalues()

        # combine backtest result with dates
        date_val = model_judgement.combine_date_value(
            pd.Series(tradedates), ReturnsManipulation.SimpleValue)
        date_val.date_and_value(include_first=True)

        datevalues = date_val.datevalue
        datevalues.columns = ['Date', 'Value']
        datevalues.set_index(['Date'], inplace=True)

        # plt.figure(figsize=(10, 6))
        # datevalues['Value'].plot(legend=True)
        # datevalues.to_csv('data/datevalues.csv', index=None)
        #
        # df_return, return_column = Return.hs300_pre_close_return(self,
        # 	start_date=str(datevalues.index[0]), end_date=str(datevalues.index[-1]))
        # df_return[return_column].plot(legend=True)
        # plt.show()
        return datevalues, date_val, actionlist


class TechFactor:
    """Extracts technical-indicator factors for a date range."""

    def __init__(self, freq, factors_list, start_date, end_date):
        """
        :param freq: day/30m/1m
        :param factors_list: names of the indicator columns to extract
        :param start_date: 'YY-MM-DD' style string
        :param end_date: 'YY-MM-DD' style string
        """
        self.factors_list = factors_list
        self.start_date = start_date
        self.end_date = end_date
        self.freq = freq

    def run(self):
        # Only daily factors are implemented; any other frequency falls
        # through and returns None.
        if self.freq == 'day':
            return self.day_factor()

    def day_factor(self):
        """Load the daily indicator dump and slice it to the configured
        date range and factor list."""
        # The .xls file is really a gbk-encoded tab-separated dump whose
        # last row is a footer.
        raw = pd.read_csv('data/000300_tec.xls', sep='\t',
                          encoding='gbk', header=1).iloc[:-1]
        raw.columns = [name.strip() for name in raw.columns]
        raw.rename(columns={'时间': 'time'}, inplace=True)
        raw['time'] = pd.to_datetime(raw['time'])
        raw['date'] = raw['time'].apply(lambda x: x.date())
        try:
            return select_data(raw.loc[:, ['date'] + self.factors_list],
                               'date', self.start_date, self.end_date)
        except Exception as e:
            print("Error: 输入的factors_list有元素不在数据库中；"
                  "请从以下因子名称中挑选输入", raw.columns[6: -1])


def tech_features(df):
    """Normalize technical-factor column names (prefixing them with 'pre_')
    and lag every factor by one row, so each row carries the previous
    session's indicator values. Mutates and returns ``df``."""
    renames = {'date': 'Date', 'BIAS.BIAS1': 'pre_BIAS1', 'BIAS.BIAS2': 'pre_BIAS2',
               'BIAS.BIAS3': 'pre_BIAS3', 'KDJ.K': 'pre_KDJ.K', 'KDJ.D': 'pre_KDJ.D',
               'KDJ.J': 'pre_KDJ.J', 'MACD.MACD': 'pre_MACD', 'TRIX.TRIX': 'pre_TRIX'}
    df.rename(columns=renames, inplace=True)
    # Shift everything except the leading Date column down one row
    # (first row becomes NaN).
    df.iloc[:, 1:] = df.iloc[:, 1:].shift()
    return df


class VolumeFactors:
    """Volume-based factors computed from 1-minute bars, aggregated into
    intraday windows."""

    def __init__(self, factor_list, df, window, classify=False, code=None):
        """
        When adding a factor to this class: first add its name to
        ``factors_all``; also register the function call in ``factors_dict``;
        and if the new factor needs the data grouped by date first, add it to
        the set literal below so that ``self.df_split`` gets built.

        :param factor_list: subset of ``factors_all`` to compute
        :param df: 1-minute data with at least 'date', 'time', 'vol' columns
        :param window: '30m', '1h'/'60m', or '1d'/'240m'
        :param classify: split volumes into up-minute/down-minute buckets
        :param code: instrument code (stored, not used here)
        """
        self.df = df
        # Future: pull price/volume data straight from a database index.
        factors_all = {'sum_window_volume', 'until_window_volume', 'mean_window_volume', 'vol_window_pct'}
        if not set(factor_list).issubset(factors_all):
            print('输入的因子名称有误，请选取以下因子输入')
            print(factors_all)
        else:
            self.window = window
            if self.window not in ['30m', '1h', '1d', '60m', '240m']:
                print("Error: window必须为 '30m', '1h', '1d', '60m' 或 '240m' ")
            else:
                # Normalize the aliases to minute counts
                if self.window == '1h':
                    self.window = '60m'
                elif self.window == '1d':
                    self.window = '240m'
                self.code = code
                self.classify = classify

                # NOTE(review): set.union(...) never returns None, so this
                # condition is always true and df_split is always built -
                # presumably an intersection test was intended; confirm
                # before changing.
                if set(factor_list).union({'sum_window_volume', 'until_window_volume',
                                           'mean_window_volume', 'vol_window_pct'}) is not None:
                    self.df_split = self.split_data(self.df, self.window)
                    self.columns_no_date = self.df_split.columns.tolist()
                    self.columns_no_date.remove('date')

    def sum_window_volume(self):
        """Total traded volume per intraday window (the frame built by
        split_data, one row per date)."""
        return self.df_split

    def until_window_volume(self):
        df_data = self.df_split.copy()
        if self.classify:
            buy_columns_no_date = self.columns_no_date[:8]
            sell_columns_no_date = self.columns_no_date[8:]
            for iCol in buy_columns_no_date:
                index = buy_columns_no_date.index(iCol)
                seri = df_data.loc[:, buy_columns_no_date[max(0, index - 1)]: iCol]
                df_data.loc[:, iCol] = df_data.loc[:, buy_columns_no_date[max(0, index - 1)]: iCol].sum(axis=1)
            for iCol in sell_columns_no_date:
                index = sell_columns_no_date.index(iCol)
                seri = df_data.loc[:, sell_columns_no_date[max(0, index - 1)]: iCol]
                df_data.loc[:, iCol] = df_data.loc[:, sell_columns_no_date[max(0, index - 1)]: iCol].sum(axis=1)
        else:
            for iCol in self.columns_no_date:
                index = self.columns_no_date.index(iCol)
                df_data.loc[:, iCol] = df_data.loc[:, self.columns_no_date[max(0,index - 1)]: iCol].sum(axis=1)
        df_data.rename(columns={iColumn: 'until_window_volume_' + iColumn for iColumn in self.columns_no_date},
                       inplace=True)
        return df_data

    def mean_window_volume(self, window):
        """分钟成交量"""
        df_data = self.df_split.copy()
        df_data.loc[:, self.columns_no_date] = df_data.loc[:, self.columns_no_date] / int(window[:-1])
        df_data.rename(columns={iColumn: 'mean_' + iColumn for iColumn in self.columns_no_date}, inplace=True)
        return df_data

    def vol_window_pct(self):
        """量比"""
        df_data = self.df_split.copy()
        df_data[self.columns_no_date] = df_data[self.columns_no_date] / df_data[self.columns_no_date].shift()
        df_data.rename(columns={iColumn: 'vol_window_pct_' + iColumn for iColumn in self.columns_no_date}, inplace=True)
        return df_data.dropna(axis=0)

    def split_data(self, df, window):
        if window == "30m":
            df_data = df.groupby('date').apply(self.split_by_30min)
            if self.classify:
                header = [
                    'vol30_1000_buy', 'vol30_1030_buy', 'vol30_1100_buy', 'vol30_1130_buy',
                    'vol30_1330_buy', 'vol30_1400_buy', 'vol30_1430_buy', 'vol30_1500_buy',
                    'vol30_1000_sell', 'vol30_1030_sell', 'vol30_1100_sell', 'vol30_1130_sell',
                    'vol30_1330_sell', 'vol30_1400_sell', 'vol30_1430_sell', 'vol30_1500_sell']
            else:
                header = ['vol30_1000', 'vol30_1030', 'vol30_1100', 'vol30_1130',
                          'vol30_1330', 'vol30_1400', 'vol30_1430', 'vol30_1500']
            df_data.columns = header
            df_data.reset_index(level=0, inplace=True)
            return df_data
        elif window == '60m':
            df_data = df.groupby('date').apply(self.split_by_1h)
            if self.classify:
                header = [
                    'vol60_1030_buy', 'vol60_1130_buy', 'vol60_1400_buy', 'vol60_1500_buy',
                    'vol60_1030_sell', 'vol60_1130_sell', 'vol60_1400_sell', 'vol60_1500_sell']
            else:
                header = ['vol60_1030', 'vol60_1130', 'vol60_1400', 'vol60_1500']
            df_data.columns = header
            df_data.reset_index(level=0, inplace=True)
            return df_data
        elif window == '240m':
            return df
        else:
            print('Error: 输入的window出错，但是错误原因不明')

    def split_by_30min(self, df):
        if self.classify:
            vol30_1000_buy = df[(df.time >= datetime.time(9, 30)) & (df.time <= datetime.time(10, 0)) & (
                        df['return_classifier'] > 0)].vol.sum()
            vol30_1030_buy = df[(df.time > datetime.time(10, 0)) & (df.time <= datetime.time(10, 30)) & (
                        df['return_classifier'] > 0)].vol.sum()
            vol30_1100_buy = df[(df.time > datetime.time(10, 30)) & (df.time <= datetime.time(11, 0)) & (
                        df['return_classifier'] > 0)].vol.sum()
            vol30_1130_buy = df[(df.time > datetime.time(11, 0)) & (df.time <= datetime.time(11, 30)) & (
                        df['return_classifier'] > 0)].vol.sum()
            vol30_1330_buy = df[(df.time > datetime.time(11, 30)) & (df.time <= datetime.time(13, 30)) & (
                        df['return_classifier'] > 0)].vol.sum()
            vol30_1400_buy = df[(df.time > datetime.time(13, 30)) & (df.time <= datetime.time(14, 0)) & (
                        df['return_classifier'] > 0)].vol.sum()
            vol30_1430_buy = df[(df.time > datetime.time(14, 0)) & (df.time <= datetime.time(14, 30)) & (
                        df['return_classifier'] > 0)].vol.sum()
            vol30_1500_buy = df[(df.time > datetime.time(14, 30)) & (df.time <= datetime.time(15, 0)) & (
                        df['return_classifier'] > 0)].vol.sum()

            vol30_1000_sell = df[(df.time >= datetime.time(9, 30)) & (df.time <= datetime.time(10, 0)) & (
                        df['return_classifier'] < 0)].vol.sum()
            vol30_1030_sell = df[(df.time > datetime.time(10, 0)) & (df.time <= datetime.time(10, 30)) & (
                        df['return_classifier'] < 0)].vol.sum()
            vol30_1100_sell = df[(df.time > datetime.time(10, 30)) & (df.time <= datetime.time(11, 0)) & (
                        df['return_classifier'] < 0)].vol.sum()
            vol30_1130_sell = df[(df.time > datetime.time(11, 0)) & (df.time <= datetime.time(11, 30)) & (
                        df['return_classifier'] < 0)].vol.sum()
            vol30_1330_sell = df[(df.time > datetime.time(11, 30)) & (df.time <= datetime.time(13, 30)) & (
                        df['return_classifier'] < 0)].vol.sum()
            vol30_1400_sell = df[(df.time > datetime.time(13, 30)) & (df.time <= datetime.time(14, 0)) & (
                        df['return_classifier'] < 0)].vol.sum()
            vol30_1430_sell = df[(df.time > datetime.time(14, 0)) & (df.time <= datetime.time(14, 30)) & (
                        df['return_classifier'] < 0)].vol.sum()
            vol30_1500_sell = df[(df.time > datetime.time(14, 30)) & (df.time <= datetime.time(15, 0)) & (
                        df['return_classifier'] < 0)].vol.sum()

            df_data = pd.DataFrame([
                vol30_1000_buy, vol30_1030_buy, vol30_1100_buy, vol30_1130_buy,
                vol30_1330_buy, vol30_1400_buy, vol30_1430_buy, vol30_1500_buy,
                vol30_1000_sell, vol30_1030_sell, vol30_1100_sell, vol30_1130_sell,
                vol30_1330_sell, vol30_1400_sell, vol30_1430_sell, vol30_1500_sell]).transpose()
        else:
            vol30_1000 = df[(df.time >= datetime.time(9, 30)) & (df.time <= datetime.time(10, 0))].vol.sum()
            vol30_1030 = df[(df.time > datetime.time(10, 0)) & (df.time <= datetime.time(10, 30))].vol.sum()
            vol30_1100 = df[(df.time > datetime.time(10, 30)) & (df.time <= datetime.time(11, 0))].vol.sum()
            vol30_1130 = df[(df.time > datetime.time(11, 0)) & (df.time <= datetime.time(11, 30))].vol.sum()
            vol30_1330 = df[(df.time > datetime.time(11, 30)) & (df.time <= datetime.time(13, 30))].vol.sum()
            vol30_1400 = df[(df.time > datetime.time(13, 30)) & (df.time <= datetime.time(14, 0))].vol.sum()
            vol30_1430 = df[(df.time > datetime.time(14, 0)) & (df.time <= datetime.time(14, 30))].vol.sum()
            vol30_1500 = df[(df.time > datetime.time(14, 30)) & (df.time <= datetime.time(15, 0))].vol.sum()
            df_data = pd.DataFrame([vol30_1000, vol30_1030, vol30_1100, vol30_1130,
                                    vol30_1330, vol30_1400, vol30_1430, vol30_1500]).transpose()
        return df_data

    def split_by_1h(self, df):
        """Sum one day's volume into four one-hour session buckets.

        When ``self.classify`` is True each bucket is additionally split
        into up-minutes (return_classifier > 0) and down-minutes (< 0):
        the returned single-row frame then holds the four buy sums
        followed by the four sell sums.
        """
        t = datetime.time
        # (lower, upper) bucket bounds; the first bucket is closed at both
        # ends, the later ones are half-open (lower, upper].
        bounds = [(t(9, 30), t(10, 30)), (t(10, 30), t(11, 30)),
                  (t(11, 30), t(14, 0)), (t(14, 0), t(15, 0))]

        def bucket_sum(position, extra_mask=None):
            lower, upper = bounds[position]
            mask = (df.time >= lower) if position == 0 else (df.time > lower)
            mask = mask & (df.time <= upper)
            if extra_mask is not None:
                mask = mask & extra_mask
            return df[mask].vol.sum()

        if self.classify:
            rising = df['return_classifier'] > 0
            falling = df['return_classifier'] < 0
            values = [bucket_sum(i, rising) for i in range(len(bounds))]
            values += [bucket_sum(i, falling) for i in range(len(bounds))]
        else:
            values = [bucket_sum(i) for i in range(len(bounds))]
        return pd.DataFrame(values).transpose()

    def run(self, factor_list):
        """Compute each requested volume factor and return them as a list.

        factor_list: iterable of names out of 'sum_window_volume',
        'until_window_volume', 'mean_window_volume', 'vol_window_pct'.
        Dispatch uses direct method references instead of eval() — safer
        and easier to trace; an unknown name raises KeyError as before.
        """
        dispatch = {
            'sum_window_volume': lambda: self.sum_window_volume(),
            'until_window_volume': lambda: self.until_window_volume(),
            # mean_window_volume needs the window length configured on self
            'mean_window_volume': lambda: self.mean_window_volume(self.window),
            'vol_window_pct': lambda: self.vol_window_pct(),
        }
        return [dispatch[name]() for name in factor_list]


def volume_factor(factor_list, df, window, classify=False, code=None):
    """Compute intraday volume factors via VolumeFactors, merged on 'date'.

    factor_list : str or list of str — factor names understood by
        VolumeFactors.run.
    df : minute-level DataFrame handed through to VolumeFactors.
    window / classify / code : forwarded to VolumeFactors unchanged.
    Returns a single DataFrame (the factor frames merged on 'date').
    """
    if isinstance(factor_list, str):
        factor_list = [factor_list]
    class_volume_factors = VolumeFactors(factor_list, df, window, classify, code)
    results = class_volume_factors.run(factor_list)
    # Bug fix: a single requested factor previously returned the one-element
    # list itself rather than the DataFrame, unlike FactorWindowStats.run,
    # which made downstream pd.merge calls fail.
    if len(results) == 1:
        return results[0]
    merged = results[0].copy()
    for nxt in results[1:]:
        merged = pd.merge(merged, nxt, on='date')
    return merged


class FactorWindowStats(object):
    """Aggregate one minute-level feature column into intraday time windows.

    Parameters
    ----------
    feature_name : str
        Column of ``df`` to aggregate (e.g. 'vol', 'open_interest').
    df : pandas.DataFrame
        Minute-level data with 'date', 'time' and ``feature_name`` columns;
        must also contain 'return_classifier' when ``classify`` is True.
    window : str
        Aggregation window: '30m', '60m' or '240m' ('240m' returns the
        input unchanged).
    classify : bool
        When True, additionally aggregate up-minutes
        (return_classifier > 0) and down-minutes (< 0) separately.
    """

    # (label, lower bound, upper bound) per bucket. The first bucket is
    # closed at both ends; later buckets are half-open (lower, upper].
    _BUCKETS_30M = (
        ('1000', datetime.time(9, 30), datetime.time(10, 0)),
        ('1030', datetime.time(10, 0), datetime.time(10, 30)),
        ('1100', datetime.time(10, 30), datetime.time(11, 0)),
        ('1130', datetime.time(11, 0), datetime.time(11, 30)),
        ('1330', datetime.time(11, 30), datetime.time(13, 30)),
        ('1400', datetime.time(13, 30), datetime.time(14, 0)),
        ('1430', datetime.time(14, 0), datetime.time(14, 30)),
        ('1500', datetime.time(14, 30), datetime.time(15, 0)),
    )
    _BUCKETS_60M = (
        ('1030', datetime.time(9, 30), datetime.time(10, 30)),
        ('1130', datetime.time(10, 30), datetime.time(11, 30)),
        ('1400', datetime.time(11, 30), datetime.time(14, 0)),
        ('1500', datetime.time(14, 0), datetime.time(15, 0)),
    )

    def __init__(self, feature_name, df, window, classify=True):
        self.df = df
        self.window = window
        self.classify = classify
        self.feature_name = feature_name
        # Per-day aggregation of the feature into the chosen windows.
        self.df_split = self.split_data(window)
        # Feature column names (without 'date'); reused by the derived factors.
        self.columns_no_date = self.df_split.columns.tolist()
        self.columns_no_date.remove('date')

    def sum_window(self):
        """Return the per-window sums computed in __init__."""
        return self.df_split

    def split_data(self, window):
        """Group the minute data by day and aggregate into `window` buckets.

        Raises ValueError on an unsupported window string (the original
        printed an error and returned None, which crashed later anyway).
        """
        if window == '30m':
            splitter = self.split_by_30min
        elif window == '60m':
            splitter = self.split_by_1h
        elif window == '240m':
            # Whole-session window: nothing to split.
            return self.df
        else:
            raise ValueError(
                "unsupported window: %r (expected '30m', '60m' or '240m')" % (window,))
        df_data = self.df.groupby('date').apply(splitter)
        df_data.reset_index(level=0, inplace=True)
        self.columns_no_date = df_data.columns.tolist()
        self.columns_no_date.remove('date')
        return df_data

    def _split_by_buckets(self, df, buckets, tag):
        """Build one row of per-bucket sums of self.feature_name for one day.

        Column order: all plain sums first, then (when classify) all '_up'
        sums, then all '_down' sums — until_window's cumulative pass relies
        on each group being contiguous and chronologically ordered.
        """
        feature_name = self.feature_name
        data = {}
        subsets = []
        for position, (label, lower, upper) in enumerate(buckets):
            in_bucket = (df.time >= lower) if position == 0 else (df.time > lower)
            sub = df[in_bucket & (df.time <= upper)]
            subsets.append((label, sub))
            data[feature_name + tag + '_' + label] = sub[feature_name].sum()
        if self.classify:
            for label, sub in subsets:
                data[feature_name + tag + '_' + label + '_up'] = \
                    sub[sub['return_classifier'] > 0][feature_name].sum()
            for label, sub in subsets:
                data[feature_name + tag + '_' + label + '_down'] = \
                    sub[sub['return_classifier'] < 0][feature_name].sum()
        return pd.DataFrame(data, index=[0])

    def split_by_30min(self, df):
        """One row of 30-minute aggregates for a single day's minutes."""
        return self._split_by_buckets(df, self._BUCKETS_30M, '30')

    def split_by_1h(self, df):
        """One row of 1-hour aggregates for a single day's minutes.

        Added: split_data('60m') referenced this method, but it was not
        defined on this class and would have raised AttributeError.
        """
        return self._split_by_buckets(df, self._BUCKETS_60M, '60')

    def until_window(self):
        """Running (cumulative) totals of the window sums across each day."""
        up_cols = [c for c in self.columns_no_date if 'up' in c]
        down_cols = [c for c in self.columns_no_date if 'down' in c]
        plain_cols = sorted(set(self.columns_no_date) - set(up_cols) - set(down_cols))
        df_data = self.df_split.copy()
        # Each group's columns are positionally contiguous and chronologically
        # ordered, so replacing a column with the sum of itself and its
        # (already cumulated) left neighbour yields a running total.
        for group in (up_cols, down_cols, plain_cols):
            for idx, col in enumerate(group):
                prev = group[max(0, idx - 1)]
                df_data.loc[:, col] = df_data.loc[:, prev:col].sum(axis=1)
        df_data.rename(columns={c: 'until_window_' + c for c in self.columns_no_date},
                       inplace=True)
        return df_data

    def window_pct(self):
        """Ratio of each window's value to the same window on the previous row.

        Infinite ratios (previous value 0) are replaced with 0; rows with
        NaN (the first row) are dropped.
        """
        cols = self.columns_no_date
        df_data = self.df_split.copy()
        df_data[cols] = df_data[cols] / df_data[cols].shift()
        df_data.rename(columns={c: 'window_pct_' + c for c in cols}, inplace=True)
        df_data.replace(np.inf, 0, inplace=True)
        return df_data.dropna(axis=0)

    def mean_window(self, window):
        """Per-minute mean inside each window: the window sum divided by the
        window length in minutes (parsed from an 'Nm'-style string)."""
        df_data = self.df_split.copy()
        df_data.loc[:, self.columns_no_date] = \
            df_data.loc[:, self.columns_no_date] / int(window[:-1])
        df_data.rename(columns={c: 'mean_' + c for c in self.columns_no_date},
                       inplace=True)
        return df_data

    def run(self, factor_list):
        """Compute the requested derived factors and merge them on 'date'.

        factor_list : str or list of str out of 'sum_window', 'until_window',
        'mean_window', 'window_pct'. Dispatch uses direct method references
        instead of eval(); an unknown name raises KeyError as before.
        """
        if isinstance(factor_list, str):
            factor_list = [factor_list]
        dispatch = {
            'sum_window': lambda: self.sum_window(),
            'until_window': lambda: self.until_window(),
            'mean_window': lambda: self.mean_window(self.window),
            'window_pct': lambda: self.window_pct(),
        }
        results = [dispatch[name]() for name in factor_list]
        if len(results) == 1:
            return results[0]
        merged = results[0].copy()
        for nxt in results[1:]:
            merged = pd.merge(merged, nxt, on='date')
        return merged


def select_data(df, date_column_str, start_date, end_date=None):
    """Slice rows of df whose date column lies in [start_date, end_date].

    start_date / end_date : 'YYYY-MM-DD' strings; both bounds inclusive;
        end_date=None means "to the end of the frame".
    date_column_str : name of the column holding datetime.date values.
    Assumes df's index values are usable as positions (RangeIndex-like),
    since the matched index values are fed to iloc.
    Best-effort: when no row matches (dates out of range), the error is
    printed and None is returned, matching the original behaviour.
    """
    arr_index = np.array(df.index)
    # date.fromisoformat replaces the previous manual string slicing;
    # it enforces the same 'YYYY-MM-DD' format.
    start = datetime.date.fromisoformat(start_date)
    try:
        start_index = arr_index[df[date_column_str] >= start][0]
        if end_date is None:
            return df.iloc[start_index:, :]
        end = datetime.date.fromisoformat(end_date)
        end_index = arr_index[df[date_column_str] <= end][-1]
        return df.iloc[start_index:end_index + 1, :]
    except Exception as e:
        print('Error: ', e)


def boruta_models(data, estimator='rfr', perc=100, n_estimators=100, random_state=0):
    """Boruta feature selection per training frame (tailored for CTA_ruike;
    take care when reusing elsewhere).

    Parameters
    ----------
    data : list of pandas.DataFrame
        Training frames; column 0 must be y, column 1 the date, and the
        remaining columns the candidate features.
    estimator : str or estimator instance
        'rfr' (random-forest regression, default), 'gbr' (gradient-boosting
        regression), or the classifiers 'rfc' / 'gbc'. A pre-built
        sklearn-style estimator instance is used as-is.
    perc : int
        Boruta threshold, default 100; smaller values let factors pass
        the selection more easily.

    Returns
    -------
    (list of pandas.DataFrame, list of list of str)
        Frames reduced to [y, date] + selected factors, and the selected
        factor names per frame.
    """
    # Map the shorthand names onto the underlying sklearn estimator classes.
    estimator_classes = {
        'rfr': RandomForestRegressor,
        'gbr': GradientBoostingRegressor,
        'rfc': RandomForestClassifier,
        'gbc': GradientBoostingClassifier,
    }
    if isinstance(estimator, str):
        if estimator not in estimator_classes:
            # Previously an unknown name was passed through as a plain string
            # and failed obscurely inside boruta_test.
            raise ValueError('unknown estimator %r; expected one of %s'
                             % (estimator, sorted(estimator_classes)))
        estimator = estimator_classes[estimator](n_estimators=n_estimators,
                                                 random_state=random_state)

    factor = [[] for _ in range(len(data))]
    for i_model in range(len(data)):
        y_train = data[i_model].iloc[:, 0].astype('float').values.ravel()
        x_train = data[i_model].iloc[:, 2:].astype('float').values
        x_filtered, pass_num, pass_bool, rank = boruta_test(estimator, x_train, y_train, perc=perc)
        # Column names indexed by their boruta rank; keep those flagged as
        # passing, sorted by name as before.
        columns = pd.Series(data=data[i_model].columns[2:], index=rank)
        factor[i_model] = list(columns[pass_bool].sort_values())
    train_data_new = [data[i].loc[:, data[i].columns.tolist()[:2] + factor[i]] for i in range(len(data))]
    return train_data_new, factor


if __name__ == "__main__":
    # Instrument code (here the CSI 300 index)
    code = '000300'

    # Start / end of the full data sample
    data_start_date = '2018-01-01'
    data_end_date = '2019-01-01'

    # Date from which out-of-sample prediction starts
    predict_start_date = '2018-10-01'

    # Training-set length in whole years (prediction start minus data start)
    train_years = str(int((pd.to_datetime(predict_start_date) - pd.to_datetime(data_start_date)).days / 365))

    # File-name stem used for every saved output below
    file_name = predict_start_date.split('-')[0] + '(' + train_years + ',' + code + ')_oldmodel_R30'

    # Training ID, so weight files do not collide when several trainings run at once
    train_id = '1'

    # Wall-clock start, used to report the total run time at the end
    time_start = time.time()

    # Fetch the full sample and the training sample.
    # NOTE(review): GetData, TradeSignal and Return are defined elsewhere in
    # this file/project — confirm their interfaces before changing this block.
    class_data = GetData(data_source='ricequant', start_date=data_start_date, end_date=data_end_date, return_freq='30m')
    TimestampPrice = class_data.total_data()
    df_1min = class_data.data_1min_handle()

    # Feature engineering: add new features
    # Add day-level technical indicators ———— df_whole
    # factors_list = ['BIAS.BIAS1', 'BIAS.BIAS2', 'BIAS.BIAS3', 'KDJ.K', 'KDJ.D',
    #                 'KDJ.J', 'MACD.MACD', 'TRIX.TRIX']
    # factors_list = ['MACD.MACD']
    # class_factor = TechFactor(freq='day', factors_list=factors_list,
    #                           start_date=data_start_date, end_date=data_end_date)
    # df_tech_factor = class_factor.run()
    # df_tech_factor = tech_features(df_tech_factor)

    # Statistics window for the data
    window = '30m'
    # Which transformations to apply to the features (translation of the
    # note below): sum_window = sum inside the window; until_window = running
    # total up to the current point; mean_window = mean inside the window;
    # window_pct = window-over-window ratio.
    '''
    sum_window:窗口期内求和
    until_window:直到当前时间点的所有和
    mean_window:窗口期内求均值
    'window_pct':按窗口求环比
    '''
    factor_stats_method = ['sum_window','until_window','mean_window']#'until_window','mean_window'

    # Volume statistics
#    class_vol = FactorWindowStats('vol', df_1min, window, classify=True)
#    df_vol=class_vol.run(factor_stats_method)
#    df_vol.rename(columns={'date': 'Date'}, inplace=True)    
    # Rename columns for testing
#    df_vol.columns = map(lambda x:x.replace('up','buy').replace('down','sell').replace('window', 'window_volume'),
#                                 df_vol.columns)
    # The string below marks the legacy statistics interface.
    '''
    原统计量的接口
    '''
    # Add the bucketed volume indicators ———— df_solo
    solo_factor_list = ['sum_window_volume', 'until_window_volume', 'mean_window_volume']  # 'vol_window_pct'#'until_window_volume',#'mean_window_volume'
    volume_factors1 = volume_factor(solo_factor_list, df_1min, window, classify=False, code=code)
    volume_factors2 = volume_factor(solo_factor_list, df_1min, window, classify=True, code=code)

    df_vol = pd.merge(volume_factors1, volume_factors2, on='date')

    # For futures data, open interest has to be aggregated as well
    if 'open_interest' in TimestampPrice.columns:
        # Open-interest statistics
        class_openinterest = FactorWindowStats('open_interest', df_1min, window, classify=True)
        df_openinterest = class_openinterest.run(factor_stats_method)
        df_factor_solo = pd.merge(df_openinterest, df_vol, on='date')
    else:
        df_factor_solo = df_vol
    df_factor_solo.rename(columns={'date': 'Date'}, inplace=True)

    # Rename columns for testing
#    df_factor_solo.columns = map(lambda x:x.replace('buy','up').replace('sell','down').replace('window_volume','window'),
#                                 df_factor_solo.columns)

    # Build feature_solo_matrix: one list of matching column names per
    # intraday cutoff time (substring match against the column names)
    feature_solo_matrix = [[] for x in range(7)]
    solo_column_list = df_factor_solo.columns.tolist()
    time_list = ['1000', '1030', '1100', '1130', '1330', '1400', '1430']
    for iIndex in range(len(feature_solo_matrix)):
        index_list = [time_list[iIndex] in i for i in solo_column_list]
        feature_solo_matrix[iIndex] = np.array(solo_column_list)[index_list].tolist()

    traindata = class_data.get_traindata(
        TimestampPrice, csv_flag=False,
        df_solo=df_factor_solo, feature_solo_matrix=feature_solo_matrix)
    del time_list, solo_column_list

    # Factor selection — random-forest regression (y is continuous)
    traindata_rfr85, rfr_factor85 = boruta_models(data=traindata, estimator='rfr', perc=85)
    # Factor selection — random-forest classification (y takes discrete values)
    # traindata_rfc85, rfc_factor85 = boruta_models(data=traindata, estimator='rfc', perc=85)

    # Train the model and obtain the opening signals.
    # train_id keeps weight files apart when several trainings run simultaneously
    class_signal = TradeSignal(traindata_rfr85, predict_start_date, train_engine='keras', train_id=train_id)
    TimestampSignal = class_signal.get_TimestampSignal()

    # Run the backtest
    class_return = Return()
    datevalues, date_val, df_action = class_return.get_backtest_result(TimestampPrice)
    time_end = time.time()
    print(time_end - time_start)

    # Data to be saved

    # Backtest equity curve
    plt.figure(figsize=(10, 6))
    datevalues['Value'].plot(legend=True)
    # datevalues.to_csv('data/datevalues.csv', index=None)
    df_return, return_column = class_return.hs300_pre_close_return(
        start_date=str(datevalues.index[0]),
        end_date=str(datevalues.index[-1]))
    df_return[return_column].plot(legend=True)
    # NOTE(review): this raw string yields doubled backslashes while the
    # paths below use '/'; confirm it resolves correctly on the target OS.
    plt.savefig(r'backtest\\' + code + '\\' + file_name + '.png')
    plt.show()
    ## Predicted values
    TimestampSignal.to_csv(r'timestamp_pred/' + file_name + '.csv', index=None)
    # Long/short signals
    df_action.to_csv(r'action/' + code + '/' + file_name + '_action.csv', index=None)

    # # Save the feature data
    # # TimestampPrice.to_csv(
    # # 	r'./data/volume_factor_data/TimestampPrice' + data_start_date + ' ' + data_end_date + ' ' + predict_start_date + '.csv')
    # # # Save the signal data
    # # TimestampSignal.to_csv(
    # # 	r'./data/volume_factor_data/TimestampSignal_' + data_start_date + '_' + data_end_date + '_' + predict_start_date + '.csv')
    # # class_signal.action_list.to_csv(r'./data/volume_factor_data/pred_trade_signal' + data_start_date + ' ' + data_end_date + ' ' + predict_start_date + '.csv')