#!/usr/bin/python
# -*-coding:utf-8-*-
import pandas as pd
import numpy as np
import os
import statsmodels.api as sm

from zg_data_process.support.utils import one_hot

# from zg02_factor_lib.base.factors_library_base import NewFactorLib as DataReader
from zbc_factor_lib.base.factors_library_base import NewRQFactorLib as DataReader

# Shared factor-library reader used by the processing classes below.
data_reader = DataReader()

# Ordered list of CITIC level-1 industry index codes read from the basic-data
# table, with the composite index code 'CI005000' appended at the end.
ci1_unique_list = (
    data_reader.read_basic_data_table('processed_ci1_industry_index_info_data')
    .sort_values('index_code')['index_code']
    .tolist()
)
ci1_unique_list.append('CI005000')

class BasicClass(object):
    """Common base class for the data-processing helpers in this module."""

    def __init__(self):
        # no shared state; delegate to object's initializer
        super(BasicClass, self).__init__()

# TODO - Missing-value imputation
class MissingDataProcess(BasicClass):
    """Missing-value imputation for long-format factor frames.

    All public methods expect a DataFrame with at least the columns
    'stock_code' and 'date' (plus industry/market-cap columns where noted)
    and return a frame with the same flat-column layout.
    """

    def __init__(self):
        pass

    def drop_missing(self, df):
        """Drop every row that contains at least one NaN."""
        return df.dropna()

    def filled_with_market_stats_value(self, df, columns, q=None, copy=True):
        '''
        Fill missing values with a per-date, whole-market statistic.

        :param df: to process pandas dataframe
        :param columns: to process pandas dataframe columns
        :param q: default is None, using market mean value to fill nan, can be float to get q-value of the market
        :param copy: if True, operate on a copy so the caller's frame is untouched
        :return: dataframe with ``columns`` filled and the index reset to plain columns
        '''

        # TODO - record index
        if copy:
            df = df.copy()

        # Before computing the statistic, drop duplicated (stock_code, date)
        # rows — a single stock may carry several industry labels and hence
        # appear more than once per day, which would bias the market mean.
        df_used = df.drop_duplicates(['stock_code', 'date'], keep='first')

        df_used['trade_date'] = df_used['date'].copy()
        df = df.set_index(['stock_code', 'date'])
        df_used = df_used.set_index(['stock_code', 'date'])

        # TODO - compute the per-trade-date market statistic (mean or quantile)
        if q is None:
            market_values = df_used.groupby('trade_date')[columns].mean()
        else:
            market_values = df_used.groupby('trade_date')[columns].quantile(q)
        del df_used

        # TODO - fill NaNs with the market statistic of the matching trade date
        for col in columns:
            temp = df.loc[df[col].isnull(), col]

            # dates of the rows that need filling, positionally aligned with
            # the .loc assignment below
            fill_date = temp.reset_index('date')['date']

            df.loc[df[col].isnull(), col] = \
                market_values.loc[fill_date.values, col].values

        return df.reset_index()

    def filled_with_ci1_stats_value(self, df, columns, q=None, copy=True):
        '''
          Fill missing values with a per-date CITIC level-1 industry statistic.

          :param df: to process pandas dataframe (must contain 'ci1_code')
          :param columns: to process pandas dataframe columns
          :param q: default is None, using industry mean value to fill nan, can be float to get q-value of the industry
          :param copy: if True, operate on a copy so the caller's frame is untouched
          :return: dataframe with ``columns`` filled and the index reset to plain columns
          '''

        # TODO - record index
        if copy:
            df = df.copy()

        df['trade_date'] = df['date'].copy()
        df['industry_id'] = df['ci1_code'].copy()
        df = df.set_index(['stock_code', 'date', 'ci1_code'])

        # TODO - compute the per (trade date, industry) statistic
        if q is None:
            industrial_values = df.groupby(['trade_date', 'industry_id'])[columns].mean()
        else:
            industrial_values = df.groupby(['trade_date', 'industry_id'])[columns].quantile(q)

        # TODO - fill NaNs with the statistic of the matching (date, industry)
        for col in columns:
            temp = df.loc[df[col].isnull(), col]
            # (date, ci1_code) lookup keys for the rows that need filling
            fill_index = temp.reset_index(['date', 'ci1_code']).set_index(['date', 'ci1_code']).index
            df.loc[df[col].isnull(), col] = \
                industrial_values.loc[fill_index.values, col].values

        df.drop(['trade_date', 'industry_id'], axis=1, inplace=True)

        return df.reset_index()

    def filled_with_sw1_stats_value(self, df, columns, q=None, copy=True):
        '''
          Fill missing values with a per-date Shenwan level-1 industry statistic.

          :param df: to process pandas dataframe (must contain 'sw1_code')
          :param columns: to process pandas dataframe columns
          :param q: default is None, using industry mean value to fill nan, can be float to get q-value of the industry
          :param copy: if True, operate on a copy so the caller's frame is untouched
          :return: dataframe with ``columns`` filled and the index reset to plain columns
          '''

        # TODO - record index
        if copy:
            df = df.copy()

        df['trade_date'] = df['date'].copy()
        df['industry_id'] = df['sw1_code'].copy()
        df = df.set_index(['stock_code', 'date', 'sw1_code'])

        # TODO - compute the per (trade date, industry) statistic
        if q is None:
            industrial_values = df.groupby(['trade_date', 'industry_id'])[columns].mean()
        else:
            industrial_values = df.groupby(['trade_date', 'industry_id'])[columns].quantile(q)

        # TODO - fill NaNs with the statistic of the matching (date, industry)
        for col in columns:
            temp = df.loc[df[col].isnull(), col]
            fill_index = temp.reset_index(['date', 'sw1_code']).set_index(['date', 'sw1_code']).index
            df.loc[df[col].isnull(), col] = \
                industrial_values.loc[fill_index.values, col].values

        df.drop(['trade_date', 'industry_id'], axis=1, inplace=True)

        return df.reset_index()

    def _fill_with_fixed_value(self, df, column, fill_value, missing_pct, verbose=True):
        """For a single cross-section: if the NaN ratio of ``column`` reaches
        ``missing_pct``, overwrite the WHOLE column with ``fill_value``
        (otherwise the group is returned unchanged)."""
        df = df.copy()

        missing_pct_val = (df[column].isnull()).sum() / df.shape[0]

        if missing_pct_val >= missing_pct:
            df[column] = fill_value

            if verbose:
                td = df['date'].unique()[0]
                print(column, td, 'has missing value %.4f, so fill with %.3f...' % (missing_pct_val, fill_value))

        return df

    def cs_filled_with_fixed_value(self, df, columns, fill_value=0.0, missing_pct=0.4, verbose=True, copy=True):
        '''
             Cross-sectional fixed-value fill: for each trade date, if the
             missing ratio of a column reaches ``missing_pct``, the entire
             cross-section of that column is replaced by ``fill_value``.

             :param df: to process pandas dataframe
             :param columns: to process pandas dataframe columns
             :param fill_value: to fill missing value
             :param missing_pct: if missing value pct >= missing_pct, then all value will be replace by "fill_value"
             :param verbose: print a message whenever a cross-section is overwritten
             :param copy: if True, operate on a copy so the caller's frame is untouched
             :return: processed dataframe with a fresh RangeIndex
        '''

        # TODO - record index
        if copy:
            df = df.copy()

        # TODO - sort so the processed data lines up with the original rows.
        # groupby's `by` must already be sorted for the positional write-back
        # below (``df[col] = df_fill_process[col].values``) to stay aligned.
        df = df.sort_values(['date', 'stock_code'])

        # TODO - fill per cross-section
        for col in columns:
            df_fill_process = df.groupby('date', as_index=False, sort=False, group_keys=False).apply(
                self._fill_with_fixed_value, col, fill_value, missing_pct, verbose
            )

            # positional re-assignment: relies on the sort above keeping
            # df and df_fill_process in the same row order
            df[col] = df_fill_process[col].values

        return df.reset_index(drop=True)

    def cs_filled_with_cap_ci1_predictive_value(self, df, columns, verbose=False, copy=True):
        '''
             Cross-sectional predictive fill: per trade date, regress each
             factor on total market cap plus one-hot CITIC level-1 industry
             dummies, then fill the factor's NaNs with the OLS predictions.
             Rows with missing market cap or industry code are dropped first.

             :param df: to process pandas dataframe (needs 'scale_total_market_size' and 'ci1_code')
             :param columns: to process pandas dataframe columns
             :param verbose: print a message per processed cross-section
             :param copy: if True, operate on a copy so the caller's frame is untouched
             :return: processed dataframe with a fresh RangeIndex
        '''

        # TODO - record index
        if copy:
            df = df.copy()

        ''''''
        # module-level list of CITIC level-1 codes (incl. composite 'CI005000')
        ci1_unique = ci1_unique_list

        # map each industry code to a stable integer id (sorted order)
        ci1_unique = {sorted(ci1_unique)[i]: i for i in range(len(ci1_unique))}

        def _fill_with_cap_ci1_ols_predictive_value(df, factor_name, verbose=False):
            # one cross-section: build [market_cap | industry one-hot] design
            # matrix, fit OLS on the non-missing rows, predict the missing ones
            df = df.copy()

            ind_data = one_hot(df['ci1_code_id'].values, len(ci1_unique))

            cap_ind_data = df[['scale_total_market_size']].copy()

            cap_ind_data = np.concatenate((cap_ind_data.values, ind_data), axis=1)

            exog_non_nan = cap_ind_data[df[factor_name].notnull()]
            endog_non_nan = df.loc[df[factor_name].notnull(), factor_name].values

            # TODO - too few observations (< 10% of the section): fall back to mean fill
            if exog_non_nan.shape[0] < df.shape[0] // 10:
                is_fill_with_mean_value = True
            else:
                is_fill_with_mean_value = False

            if is_fill_with_mean_value:
                mkt_mean_value = df[factor_name].mean(skipna=True)

                df[factor_name] = df[factor_name].fillna(mkt_mean_value)

                if verbose:
                    td = df['date'].unique()[0]
                    print(td, 'fill with market mean value...')

                return df

            # no intercept: the full set of industry dummies spans the constant
            reg = sm.OLS(endog=endog_non_nan,
                         exog=exog_non_nan,
                         hasconst=False)

            reg = reg.fit()

            to_fill_exog_non_nan = cap_ind_data[df[factor_name].isnull()]

            # NOTE(review): the result of this predict call is discarded and
            # recomputed just below — it appears redundant; confirm before removing.
            reg.predict(to_fill_exog_non_nan)

            df.loc[df[factor_name].isnull(), factor_name] = reg.predict(to_fill_exog_non_nan)

            if verbose:
                td = df['date'].unique()[0]
                print(td, 'fill with cap and ci1 predictive value...')

            return df

        '''TODO - 排序，主要保证处理后的数据与原来数据保持一致'''
        # sort so the positional write-back below stays aligned with df
        df = df.sort_values(['date', 'stock_code'])

        # TODO - drop rows whose market cap or industry code is missing
        df = df[df[['scale_total_market_size', 'ci1_code']].isnull().sum(axis=1) == 0]

        df['ci1_code_id'] = df['ci1_code'].map(lambda x: ci1_unique[x])

        # TODO - predictive fill per cross-section
        for col in columns:
            df_fill_process = \
                df.groupby('date', as_index=False, sort=False, group_keys=False).apply(
                    _fill_with_cap_ci1_ols_predictive_value, col, verbose
                )

            df[col] = df_fill_process[col].values

        return df.reset_index(drop=True)

# TODO - Data filtering
class DataFilteration(BasicClass):
    """Row filters for long-format (stock_code, date) factor frames.

    Each method keeps the rows matching a label condition and can optionally
    drop the label column(s) afterwards.  The input frame is copied by
    default (``copy=True``) so callers are never mutated.

    Improvements over the original: the ~15 copy-pasted flag filters now
    delegate to one shared helper, and the mutable list defaults of
    ``ci1_industry_filtration`` / ``sw1_industry_filtration`` were replaced
    by the safe ``None`` sentinel (backward-compatible: an empty list and
    ``None`` behave identically).
    """

    def _equal_value_filtration(self, df, labels, keep_value, drop_label=True, copy=True):
        """Keep rows where EVERY column in ``labels`` equals ``keep_value``.

        :param df: input dataframe
        :param labels: list of flag column names, e.g. ['is_st']
        :param keep_value: value each flag must equal for the row to survive
        :param drop_label: drop the flag columns after filtering
        :param copy: operate on a copy of ``df``
        :return: filtered dataframe
        """
        if copy:
            df = df.copy()

        mask = np.ones(df.shape[0], dtype=bool)
        for label in labels:
            # NaN compares unequal and is therefore filtered out, matching
            # the original ``df[label] == value`` behavior
            mask &= (df[label] == keep_value).values

        df = df.loc[mask, :]

        if drop_label:
            df.drop(labels, axis=1, inplace=True)

        return df

    def single_label_filteration(self, df, label_name, rule, drop_label=True, copy=True):
        """Keep rows for which ``rule`` (applied via ``Series.map``) is truthy."""
        if copy:
            df = df.copy()

        df = df.loc[df[label_name].map(rule), :]

        if drop_label:
            df.drop(label_name, axis=1, inplace=True)

        return df

    def st_filteration(self, df, drop_label=True, copy=True):
        """Drop ST (special treatment) stocks: keep rows with is_st == 0.0."""
        return self._equal_value_filtration(df, ['is_st'], 0.0, drop_label, copy)

    def paused_stock_filtration(self, df, drop_label=True, copy=True):
        """Drop suspended stocks: keep rows with paused == 0.0."""
        return self._equal_value_filtration(df, ['paused'], 0.0, drop_label, copy)

    def oneline_stock_filtration(self, df, drop_label=True, copy=True):
        """Drop stocks that are limit-up OR limit-down ('one-line' boards)."""
        return self._equal_value_filtration(df, ['up_one_line', 'down_one_line'], 0.0, drop_label, copy)

    def up_oneline_stock_filtration(self, df, drop_label=True, copy=True):
        """Drop limit-up ('one-line') stocks only."""
        return self._equal_value_filtration(df, ['up_one_line'], 0.0, drop_label, copy)

    def down_oneline_stock_filtration(self, df, drop_label=True, copy=True):
        """Drop limit-down ('one-line') stocks only."""
        return self._equal_value_filtration(df, ['down_one_line'], 0.0, drop_label, copy)

    def ci1_industry_filtration(self, df, drop_ci1_code=None, drop_ci1_name=None, drop_label=False, copy=True):
        """Remove rows whose CITIC level-1 industry code/name is listed.

        :param drop_ci1_code: industry codes to exclude (None/empty = no-op)
        :param drop_ci1_name: industry names to exclude (None/empty = no-op)
        :param drop_label: drop the corresponding label column after filtering
        """
        if copy:
            df = df.copy()

        drop_ci1_code = [] if drop_ci1_code is None else drop_ci1_code
        drop_ci1_name = [] if drop_ci1_name is None else drop_ci1_name

        if len(drop_ci1_code) > 0:
            df = df.loc[~df['ci1_code'].isin(drop_ci1_code), :]

            if drop_label:
                df.drop('ci1_code', axis=1, inplace=True)

        if len(drop_ci1_name) > 0:
            df = df.loc[~df['ci1_name'].isin(drop_ci1_name), :]

            if drop_label:
                df.drop('ci1_name', axis=1, inplace=True)

        return df

    def sw1_industry_filtration(self, df, drop_sw1_code=None, drop_sw1_name=None, drop_label=False, copy=True):
        """Remove rows whose Shenwan level-1 industry code/name is listed.

        :param drop_sw1_code: industry codes to exclude (None/empty = no-op)
        :param drop_sw1_name: industry names to exclude (None/empty = no-op)
        :param drop_label: drop the corresponding label column after filtering
        """
        if copy:
            df = df.copy()

        drop_sw1_code = [] if drop_sw1_code is None else drop_sw1_code
        drop_sw1_name = [] if drop_sw1_name is None else drop_sw1_name

        if len(drop_sw1_code) > 0:
            df = df.loc[~df['sw1_code'].isin(drop_sw1_code), :]

            if drop_label:
                df.drop('sw1_code', axis=1, inplace=True)

        if len(drop_sw1_name) > 0:
            df = df.loc[~df['sw1_name'].isin(drop_sw1_name), :]

            if drop_label:
                df.drop('sw1_name', axis=1, inplace=True)

        return df

    def new_stock_filtration(self, df, drop_label=True, copy=True):
        """Drop newly listed stocks: keep rows with is_new == 0.0."""
        return self._equal_value_filtration(df, ['is_new'], 0.0, drop_label, copy)

    def new_subnew_stock_filtration(self, df, drop_label=True, copy=True):
        """Drop new AND sub-new stocks."""
        return self._equal_value_filtration(df, ['is_new', 'is_sub_new'], 0.0, drop_label, copy)

    def list_day_filtration(self, df, min_list_day=360, drop_label=True, copy=True):
        """Keep stocks listed for strictly more than ``min_list_day`` days."""
        if copy:
            df = df.copy()

        df = df.loc[df['list_day'] > min_list_day, :]

        if drop_label:
            df.drop('list_day', axis=1, inplace=True)

        return df

    def get_csi300_component_data(self, df, drop_label=True, copy=True):
        """Keep CSI 300 index constituents."""
        return self._equal_value_filtration(df, ['is_csi300'], 1.0, drop_label, copy)

    def get_csi500_component_data(self, df, drop_label=True, copy=True):
        """Keep CSI 500 index constituents."""
        return self._equal_value_filtration(df, ['is_csi500'], 1.0, drop_label, copy)

    def get_sh50_component_data(self, df, drop_label=True, copy=True):
        """Keep SSE 50 index constituents."""
        return self._equal_value_filtration(df, ['is_sh50'], 1.0, drop_label, copy)

    # new added
    def get_cybz_component_data(self, df, drop_label=True, copy=True):
        """Keep ChiNext composite index constituents."""
        return self._equal_value_filtration(df, ['is_cybz'], 1.0, drop_label, copy)

    def get_cyb100_component_data(self, df, drop_label=True, copy=True):
        """Keep ChiNext 100 index constituents."""
        return self._equal_value_filtration(df, ['is_cyb100'], 1.0, drop_label, copy)

    def get_cyb50_component_data(self, df, drop_label=True, copy=True):
        """Keep ChiNext 50 index constituents."""
        return self._equal_value_filtration(df, ['is_cyb50'], 1.0, drop_label, copy)

    def get_csi1000_component_data(self, df, drop_label=True, copy=True):
        """Keep CSI 1000 index constituents."""
        return self._equal_value_filtration(df, ['is_csi1000'], 1.0, drop_label, copy)

    def get_csi100_component_data(self, df, drop_label=True, copy=True):
        """Keep CSI 100 index constituents."""
        return self._equal_value_filtration(df, ['is_csi100'], 1.0, drop_label, copy)

# TODO - Data de-duplication
class DuplicateProcssing(BasicClass):
    """De-duplication of (stock_code, date) rows.

    Duplicates mainly arise when a stock carries several CITIC/Shenwan
    level-1 industry labels and therefore appears multiple times per day.
    """

    def duplicates_merge(self, df, columns, rule='mean', copy=True):
        '''
        Merge duplicated (stock_code, date) rows of ``columns`` with ``rule``.

        :param df: input long-format dataframe
        :param columns: factor columns to aggregate over the duplicates
        :param rule: 'mean', 'max' or 'min'
        :param copy: operate on a copy of ``df``
        :return: de-duplicated dataframe, one row per (stock_code, date)
        :raises ValueError: if ``rule`` is not a supported aggregation name
        '''

        if copy:
            df = df.copy()

        if rule == 'mean':
            df_unique = df.groupby(['stock_code', 'date'])[columns].mean()
        elif rule == 'max':
            df_unique = df.groupby(['stock_code', 'date'])[columns].max()
        elif rule == 'min':
            df_unique = df.groupby(['stock_code', 'date'])[columns].min()
        else:
            # BUG FIX: the original did ``raise 'your input rule cannot be found!'``,
            # which in Python 3 raises "TypeError: exceptions must derive from
            # BaseException" instead of a meaningful error.
            raise ValueError('your input rule cannot be found!')

        # keep one representative row per key, then overwrite the factor
        # columns with the aggregated values (index-aligned lookup)
        df = df.drop_duplicates(['stock_code', 'date'], keep='last')
        df = df.set_index(['stock_code', 'date'])
        df[columns] = df_unique.loc[df.index, columns]

        return df.reset_index()

    def simple_duplicates_process(self, df, keep='first', copy=True):
        '''
        Keep a single row per (stock_code, date) pair, discarding the others.

        :param df: input dataframe
        :param keep: which duplicate to keep ('first' / 'last'), forwarded to drop_duplicates
        :param copy: operate on a copy of ``df``
        :return: de-duplicated dataframe
        '''
        if copy:
            df = df.copy()

        df = df.drop_duplicates(['stock_code', 'date'], keep=keep)

        return df

# TODO - Outlier (extreme value) handling
class OutlierProcessing(BasicClass):
    """Cross-sectional outlier (extreme value) handling.

    Each ``cs_*`` method groups the frame by trade date and, per
    cross-section, either clips factor values into an estimated
    [lower, upper] band (``drop=False``) or drops the offending rows
    (``drop=True``).  Bound estimators: MAD, n-sigma, quantile, boxplot
    and medcouple (skew-adjusted boxplot).

    Bug fixes vs. the original:
    * ``_boxplot_winsorize``: the upper fence used Q1 instead of Q3
      (boxplot fences are [Q1 - n*IQR, Q3 + n*IQR]).
    * ``_medcouple_winsorize``: ``lower = upper = pd.Series(...)`` aliased
      BOTH names to the same Series, so the lower bounds were overwritten
      by the upper bounds; they are now independent Series.
    """

    def _winsorize_with_bounds(self, data, lower, upper, drop):
        """Apply per-column [lower, upper] bounds to one cross-section.

        :param data: cross-sectional dataframe (will be mutated/reassigned)
        :param lower: Series of lower bounds indexed by column name
        :param upper: Series of upper bounds indexed by column name
        :param drop: clip into the band when False; drop out-of-band rows when True
        :return: processed dataframe
        """
        for factor_name in data.columns:
            factor_lower = lower[factor_name]
            factor_upper = upper[factor_name]
            if not drop:
                # remember NaN positions so the clipping writes below cannot
                # accidentally turn them into bound values
                nan_location = data[factor_name].isnull()
                data.loc[data[factor_name] < factor_lower, factor_name] = factor_lower
                data.loc[data[factor_name] > factor_upper, factor_name] = factor_upper
                data.loc[nan_location, factor_name] = np.nan
            else:
                # TODO - drop out-of-band rows outright (NaN rows also fail
                # both comparisons and are removed, as in the original)
                data = data.loc[data[factor_name] >= factor_lower]
                data = data.loc[data[factor_name] <= factor_upper]

        return data

    def _mad_winsorize(self, factor, n=3*1.4826, drop=False, verbose=True):
        '''MAD winsorization for one cross-section: bounds are median ± n * MAD.'''
        data = factor.copy()

        D_M = data.median()
        D_MAD = np.abs(data - D_M).median()

        lower = D_M - n * D_MAD
        upper = D_M + n * D_MAD

        data = self._winsorize_with_bounds(data, lower, upper, drop)

        if verbose:
            print(data.index.unique()[0][1].strftime('%Y-%m-%d'), 'done!')

        return data

    def _std_winsorize(self, factor, n=3, drop=False, verbose=True):
        '''n-sigma winsorization for one cross-section: bounds are mean ± n * std.'''
        data = factor.copy()

        mean = data.mean()
        std = data.std()

        lower = mean - n * std
        upper = mean + n * std

        data = self._winsorize_with_bounds(data, lower, upper, drop)

        if verbose:
            print(data.index.unique()[0][1].strftime('%Y-%m-%d'), 'done!')

        return data

    def _quantile_winsorize(self, factor, pect=0.05, drop=False, verbose=True):
        '''Quantile winsorization: clip at the pect/2 and 1 - pect/2 quantiles.'''
        data = factor.copy()

        lowerbound = pect / 2.0
        upperbound = 1 - pect / 2.0

        lower = data.quantile(lowerbound)
        upper = data.quantile(upperbound)

        data = self._winsorize_with_bounds(data, lower, upper, drop)

        if verbose:
            print(data.index.unique()[0][1].strftime('%Y-%m-%d'), 'done!')

        return data

    def _boxplot_winsorize(self, factor, n=3, drop=False, verbose=True):
        '''Simple boxplot winsorization: fences at Q1 - n*IQR and Q3 + n*IQR.

        BUG FIX: the original computed the upper fence as ``Q1 + n * IQR``;
        the boxplot convention (and the symmetric counterpart of the lower
        fence) is ``Q3 + n * IQR``.
        '''
        data = factor.copy()

        Q1 = data.quantile(0.25)
        Q3 = data.quantile(0.75)
        IQR = Q3 - Q1

        lower = Q1 - n * IQR
        upper = Q3 + n * IQR

        data = self._winsorize_with_bounds(data, lower, upper, drop)

        if verbose:
            print(data.index.unique()[0][1].strftime('%Y-%m-%d'), 'done!')

        return data

    def _medcouple_winsorize(self, factor, n=1.5, drop=False, verbose=True):
        '''Medcouple (skew-adjusted boxplot) winsorization for one cross-section.

        BUG FIX: ``lower`` and ``upper`` are now two distinct Series; the
        original aliased them to one object, so every lower bound was
        clobbered by the matching upper bound.
        '''
        data = factor.copy()
        factor_names = data.columns

        MC = sm.stats.stattools.medcouple(data)
        Q1 = data.quantile(0.25)
        Q3 = data.quantile(0.75)
        IQR = Q3 - Q1

        lower = pd.Series(index=factor_names, dtype=float)
        upper = pd.Series(index=factor_names, dtype=float)
        for i in range(len(MC)):
            name = factor_names[i]
            if MC[i] > 0:
                # right-skewed column: tighten the lower fence, relax the upper
                lower[name] = Q1[name] - n * np.exp(-3.5 * MC[i]) * IQR[name]
                upper[name] = Q3[name] + n * np.exp(4 * MC[i]) * IQR[name]
            else:
                # left-skewed column: relax the lower fence, tighten the upper
                lower[name] = Q1[name] - n * np.exp(-4 * MC[i]) * IQR[name]
                upper[name] = Q3[name] + n * np.exp(3.5 * MC[i]) * IQR[name]

        data = self._winsorize_with_bounds(data, lower, upper, drop)

        if verbose:
            print(data.index.unique()[0][1].strftime('%Y-%m-%d'), 'done!')

        return data

    def _cs_outlier_process(self, df, columns, winsorize_method, method_args, copy):
        """Shared driver for the ``cs_*`` methods: de-duplicate the
        (stock_code, date) rows, winsorize each trade-date cross-section with
        ``winsorize_method(*method_args)`` and write the result back through
        an index-aligned assignment."""
        if copy:
            df = df.copy()

        df_used = df.copy()
        df_used['trade_date'] = df['date'].copy()

        # drop duplicated rows (multiple industry labels per stock/day)
        df_used = df_used.drop_duplicates(['stock_code', 'date'], keep='first')

        df = df.set_index(['stock_code', 'date'])
        df_used = df_used.set_index(['stock_code', 'date'])

        df_filter = df_used.groupby('trade_date')[columns].apply(winsorize_method, *method_args)

        df[columns] = df_filter.loc[df.index, columns]

        return df.reset_index()

    def cs_medcouple_outlier_process(self, df, columns, n=1.5, drop=False, copy=True, verbose=False):
        '''
        Cross-sectional medcouple winsorization.

        :param df: input dataframe with 'stock_code' and 'date' columns
        :param columns: factor columns to process
        :param n: fence-width parameter
        :param drop: drop out-of-band rows instead of clipping
        :param copy: operate on a copy of ``df``
        :param verbose: print one line per processed trade date
        :return: processed dataframe
        '''
        return self._cs_outlier_process(df, columns, self._medcouple_winsorize, (n, drop, verbose), copy)

    def cs_boxplot_outlier_process(self, df, columns, n=3, drop=False, copy=True, verbose=False):
        '''
        Cross-sectional boxplot winsorization.

        :param df: input dataframe with 'stock_code' and 'date' columns
        :param columns: factor columns to process
        :param n: fence-width parameter (n * IQR)
        :param drop: drop out-of-band rows instead of clipping
        :param copy: operate on a copy of ``df``
        :param verbose: print one line per processed trade date
        :return: processed dataframe
        '''
        return self._cs_outlier_process(df, columns, self._boxplot_winsorize, (n, drop, verbose), copy)

    def cs_quantile_outlier_process(self, df, columns, pect=0.05, drop=False, copy=True, verbose=False):
        '''
        Cross-sectional quantile winsorization.

        :param df: input dataframe with 'stock_code' and 'date' columns
        :param columns: factor columns to process
        :param pect: total tail mass clipped (pect/2 per side)
        :param drop: drop out-of-band rows instead of clipping
        :param copy: operate on a copy of ``df``
        :param verbose: print one line per processed trade date
        :return: processed dataframe
        '''
        return self._cs_outlier_process(df, columns, self._quantile_winsorize, (pect, drop, verbose), copy)

    def cs_std_outlier_process(self, df, columns, n=3, drop=False, copy=True, verbose=False):
        '''
        Cross-sectional n-sigma winsorization.

        :param df: input dataframe with 'stock_code' and 'date' columns
        :param columns: factor columns to process
        :param n: number of standard deviations
        :param drop: drop out-of-band rows instead of clipping
        :param copy: operate on a copy of ``df``
        :param verbose: print one line per processed trade date
        :return: processed dataframe
        '''
        return self._cs_outlier_process(df, columns, self._std_winsorize, (n, drop, verbose), copy)

    def cs_mad_outlier_process(self, df, columns, n=3*1.4826, drop=False, copy=True, verbose=False):
        '''
        Cross-sectional MAD winsorization.

        :param df: input dataframe with 'stock_code' and 'date' columns
        :param columns: factor columns to process
        :param n: MAD multiplier (default 3 * 1.4826 approximates 3 sigma)
        :param drop: drop out-of-band rows instead of clipping
        :param copy: operate on a copy of ``df``
        :param verbose: print one line per processed trade date
        :return: processed dataframe
        '''
        return self._cs_outlier_process(df, columns, self._mad_winsorize, (n, drop, verbose), copy)

    def cs_normal_dist_outlier_limit(self):
        '''Cross-sectional distribution-based outlier limiting (not implemented).'''
        pass

    def cs_rank_outlier_limit(self):
        '''Cross-sectional rank-based outlier limiting (not implemented).'''
        pass

# TODO - Normalization and neutralization
class DataTransformation(BasicClass):
    '''
    Cross-sectional normalization and neutralization utilities.

    Every public ``cs_*_process`` method takes a long-format dataframe with at
    least 'stock_code' and 'date' columns, applies a per-trade-date
    transformation to the requested factor columns, and returns the dataframe
    with its original row layout restored.  The ``_*`` helpers operate on one
    cross-section (one trade date) at a time and are driven via
    ``groupby(...).apply``.

    NOTE(review): the index-alignment idioms here (``groupby().apply`` results
    re-aligned with ``.loc`` on a MultiIndex) rely on legacy pandas behaviour
    where transform-like ``apply`` results keep the group index — confirm
    before upgrading pandas.
    '''

    # ------------------------------------------------------------------
    # Industry code universes (previously duplicated inline in 4 methods).
    # ------------------------------------------------------------------

    # SWS (ShenWan) level-1 industry codes before the 2014-01-01 reform:
    # 801010 .. 801230 plus 801890.
    _SW1_PRE_2014 = ['801%02d0' % i for i in range(1, 24)] + ['801890']

    # SWS level-1 codes during the transition window [2014-01-01, 2014-02-21).
    _SW1_TRANSITION = [
        '801010', '801020', '801030', '801040', '801050', '801060',
        '801070', '801080', '801100', '801110', '801120', '801130',
        '801140', '801150', '801160', '801170', '801180', '801200',
        '801210', '801220', '801230', '801710', '801720', '801730',
        '801740', '801750', '801760', '801770', '801780', '801790',
        '801880', '801890',
    ]

    # SWS level-1 codes from 2014-02-21 onwards.
    _SW1_POST_2014 = [
        '801010', '801020', '801030', '801040', '801050', '801080',
        '801110', '801120', '801130', '801140', '801150', '801160',
        '801170', '801180', '801200', '801210', '801230', '801710',
        '801720', '801730', '801740', '801750', '801760', '801770',
        '801780', '801790', '801880', '801890',
    ]

    @staticmethod
    def _ci1_code_map(today):
        '''Return {CITIC level-1 code -> dummy column position} valid on *today*.

        CI005030 joined the CITIC universe on 2019-12-02; before that the
        universe is CI005001..CI005029.
        '''
        n_codes = 29 if today < '2019-12-02' else 30
        codes = ['CI005%03d' % i for i in range(1, n_codes + 1)]
        return {code: i for i, code in enumerate(sorted(codes))}

    @classmethod
    def _sw1_code_map(cls, today, verbose=True):
        '''Return {SWS level-1 code -> dummy column position} valid on *today*.'''
        if today < '2014-01-01':
            codes = cls._SW1_PRE_2014
            if verbose: print('old sw1 unique code used!')
        elif today >= '2014-02-21':
            codes = cls._SW1_POST_2014
            if verbose: print('new sw1 unique code used!')
        else:
            codes = cls._SW1_TRANSITION
            if verbose: print('new sw1 unique code used!')
        return {code: i for i, code in enumerate(sorted(codes))}

    @staticmethod
    def _ols_residualize(df_neutral, factor_names, exog_columns, fill_cap=False):
        '''Replace each factor column of *df_neutral* by its OLS residual.

        Remaining NaNs are patched with the cross-sectional median first.
        Mutates *df_neutral* in place.

        :param df_neutral:    one cross-section containing factors and regressors
        :param factor_names:  columns to residualize
        :param exog_columns:  regressor columns (industry dummies, optionally cap)
        :param fill_cap:      also median-fill 'scale_total_market_size'
        '''
        for factor in factor_names:
            try:
                # Patch any remaining NaNs with the cross-sectional median.
                med_value = df_neutral[factor].quantile(0.5)
                df_neutral[factor] = df_neutral[factor].fillna(med_value)

                if fill_cap:
                    med_value = df_neutral['scale_total_market_size'].quantile(0.5)
                    df_neutral['scale_total_market_size'] = \
                        df_neutral['scale_total_market_size'].fillna(med_value)

                # Regress on the dummies (no intercept: dummies span it).
                reg = sm.OLS(df_neutral[factor], df_neutral[exog_columns],
                             hasconst=False).fit()
                df_neutral[factor] = reg.resid
            except Exception:
                # FIX: was a bare `except:` which also swallowed
                # KeyboardInterrupt/SystemExit.  Keep the raw factor when the
                # regression cannot be run (e.g. singular design matrix).
                print(factor, 'exception, so no regression!')

    # ------------------------------------------------------------------
    # Per-cross-section normalization helpers
    # ------------------------------------------------------------------

    def _z_score_normalization(self, factor, verbose=True):
        '''z-score standardization for one cross-section.'''
        data = factor.copy()
        factor_names = data.columns

        data_normalization = (data - data.mean()) / data.std()

        # Columns with zero dispersion would divide to inf/NaN -> force 0.
        data_normalization.loc[:, data.std() == 0] = 0.0

        data[factor_names] = data_normalization

        if verbose:
            print(data.index.unique()[0][1].strftime('%Y-%m-%d'), 'done!')

        return data

    def _rank_normalization(self, factor, verbose=True):
        '''Rank standardization for one cross-section (z-score of the ranks).'''
        data = factor.copy()
        factor_names = data.columns

        data_rank = data.rank()
        data[factor_names] = (data_rank - data_rank.mean()) / data_rank.std()

        if verbose:
            print(data.index.unique()[0][1].strftime('%Y-%m-%d'), 'done!')

        return data[factor_names]

    def _weight_normalization(self, df, weight_column='scale_total_market_size', verbose=True):
        '''Cap-weighted standardization for one cross-section.

        The mean is weighted by *weight_column* (computed on complete rows
        only); the scale is the plain cross-sectional std.
        '''
        data = df.copy()
        columns = data.columns
        factor_names = columns.drop(weight_column)

        # Weighted means are computed on rows with no missing values.
        df_used = data.dropna()
        weight_mean = df_used[factor_names].apply(
            lambda x: np.average(x, weights=df_used[weight_column]))

        data[factor_names] = (data[factor_names] - weight_mean) / data[factor_names].std()

        if verbose:
            print(data.index.unique()[0][1].strftime('%Y-%m-%d'), 'done!')

        return data

    # ------------------------------------------------------------------
    # Industry-relative normalization
    # ------------------------------------------------------------------

    def _cs_ind_normalization(self, df, columns, ind_column):
        '''Shared body of the per-industry z-score normalization.

        NOTE: like the historical code, this sets the index on *df* in place,
        i.e. the caller's frame is mutated.
        '''
        df_used = df.copy()
        df_used['trade_date'] = df_used['date'].copy()
        df_used['industry_id'] = df_used[ind_column].copy()

        # Deduplicate (one stock may carry several industry labels per day).
        df_used = df_used.drop_duplicates(['stock_code', 'date'], keep='first')
        df_used = df_used.set_index(['stock_code', 'date', ind_column])
        df.set_index(['stock_code', 'date', ind_column], inplace=True)

        # z-score each factor within (date, industry) buckets.
        df_normalization = df_used.groupby(['trade_date', 'industry_id'])[columns].apply(
            lambda x: (x - x.mean()) / x.std())

        df[columns] = df_normalization.loc[df.index, columns]

        return df.reset_index()

    def cs_ci1_ind_normalization_process(self, df, columns):
        '''
        Normalize factors against CITIC level-1 industry statistics.

        :param df: dataframe with 'stock_code', 'date' and 'ci1_code' columns
        :param columns: factor columns to normalize
        :return: dataframe with ``columns`` z-scored within (date, industry)
        '''
        return self._cs_ind_normalization(df, columns, 'ci1_code')

    def cs_sw1_ind_normalization_process(self, df, columns):
        '''
        Normalize factors against SWS level-1 industry statistics.

        :param df: dataframe with 'stock_code', 'date' and 'sw1_code' columns
        :param columns: factor columns to normalize
        :return: dataframe with ``columns`` z-scored within (date, industry)
        '''
        return self._cs_ind_normalization(df, columns, 'sw1_code')

    # ------------------------------------------------------------------
    # Cross-sectional normalization drivers
    # ------------------------------------------------------------------

    def cs_weight_normalization_process(self,
                                        df,
                                        columns,
                                        weight_column='scale_total_market_size',
                                        copy=True,
                                        verbose=False):
        '''
        Cross-sectional cap-weighted standardization.

        :param df: input dataframe with 'stock_code' and 'date' columns
        :param columns: factor columns to standardize
        :param weight_column: weight used for the mean (default: float market cap)
        :param copy: work on a copy of ``df``
        :param verbose: print progress information
        :return: dataframe with ``columns`` standardized per trade date
        '''
        if copy:
            df = df.copy()

        df_used = df.copy()
        df_used['trade_date'] = df['date'].copy()
        # Deduplicate (one stock may carry several industry labels per day).
        df_used = df_used.drop_duplicates(['stock_code', 'date'], keep='first')

        df = df.set_index(['stock_code', 'date'])
        df_used = df_used.set_index(['stock_code', 'date'])

        df_normalization = df_used.groupby('trade_date')[columns + [weight_column]].apply(
            self._weight_normalization, weight_column, verbose
        )

        df[columns] = df_normalization.loc[df.index, columns]

        return df.reset_index()

    def cs_rank_normalization_process(self, df, columns, copy=True, verbose=False):
        '''
        Cross-sectional rank standardization.

        :param df: input dataframe with 'stock_code' and 'date' columns
        :param columns: factor columns to standardize
        :param copy: work on a copy of ``df``
        :param verbose: print progress information
        :return: dataframe with ``columns`` rank-standardized per trade date
        '''
        if copy:
            df = df.copy()

        df_used = df.copy()
        df_used['trade_date'] = df['date'].copy()
        # Deduplicate (one stock may carry several industry labels per day).
        df_used = df_used.drop_duplicates(['stock_code', 'date'], keep='first')

        df = df.set_index(['stock_code', 'date'])
        df_used = df_used.set_index(['stock_code', 'date'])

        df_normalization = df_used.groupby('trade_date')[columns].apply(
            self._rank_normalization, verbose)

        df[columns] = df_normalization.loc[df.index, columns]

        return df.reset_index()

    def cs_z_score_normalization_process(self, df, columns, copy=True, verbose=False):
        '''
        Cross-sectional z-score standardization.

        :param df: input dataframe with 'stock_code' and 'date' columns
        :param columns: factor columns to standardize
        :param copy: work on a copy of ``df``
        :param verbose: print progress information
        :return: dataframe with ``columns`` z-scored per trade date
        '''
        if copy:
            df = df.copy()

        df_used = df.copy()
        df_used['trade_date'] = df['date'].copy()
        # Deduplicate (one stock may carry several industry labels per day).
        df_used = df_used.drop_duplicates(['stock_code', 'date'], keep='first')

        df = df.set_index(['stock_code', 'date'])
        df_used = df_used.set_index(['stock_code', 'date'])

        df_normalization = df_used.groupby('trade_date')[columns].apply(
            self._z_score_normalization, verbose)

        df[columns] = df_normalization.loc[df.index, columns]

        return df.reset_index()

    # ------------------------------------------------------------------
    # Per-cross-section neutralization helpers (regression on dummies)
    # ------------------------------------------------------------------

    def _ci1_ind_neutral(self, df, verbose=True):
        '''CITIC level-1 industry neutralization (regression) for one cross-section.

        Expects a (stock_code, date)-indexed frame whose columns are the
        factors plus 'ci1_code'.
        '''
        data = df.copy()

        # Second index level is the trade date of this cross-section.
        today = data.index.unique()[0][1].strftime('%Y-%m-%d')

        ci1_unique = self._ci1_code_map(today)
        if verbose: print('new ci1 unique code used!')

        columns = data.columns
        factor_names = columns.drop('ci1_code')

        data['ci1_code_id'] = data['ci1_code'].map(lambda x: ci1_unique[x])

        ind_data = one_hot(data['ci1_code_id'].values, len(ci1_unique))
        ind_data = pd.DataFrame(ind_data,
                                columns=ci1_unique.keys(),
                                index=data.index)
        ind_list = ind_data.columns

        total_data = pd.concat([data[columns], ind_data], axis=1)
        df_neutral = total_data.copy()

        self._ols_residualize(df_neutral, factor_names, ind_list)

        data = data.set_index('ci1_code', append=True)
        df_neutral = df_neutral.set_index('ci1_code', append=True)

        data[factor_names] = df_neutral.loc[data.index, factor_names]

        data.drop('ci1_code_id', axis=1, inplace=True)

        return data

    def _sw1_ind_neutral(self, df, verbose=True):
        '''SWS level-1 industry neutralization (regression) for one cross-section.

        Expects a (stock_code, date)-indexed frame whose columns are the
        factors plus 'sw1_code'.
        '''
        data = df.copy()

        today = data.index.unique()[0][1].strftime('%Y-%m-%d')
        sw1_unique = self._sw1_code_map(today, verbose)

        columns = data.columns
        factor_names = columns.drop('sw1_code')

        data['sw1_code_id'] = data['sw1_code'].map(lambda x: sw1_unique[x])

        ind_data = one_hot(data['sw1_code_id'].values, len(sw1_unique))
        ind_data = pd.DataFrame(ind_data,
                                columns=sw1_unique.keys(),
                                index=data.index)
        ind_list = ind_data.columns

        total_data = pd.concat([data[columns], ind_data], axis=1)
        df_neutral = total_data.copy()

        self._ols_residualize(df_neutral, factor_names, ind_list)

        data = data.set_index('sw1_code', append=True)
        df_neutral = df_neutral.set_index('sw1_code', append=True)

        data[factor_names] = df_neutral.loc[data.index, factor_names]

        data.drop('sw1_code_id', axis=1, inplace=True)

        if verbose:
            print(today, 'done!')

        return data

    def _cap_sw1_ind_neutral(self, df, verbose=True):
        '''Market-cap + SWS level-1 industry neutralization for one cross-section.

        BUG FIX: the historical branch for dates in [2014-01-01, 2014-02-21)
        built the code list but never converted it into a {code: position}
        mapping, so ``data['sw1_code'].map(...)`` raised TypeError on that
        date range.  ``_sw1_code_map`` now handles all three regimes uniformly.
        '''
        data = df.copy()

        today = data.index.unique()[0][1].strftime('%Y-%m-%d')
        sw1_unique = self._sw1_code_map(today, verbose)

        columns = data.columns
        factor_names = columns.difference(['sw1_code', 'scale_total_market_size'])

        data['sw1_code_id'] = data['sw1_code'].map(lambda x: sw1_unique[x])

        ind_data = one_hot(data['sw1_code_id'].values, len(sw1_unique))
        ind_data = pd.DataFrame(ind_data,
                                columns=sw1_unique.keys(),
                                index=data.index)
        ind_list = ind_data.columns

        total_data = pd.concat([data[columns], ind_data], axis=1)
        df_neutral = total_data.copy()

        self._ols_residualize(df_neutral, factor_names,
                              ['scale_total_market_size'] + ind_list.tolist(),
                              fill_cap=True)

        data = data.set_index('sw1_code', append=True)
        df_neutral = df_neutral.set_index('sw1_code', append=True)

        data[factor_names] = df_neutral.loc[data.index, factor_names]

        data.drop('sw1_code_id', axis=1, inplace=True)

        if verbose:
            print(today, 'done!')

        return data

    def _cap_ci1_ind_neutral(self, df, verbose=True):
        '''Market-cap + CITIC level-1 industry neutralization for one cross-section.'''
        data = df.copy()

        today = data.index.unique()[0][1].strftime('%Y-%m-%d')

        ci1_unique = self._ci1_code_map(today)
        if verbose: print('new ci1 unique code used!')

        columns = data.columns
        factor_names = columns.difference(['ci1_code', 'scale_total_market_size'])

        data['ci1_code_id'] = data['ci1_code'].map(lambda x: ci1_unique[x])

        ind_data = one_hot(data['ci1_code_id'].values, len(ci1_unique))
        ind_data = pd.DataFrame(ind_data,
                                columns=ci1_unique.keys(),
                                index=data.index)
        ind_list = ind_data.columns

        total_data = pd.concat([data[columns], ind_data], axis=1)
        df_neutral = total_data.copy()

        self._ols_residualize(df_neutral, factor_names,
                              ['scale_total_market_size'] + ind_list.tolist(),
                              fill_cap=True)

        data = data.set_index('ci1_code', append=True)
        df_neutral = df_neutral.set_index('ci1_code', append=True)
        data[factor_names] = df_neutral.loc[data.index, factor_names]
        data.drop('ci1_code_id', axis=1, inplace=True)

        return data

    def _cap_ind_neutral(self, df, verbose=False):
        '''Market-cap-only neutralization (regression) for one cross-section.

        NOTE(review): rows with any NaN are dropped before the regression and
        the result is aligned back via ``.loc`` on the full index — on pandas
        >= 1.0, ``.loc`` with missing labels raises KeyError, so this code path
        assumes the legacy (reindex-like) behaviour.  Confirm before upgrading.
        '''
        data = df.copy()
        columns = data.columns
        factor_names = columns.drop('scale_total_market_size')
        df_used = data.dropna()

        for factor in factor_names:
            # NaNs are already gone after dropna(); these fills are kept as a
            # belt-and-braces guard matching the historical behaviour.
            df_used[factor] = df_used[factor].fillna(0.0)

            med_value = df_used['scale_total_market_size'].quantile(0.5)
            df_used['scale_total_market_size'] = df_used['scale_total_market_size'].fillna(med_value)

            reg = sm.OLS(df_used[factor], df_used['scale_total_market_size'], hasconst=False).fit()

            df_used[factor] = reg.resid

        data[columns] = df_used.loc[data.index, columns]

        if verbose:
            print(data.index.unique()[0][1].strftime('%Y-%m-%d'), 'done!')

        return data

    # ------------------------------------------------------------------
    # Cross-sectional neutralization drivers
    # ------------------------------------------------------------------

    def cs_sw1_ind_neutral_process(self, df, factor_columns, copy=True, verbose=False):
        '''
        SWS level-1 industry neutralization (regression method).

        :param df: dataframe with 'stock_code', 'date' and 'sw1_code' columns
        :param factor_columns: factor columns to neutralize
        :param copy: work on a copy of ``df``
        :param verbose: print progress information
        :return: dataframe with ``factor_columns`` replaced by residuals
        '''
        if copy:
            df = df.copy()

        # Deduplicate per (stock, date, industry) triple.
        df = df.drop_duplicates(['stock_code', 'date', 'sw1_code'], keep='first')
        df.set_index(['stock_code', 'date'], inplace=True)

        df_neutralize = df.groupby('date')[factor_columns + ['sw1_code']].apply(
            self._sw1_ind_neutral, verbose)

        df = df.set_index('sw1_code', append=True)
        # Drop the groupby key level so indices align with ``df`` again.
        df_neutralize.index = df_neutralize.index.droplevel(0)
        df[factor_columns] = df_neutralize.loc[df.index, factor_columns]

        return df.reset_index()

    def cs_ci1_ind_neutral_process(self, df, factor_columns, copy=True, verbose=False):
        '''
        CITIC level-1 industry neutralization (regression method).

        :param df: dataframe with 'stock_code', 'date' and 'ci1_code' columns
        :param factor_columns: factor columns to neutralize
        :param copy: work on a copy of ``df``
        :param verbose: print progress information
        :return: dataframe with ``factor_columns`` replaced by residuals
        '''
        if copy:
            df = df.copy()

        # Deduplicate per (stock, date, industry) triple.
        df = df.drop_duplicates(['stock_code', 'date', 'ci1_code'], keep='first')
        df.set_index(['stock_code', 'date'], inplace=True)

        df_neutralize = df.groupby('date')[factor_columns + ['ci1_code']].apply(
            self._ci1_ind_neutral, verbose)

        df = df.set_index('ci1_code', append=True)
        # Drop the groupby key level so indices align with ``df`` again.
        df_neutralize.index = df_neutralize.index.droplevel(0)
        df[factor_columns] = df_neutralize.loc[df.index, factor_columns]

        return df.reset_index()

    def cs_cap_sw1_ind_neutral_process(self, df, factor_columns, copy=True, verbose=False):
        '''
        Market-cap + SWS level-1 industry neutralization (regression method).

        :param df: dataframe with 'stock_code', 'date', 'sw1_code' and
                   'scale_total_market_size' columns
        :param factor_columns: factor columns to neutralize
        :param copy: work on a copy of ``df``
        :param verbose: print progress information
        :return: dataframe with ``factor_columns`` replaced by residuals
        '''
        if copy:
            df = df.copy()

        # Deduplicate per (stock, date) pair.
        df = df.drop_duplicates(['stock_code', 'date'], keep='first')
        df.set_index(['stock_code', 'date'], inplace=True)

        # Regress against log market cap, the usual convention.
        df['scale_total_market_size'] = np.log(df['scale_total_market_size'])

        df_neutralize = df.groupby('date')[factor_columns + ['sw1_code', 'scale_total_market_size']].apply(
            self._cap_sw1_ind_neutral, verbose
        )

        df = df.set_index('sw1_code', append=True)
        # Drop the groupby key level so indices align with ``df`` again.
        df_neutralize.index = df_neutralize.index.droplevel(0)
        df[factor_columns] = df_neutralize.loc[df.index, factor_columns]

        return df.reset_index()

    def cs_cap_ci1_ind_neutral_process(self, df, factor_columns, copy=True, verbose=False):
        '''
        Market-cap + CITIC level-1 industry neutralization (regression method).

        :param df: dataframe with 'stock_code', 'date', 'ci1_code' and
                   'scale_total_market_size' columns
        :param factor_columns: factor columns to neutralize
        :param copy: work on a copy of ``df``
        :param verbose: print progress information
        :return: dataframe with ``factor_columns`` replaced by residuals
        '''
        if copy:
            df = df.copy()

        # Deduplicate per (stock, date) pair.
        df = df.drop_duplicates(['stock_code', 'date'], keep='first')
        df.set_index(['stock_code', 'date'], inplace=True)

        # Regress against log market cap, the usual convention.
        df['scale_total_market_size'] = np.log(df['scale_total_market_size'])

        df_neutralize = df.groupby('date')[factor_columns + ['ci1_code', 'scale_total_market_size']].apply(
            self._cap_ci1_ind_neutral, verbose
        )

        df = df.set_index('ci1_code', append=True)
        # Drop the groupby key level so indices align with ``df`` again.
        df_neutralize.index = df_neutralize.index.droplevel(0)
        df[factor_columns] = df_neutralize.loc[df.index, factor_columns]

        return df.reset_index()

    def cs_cap_neutral_process(self, df, columns, copy=True):
        '''
        Market-cap-only neutralization (regression method).

        :param df: dataframe with 'stock_code', 'date' and
                   'scale_total_market_size' columns
        :param columns: factor columns to neutralize
        :param copy: when False, the caller's frame gains a 'trade_date'
                     column as a side effect (historical behaviour)
        :return: dataframe with ``columns`` replaced by residuals
        '''
        if copy:
            df_used = df.copy()
        else:
            df_used = df

        df_used['trade_date'] = df['date'].copy()

        # Deduplicate per (stock, date) pair.
        df_used = df_used.drop_duplicates(['stock_code', 'date'], keep='first')

        df_used.set_index(['stock_code', 'date'], inplace=True)

        # Regress against log market cap, the usual convention.
        df_used['scale_total_market_size'] = np.log(df_used['scale_total_market_size'])

        df_neutralize = df_used.groupby('date')[columns +
                                                ['scale_total_market_size']].apply(self._cap_ind_neutral)

        # Re-align by (stock_code, date) without re-indexing ``df`` itself.
        stock_code_date_multi_index = df[['stock_code', 'date']].set_index(['stock_code', 'date']).index

        df[columns] = df_neutralize.loc[stock_code_date_multi_index, columns].values

        return df

class AdvancedDataProcessing(BasicClass):
    '''Orthogonalization routines applied to factor columns of a dataframe.'''

    def symmetry_orthogonal(self, df, columns):
        '''
        Symmetric (Loewdin) orthogonalization of the factor columns.

        Each output factor is an equal-footing linear combination of the
        inputs, chosen so the results are mutually orthogonal and then
        rescaled back to the original per-factor variances.

        :param df: input dataframe
        :param columns: factor columns to orthogonalize
        :return: copy of ``df`` with ``columns`` replaced by their
                 orthogonalized counterparts
        '''
        out = df.copy()

        values = out[columns].copy()

        # Keep the original per-factor variances for the final rescale.
        variances = np.diag(values.var(axis=0).values)
        values = values.values

        n_obs = values.shape[0]

        # Overlap matrix: (N - 1) times the covariance of the factors.
        overlap = (n_obs - 1) * np.cov(values.T)

        # Eigen-decomposition of the overlap matrix.
        # NOTE(review): np.linalg.eig on a symmetric matrix — eigh would be
        # the numerically preferred routine; confirm before switching since
        # it changes results at float precision.
        eig_vals, eig_vecs = np.linalg.eig(overlap)

        # Transition matrix S = U * D^(-1/2) * U'.
        transition = np.dot(np.dot(eig_vecs, np.linalg.inv(np.diag(eig_vals) ** 0.5)), eig_vecs.T)

        # Rescale so every orthogonalized factor keeps its original variance.
        transition = np.dot(transition * ((n_obs - 1) ** 0.5), variances ** 0.5)

        # Apply the transition matrix and write the result back.
        out[columns] = np.dot(values, transition)

        return out

    def gram_schmidt_orthogonal(self, df, sorted_columns, is_unit=True):
        '''
        Classical Gram-Schmidt orthogonalization in the given column order.

        Earlier columns keep their direction; each later column has its
        projections onto all previously built vectors removed.

        :param df: input dataframe
        :param sorted_columns: factor columns, in priority order
        :param is_unit: normalize each output vector to unit length
        :return: copy of ``df`` with ``sorted_columns`` orthogonalized
        '''
        out = df.copy()

        values = out[sorted_columns].copy().values

        n_obs, n_factors = values.shape

        # Accumulator for the orthogonalized basis.
        ortho = np.zeros_like(values)

        for k in range(n_factors):
            vec = values[:, k]

            # Remove the projection onto every previously built vector.
            for j in range(k):
                proj = np.dot(np.dot(ortho[:, j].T, values[:, k]), ortho[:, j]) / np.dot(ortho[:, j], ortho[:, j].T)
                vec = vec - proj

            if is_unit:
                ortho[:, k] = vec / np.linalg.norm(vec)
            else:
                ortho[:, k] = vec

        # Write the orthogonalized factors back.
        out[sorted_columns] = ortho

        return out

# TODO - 功能汇总
class DataProcess(MissingDataProcess,
                  DataFilteration,
                  DuplicateProcssing,
                  OutlierProcessing,
                  AdvancedDataProcessing,
                  DataTransformation):
    '''Facade aggregating every processing mixin into one entry-point class.'''
    def __init__(self):
        # Cooperative init across the mixin chain (py2-compatible form).
        super(DataProcess, self).__init__()



if __name__ == '__main__':
    # Library module: no standalone entry point.
    pass


