#!/usr/bin/python
# -*-coding:utf-8-*-
import os
import pandas as pd
import numpy as np
from time import time

def _mad_winsorize(factor, n=3 * 1.4826, drop=False, verbose=False):
    '''MAD 去极值的函数，用于横截面处理'''
    # data = factor[factor_list].copy()
    # data = factor.copy()
    data = factor[['csi500_valuation3_syn_factor_value']].copy()
    factor_names = data.columns

    D_M = data.median()
    ls_deviation = np.abs(data - D_M)
    D_MAD = ls_deviation.median()

    lower = D_M - n * D_MAD
    upper = D_M + n * D_MAD

    # used_factor_names = factor_names[:1]
    # st = time()
    for factor_name in factor_names:
        factor_lower = lower[factor_name]
        factor_upper = upper[factor_name]
        if not drop:
            st = time()
            nan_location = data[factor_name].isnull()
            et = time()
            print('time is %.4f sec.' % (et-st))

            st = time()
            data.loc[data[factor_name] < factor_lower, factor_name] = factor_lower
            data.loc[data[factor_name] > factor_upper, factor_name] = factor_upper
            et = time()
            print('time is %.4f sec.' % (et - st))

            st = time()
            data.loc[nan_location, factor_name] = np.nan
            et = time()
            print('time is %.4f sec.' % (et - st))
        else:
            # TODO - 直接去掉
            data = data.loc[data[factor_name] >= factor_lower]
            data = data.loc[data[factor_name] <= factor_upper]
    # et = time()
    # print('time is %.4f sec.' % (et-st))

    if verbose:
        print(data.index.unique()[0][1].strftime('%Y-%m-%d'), 'done!')

    return data

def cs_mad_outlier_process(df, columns, n=3*1.4826, drop=False, copy=True, verbose=False):
    """Cross-sectional MAD outlier processing, one group per trade date.

    :param df:      long-format frame with 'stock_code' and 'date' columns.
    :param columns: feature columns to process.
    :param n:       MAD multiplier defining the clip bounds.
    :param drop:    drop extreme rows instead of clipping them.
    :param copy:    work on a copy of ``df`` instead of mutating it.
    :param verbose: print progress per cross-section.
    :return: ``df`` with ``columns`` winsorized, index reset.
    """
    if copy:
        df = df.copy()

    # Working copy keeps 'date' twice: once for the index, once ('trade_date')
    # as a plain column to group on.
    working = df.copy()
    working['trade_date'] = df['date'].copy()

    # Keep the first row of any duplicated (stock_code, date) pair.
    working = working.drop_duplicates(['stock_code', 'date'], keep='first')

    df = df.set_index(['stock_code', 'date'])
    working = working.set_index(['stock_code', 'date'])

    # Winsorize each daily cross-section independently.
    filtered = working.groupby('trade_date')[columns].apply(_mad_winsorize, n, drop, verbose)

    # Write the processed values back, aligned on the original row index.
    df[columns] = filtered.loc[df.index, columns]

    return df.reset_index()


def _mad_winsorize_v2(factor, n=3 * 1.4826, drop=False, verbose=True):
    '''MAD 去极值的函数，用于横截面处理'''
    # data = factor[factor_list].copy()
    data = factor.copy()
    # data = factor[['csi500_valuation3_syn_factor_value']].copy()

    D_M = data.median()
    ls_deviation = np.abs(data - D_M)
    D_MAD = ls_deviation.median()

    lower = D_M - n * D_MAD
    upper = D_M + n * D_MAD

    return pd.concat([upper, lower])

def cs_mad_outlier_process_v2(df, columns, n=3*1.4826, drop=False, copy=True, verbose=False):
    """Cross-sectional MAD winsorization, vectorized per column.

    Faster variant of :func:`cs_mad_outlier_process`: instead of applying a
    row-wise winsorizer per group, it computes the per-date median and MAD
    with grouped ``transform`` and clips each column in one pass.

    :param df:      long-format frame with 'stock_code' and 'date' columns.
    :param columns: feature columns to process.
    :param n:       MAD multiplier defining the clip bounds.
    :param drop:    accepted for interface compatibility; this variant
                    always clips and never drops rows.
    :param verbose: accepted for interface compatibility; unused.
    :return: ``df`` with ``columns`` winsorized, index reset.
    """
    if copy:
        df = df.copy()

    df_used = df.copy()
    df_used['trade_date'] = df['date'].copy()

    # Keep the first row of any duplicated (stock_code, date) pair.
    df_used = df_used.drop_duplicates(['stock_code', 'date'], keep='first')

    df = df.set_index(['stock_code', 'date'])
    df_used = df_used.set_index(['stock_code', 'date'])

    for factor_name in columns:
        # Per-date median and median absolute deviation, broadcast back to rows.
        center = df_used.groupby('trade_date')[factor_name].transform('median')
        deviation = (df_used[factor_name] - center).abs()
        mad = deviation.groupby(df_used['trade_date']).transform('median')

        # Clip to median +/- n * MAD; NaNs pass through unchanged.
        df_used[factor_name] = df_used[factor_name].clip(
            lower=center - n * mad, upper=center + n * mad)

    # Write the processed values back, aligned on the original row index.
    df[columns] = df_used.loc[df.index, columns]

    return df.reset_index()


### Script entry: load the cached, concatenated CSI500 factor panel and
### winsorize every synthesized factor cross-sectionally.
cache_factor_dir = '/db/zg_data/zbc/jh_fund_cooperative_project/linear_dynamic_style_alloc/csi500_corr_synthesis_model/cache'
concat_factor_data_filename = 'jh_ths_concat_and_label_synthesis_factor_on_csi500_pool_data'

# Synthesized factor columns to process.
factor_list = [
    'csi500_cashflow1_syn_factor_value',
    'csi500_cashflow2_syn_factor_value',
    'csi500_growth1_syn_factor_value',
    'csi500_growth2_syn_factor_value',
    'csi500_hf1_syn_factor_value',
    'csi500_hf2_syn_factor_value',
    'csi500_jh_fundemental_10_syn_factor_value',
    'csi500_jh_fundemental_1_syn_factor_value',
    'csi500_jh_fundemental_2_syn_factor_value',
    'csi500_jh_fundemental_3_syn_factor_value',
    'csi500_jh_fundemental_4_syn_factor_value',
    'csi500_jh_fundemental_6_syn_factor_value',
    'csi500_jh_fundemental_7_syn_factor_value',
    'csi500_jh_fundemental_8_syn_factor_value',
    'csi500_jh_fundemental_9_syn_factor_value',
    'csi500_jh_liquidity_1_syn_factor_value',
    'csi500_jh_liquidity_2_syn_factor_value',
    'csi500_jh_tech_syn_factor_value',
    'csi500_liquidity1_syn_factor_value',
    'csi500_liquidity2_syn_factor_value',
    'csi500_liquidity3_syn_factor_value',
    'csi500_mom1_syn_factor_value',
    'csi500_mom2_syn_factor_value',
    'csi500_mom3_syn_factor_value',
    'csi500_mom4_syn_factor_value',
    'csi500_network_syn_factor_value',
    'csi500_other1_syn_factor_value',
    'csi500_other2_syn_factor_value',
    'csi500_profitability1_syn_factor_value',
    'csi500_profitability2_syn_factor_value',
    'csi500_profitability3_syn_factor_value',
    'csi500_profitability4_syn_factor_value',
    'csi500_profitability5_syn_factor_value',
    'csi500_profitability6_syn_factor_value',
    'csi500_rvolatility1_syn_factor_value',
    'csi500_rvolatility2_syn_factor_value',
    'csi500_valuation1_syn_factor_value',
    'csi500_valuation2_syn_factor_value',
    'csi500_valuation3_syn_factor_value',
]

if __name__ == '__main__':
    # Guard the heavy I/O and processing so importing this module for its
    # functions does not trigger them.
    concat_factor_data = pd.read_hdf(
        os.path.join(cache_factor_dir, concat_factor_data_filename + '.h5'))

    # Restrict to the recent sample and rows with complete factor values.
    concat_factor_data = concat_factor_data[concat_factor_data['date'] >= '2019-05-29']
    concat_factor_data = concat_factor_data.dropna()

    # Capture the winsorized panel (the original call discarded the result).
    processed_factor_data = cs_mad_outlier_process(
        df=concat_factor_data, columns=factor_list,
        n=3*1.4826, drop=False, copy=True, verbose=False)