#!/usr/bin/python
# -*-coding:utf-8-*-

'''基于组合优化构建因子'''

'''测试因子的时候，一般采取市值和行业中性化的方法'''

import os
import pandas as pd
import numpy as np
from cvxopt import matrix, solvers
from time import time
import multiprocessing as mp
from scipy import stats

from zg02_factor_lib.base.factors_library_base import NewFactorLib

from zg_data_process.zg_data_process import DataProcess
from zg_data_process.zg_data_concat import DataConcat

# 组合优化模块，不提供，主要是分组和配权
from zbc_portfolio_optimization.portfolio_construction.simple_trade_plan_generation import PortfolioGroupAndWeight

from zg_factor_analysis_module.trade_date_data_generation import get_every_fixed_frequency_trade_date

from zg_factor_analysis_module.config.public import ci1_code_map_dict

from zg_factor_analysis_module.utils import get_previous_trade_date


# Directory of pre-computed (JoinQuant) Barra factor exposure files.
barra_data_dir = './zbc_factors_lib/jq_barra_data'
# Directory of optimizer inputs (index style/industry exposures).
optimizer_database_dir = './zbc_factors_lib/optimizer_database'

''''''
# Database names: 'ew_real' holds raw factor tables, 'validation' holds analysis factors.
db = 'ew_real'
analysis_db = 'validation'

# Module-level API singletons shared by the functions and classes below.
data_reader = NewFactorLib(db=db)
analysis_data_reader = NewFactorLib(db=analysis_db)
data_concat_api = DataConcat(label_data_filename='processed_new_stock_label_ci1_data')
data_process_api = DataProcess()
group_weighting_api = PortfolioGroupAndWeight()

# data_reader.show_basic_library_db()

# TODO - 定义一些函数
## TODO - 读取BARRA数据
def load_barra_data(stock_index_code='000905'):
    """Load Barra exposure data and index component weights.

    :param stock_index_code: index code; '000905' -> csi500,
        '399102' -> cybz, anything else falls back to csi500.
    :return: tuple of (index style exposure, index CI1 industry exposure,
        stock Barra exposures before 2019-12-01, stock Barra exposures
        after 2019-12-01, index component weight data).
    """
    index_name_map = {'000905': 'csi500', '399102': 'cybz'}
    stock_index_name = index_name_map.get(stock_index_code, 'csi500')

    # TODO - index-level Barra style exposure, indexed by date
    style_path = os.path.join(optimizer_database_dir,
                              stock_index_name + '_jq_barra_style_exposure_data.h5')
    stock_index_barra_style_exposure_data = pd.read_hdf(style_path).set_index('date')

    # TODO - index-level CITIC level-1 (CI1) industry exposure, indexed by date
    ci1_path = os.path.join(optimizer_database_dir,
                            stock_index_name + '_ci1_exposure_data.h5')
    stock_index_ci1_exposure_data = pd.read_hdf(ci1_path).set_index('date')

    # Industry columns are stored upper-case here, so upper-case the CI1
    # code mapping keys before renaming.
    ci1_code_map_dict_upper = {key.upper(): value
                               for key, value in ci1_code_map_dict.items()}
    stock_index_ci1_exposure_data = \
        stock_index_ci1_exposure_data.rename(columns=ci1_code_map_dict_upper)

    # Stock-level Barra exposures come in two segments
    # (v0: before 2019-12-01, v1: after); normalize both the same way.
    def _load_segment(filename):
        segment = pd.read_hdf(os.path.join(barra_data_dir, filename))
        segment['date'] = pd.to_datetime(segment['date'])
        segment.columns = segment.columns.str.lower()
        return segment.rename(columns=ci1_code_map_dict)

    barra_exposure_data_p1 = _load_segment('new_processed_jq_barra_data_v0.h5')
    barra_exposure_data_p2 = _load_segment('new_processed_jq_barra_data_v1.h5')

    # TODO - index component weight data, restricted to the requested index;
    # stock codes get their exchange suffix ('6...' -> .SH, otherwise .SZ).
    weight_data = data_reader.read_basic_data_table('processed_daily_index_component_weight_data')
    index_mask = weight_data['indcode'] == stock_index_code
    stock_index_component_weight_data = weight_data[index_mask].copy()
    stock_index_component_weight_data['code'] = \
        stock_index_component_weight_data['code'].map(
            lambda sc: sc + '.SH' if sc[0] == '6' else sc + '.SZ')
    stock_index_component_weight_data = \
        stock_index_component_weight_data[['date', 'code', 'weight']]

    return (stock_index_barra_style_exposure_data,
            stock_index_ci1_exposure_data,
            barra_exposure_data_p1,
            barra_exposure_data_p2,
            stock_index_component_weight_data)

## TODO - module-level parameters
# Restrict the stock pool to CSI500 constituents when True.
is_select_csi500_pool = False

# Neutralization switches read by FactorMonitorV1.factor_data_process:
# industry+cap neutralization takes precedence, then cap-only, then industry-only.
is_ci1_ind_cap_neutral = True
is_cap_neutral = False
is_ci1_ind_neutral = False

# start_date = '2017-01-01'
# end_date = '2020-02-28'


''''''
class FactorMonitorV1(object):
    def __init__(self,
                 start_date,
                 end_date,
                 rebalance_date,
                 stock_index_code='000905',
                 **kwargs):
        self.stock_index_code = stock_index_code

        self.start_date = start_date
        self.end_date = end_date

        # self.ic_period = ic_period
        # self.icir_period = icir_period

        if 'ic_period' in kwargs.keys():
            self.ic_period = kwargs['ic_period']
        else:
            self.ic_period = 20

        if 'icir_period' in kwargs.keys():
            self.icir_period = kwargs['icir_period']
        else:
            self.icir_period = 20

        if 'is_cap_neutral' in kwargs.keys():
            self.is_cap_neutral = kwargs['is_cap_neutral']
        else:
            self.is_cap_neutral = True

        if 'is_ci1_ind_neutral' in kwargs.keys():
            self.is_ci1_ind_neutral = kwargs['is_ci1_ind_neutral']
        else:
            self.is_ci1_ind_neutral = False

        if 'weight_method' in kwargs.keys():
            self.weight_method = kwargs['weight_method']
        else:
            self.weight_method = 'ew'

        if 'neutral_group_num' in kwargs.keys():
            self.neutral_group_num = kwargs['neutral_group_num']
        else:
            self.neutral_group_num = 5

        if 'factor_group_num' in kwargs.keys():
            self.factor_group_num = kwargs['factor_group_num']
        else:
            self.factor_group_num = 5

        if 'result_save_dir' in kwargs.keys():
            self.result_save_dir = kwargs['result_save_dir']
        else:
            self.result_save_dir = '/db/zg_data/zbc/factor_analysis/monitor/public'

        # 是否直接读取给定路径的因子数据
        if 'factor_data_path' in kwargs.keys():
            self.factor_data_path = kwargs['factor_data_path']
        else:
            self.factor_data_path = None

        self.rebalance_date = rebalance_date
        self.rebalance_date = self.rebalance_date.set_index('selection_date')

        if not os.path.exists(self.result_save_dir):
            os.makedirs(self.result_save_dir)

        # self.initialization1()

    def _load_barra_data(self, stock_index_code='000905'):
        """Load Barra exposure data and index component weights.

        Instance-method variant of the module-level ``load_barra_data``.

        :param stock_index_code: '000905' -> csi500, '399102' -> cybz,
            otherwise falls back to csi500.
        :return: (index style exposure, index CI1 exposure, stock Barra
            exposures pre 2019-12-01, stock Barra exposures post 2019-12-01,
            index component weight data).
        """
        name_by_code = {'000905': 'csi500', '399102': 'cybz'}
        stock_index_name = name_by_code.get(stock_index_code, 'csi500')

        # TODO - index-level Barra style exposure, indexed by date
        stock_index_barra_style_exposure_data = pd.read_hdf(
            os.path.join(optimizer_database_dir,
                         stock_index_name + '_jq_barra_style_exposure_data.h5'))
        stock_index_barra_style_exposure_data = \
            stock_index_barra_style_exposure_data.set_index('date')

        # TODO - index-level CI1 industry exposure, indexed by date
        stock_index_ci1_exposure_data = pd.read_hdf(
            os.path.join(optimizer_database_dir,
                         stock_index_name + '_ci1_exposure_data.h5'))
        stock_index_ci1_exposure_data = stock_index_ci1_exposure_data.set_index('date')

        # Column names here are upper-case; upper-case the CI1 map keys first.
        upper_ci1_map = {code.upper(): name
                         for code, name in ci1_code_map_dict.items()}
        stock_index_ci1_exposure_data = \
            stock_index_ci1_exposure_data.rename(columns=upper_ci1_map)

        # Stock-level Barra exposures are split into two segments:
        # v0 covers dates before 2019-12-01, v1 covers dates after.
        def _normalize_segment(filename):
            seg = pd.read_hdf(os.path.join(barra_data_dir, filename))
            seg['date'] = pd.to_datetime(seg['date'])
            seg.columns = seg.columns.str.lower()
            return seg.rename(columns=ci1_code_map_dict)

        barra_exposure_data_p1 = _normalize_segment('new_processed_jq_barra_data_v0.h5')
        barra_exposure_data_p2 = _normalize_segment('new_processed_jq_barra_data_v1.h5')

        # TODO - index component weights, exchange suffix added to stock codes
        all_weights = data_reader.read_basic_data_table('processed_daily_index_component_weight_data')
        stock_index_component_weight_data = \
            all_weights[all_weights['indcode'] == stock_index_code].copy()
        stock_index_component_weight_data['code'] = \
            stock_index_component_weight_data['code'].map(
                lambda sc: sc + '.SH' if sc[0] == '6' else sc + '.SZ')
        stock_index_component_weight_data = \
            stock_index_component_weight_data[['date', 'code', 'weight']]

        return (stock_index_barra_style_exposure_data,
                stock_index_ci1_exposure_data,
                barra_exposure_data_p1,
                barra_exposure_data_p2,
                stock_index_component_weight_data)

    def _get_return_data(self, trade_data):
        # start_date = pd.to_datetime(self.start_date) - pd.Timedelta(days=30)
        trade_data = trade_data.copy()

        trade_data['close_back'] =trade_data['close'] * trade_data['back_adjfactor']
        trade_data['open_back'] =trade_data['open'] * trade_data['back_adjfactor']
        trade_data.set_index(['code','date'], inplace=True)

        ret_data = trade_data.groupby('code')['close_back'].pct_change(1)

        ret_data = ret_data.reset_index()
        ret_data = ret_data.set_index(['date', 'code'])

        ic_ret_data = trade_data.groupby('code')['close_back'].pct_change(self.ic_period)

        ic_ret_data = ic_ret_data.reset_index()
        ic_ret_data = ic_ret_data.set_index(['date', 'code'])

        return ret_data, ic_ret_data

    # TODO - 通用数据初始化
    def initialization1(self):
        """Load trade dates, daily returns and the labelled factor base data."""
        st = time()

        # TODO - trade dates, restricted to [start_date, end_date]
        processed_trade_date_data = data_reader.read_basic_data_table('processed_trade_date_data')
        all_dates = pd.DatetimeIndex(processed_trade_date_data['trade_date'].unique())
        in_range = (all_dates >= self.start_date) & (all_dates <= self.end_date)
        self.trade_date_data = all_dates[in_range]

        ### TODO - label data; shift the start back far enough that rolling
        ### IC/ICIR windows already have data at start_date.
        self.shift_start_date = get_previous_trade_date(
            self.start_date, self.ic_period * 2 + self.icir_period + 1)

        label_data = data_reader.read_basic_data_table(
            'processed_daily_stock_trade_data',
            columns=['date', 'code', 'close', 'open', 'back_adjfactor'],
            filter_list=["date >= '%s' and date <= '%s'" %
                         (self.shift_start_date, self.end_date)])

        # TODO - daily and ic_period-day returns per stock
        self.ret_data, self.ic_ret_data = self._get_return_data(trade_data=label_data)

        # TODO - attach label columns (industry, size, st/new-stock flags, ...)
        label_data = label_data.rename(columns={'code': 'stock_code'})
        label_data = label_data.drop(['open', 'back_adjfactor'], axis=1)
        label_data = data_concat_api.label_factor_data(df=label_data,
                                                       start_date=None,
                                                       end_date=None,
                                                       cache=False,
                                                       cache_filename=None,
                                                       add_size_factor=True,
                                                       refresh=True,
                                                       verbose=True,
                                                       copy=True)
        label_data = label_data.set_index(['stock_code', 'date'])
        self.label_data = label_data.drop('close', axis=1)

        del label_data
        et = time()
        print('data initialization v1 done, time spent is %.4f sec.' % (et-st))

    # TODO - 通用数据初始化
    def initialization2(self):
        """Load Barra exposures, index quotes and the style indicator factors."""
        st = time()

        # TODO - Barra data (only the index component weights are kept on self)
        stock_index_barra_style_exposure_data, \
        stock_index_ci1_exposure_data, \
        barra_exposure_data_p1, \
        barra_exposure_data_p2, \
        self.stock_index_component_weight_data = self._load_barra_data(stock_index_code=self.stock_index_code)

        # Stock-level style exposure columns to keep from both segments.
        style_list = [
            'date',
            'symbol',
            'size',
            'beta',
            'momentum',
            'non_linear_size',
            'bp',
            'earnings_yield',
            'growth',
            'leverage',
            'liquidity',
            'volatility',
        ]

        self.barra_style_exposure = pd.concat([barra_exposure_data_p1[style_list],
                                               barra_exposure_data_p2[style_list]], axis=0, ignore_index=True)
        self.barra_style_exposure = self.barra_style_exposure.set_index(['symbol', 'date'])

        # TODO - benchmark index quotes and daily returns
        self.stock_index_data = data_reader.read_basic_data_table('processed_daily_index_trade_data')
        # FIX: .copy() after the boolean filter so the column assignments
        # below write to an owned frame, not a view of the full table.
        self.stock_index_data = self.stock_index_data[self.stock_index_data['index_code'] == self.stock_index_code].copy()
        self.stock_index_data['return'] = self.stock_index_data['close'].pct_change()
        self.stock_index_data = self.stock_index_data.set_index('date')

        # TODO - style indicator factors (profitability, growth, valuation,
        # momentum, turnover), all indexed by ('stock_code', 'date')
        self.profit_ind = self._read_indexed_factor('profitability_roe_ttm')
        self.growth_ind = self._read_indexed_factor('growth_roe_single_season_change_4q')
        self.bp = self._read_indexed_factor('valuation_bp')
        self.mom = self._read_indexed_factor('mom_reverse_1m')
        self.turnover = self._read_indexed_factor('tech_turnover_1m')

        et = time()
        print('data initialization v2 done, time spent is %.4f sec.' % (et-st))

    def _read_indexed_factor(self, factor_name):
        """Read one analysis-db factor table indexed by ('stock_code', 'date').

        Shared helper for the repeated read-then-set_index pattern that
        `initialization2` previously spelled out five times.
        """
        factor_data = analysis_data_reader.read_factor_table(factor_name)
        return factor_data.set_index(['stock_code', 'date'])

    # TODO - 因子数据读取
    def load_factor_data(self, factor_name):
        """Read one factor table for [shift_start_date, end_date].

        Reads from the factor database by default, or from
        ``self.factor_data_path`` (one HDF file per factor) when that is set.

        :param factor_name: factor/table name (also the value column name).
        :return: DataFrame with the factor column, indexed by
            ('stock_code', 'date').
        """
        date_filter = ["date >= '%s' and date <= '%s'" %
                       (self.shift_start_date, self.end_date)]

        if self.factor_data_path is None:
            factor_data = data_reader.read_factor_table(factor_name,
                                                        filter_list=date_filter)
        else:
            factor_file = os.path.join(self.factor_data_path, factor_name + '.h5')
            factor_data = pd.read_hdf(factor_file, where=date_filter)

        factor_data = factor_data[['date', 'stock_code', factor_name]]

        return factor_data.set_index(['stock_code', 'date'])

    # TODO - 因子数据处理
    def factor_data_process(self, factor_data, factor_name):
        """Filter the stock pool and preprocess one factor.

        Pipeline: join the factor onto the label data; filter the stock pool
        (delisted / missing-cap / ST / new & sub-new / unclassified-industry
        stocks); fill missing values with the market median; clip outliers
        (MAD); normalize and neutralize per the module-level switches; merge
        duplicates; drop paused / one-line-board stocks; re-normalize.

        :param factor_data: factor DataFrame indexed by ('stock_code', 'date')
            with a `factor_name` column (as returned by `load_factor_data`).
        :param factor_name: factor column name.
        :return: processed long-format DataFrame.
        """
        st = time()
        # TODO - join label data onto the factor's own index.
        # FIX: the original used pd.concat(..., join_axes=[factor_data.index]),
        # an argument removed in pandas 1.0; concat + reindex is the documented
        # equivalent (result aligned to factor_data's index).
        label_factor_data = pd.concat([self.label_data,
                                       factor_data],
                                      axis=1).reindex(factor_data.index)

        label_factor_data = label_factor_data.reset_index()
        print('read and label factor data done!')

        '''stock pool filtering'''
        # TODO - drop rows with NaN delist_date
        label_factor_data = label_factor_data[~label_factor_data['delist_date'].isnull()]

        # TODO - drop delisted stocks
        label_factor_data = label_factor_data[label_factor_data['date'] < label_factor_data['delist_date']]
        label_factor_data = label_factor_data[~label_factor_data['scale_total_market_size'].isnull()]  # missing cap usually means delisted

        # TODO - drop ST stocks
        label_factor_data = data_process_api.st_filteration(df=label_factor_data, drop_label=True)

        # TODO - drop new and sub-new stocks
        label_factor_data = data_process_api.new_subnew_stock_filtration(df=label_factor_data, drop_label=True)

        # TODO - drop rows without a CITIC level-1 (CI1) industry code
        print('before drop ci1 nan code, shape is', label_factor_data.shape)
        label_factor_data = label_factor_data[~label_factor_data['ci1_code'].isnull()]
        print('after drop ci1 nan code, shape is', label_factor_data.shape)

        # TODO - drop the CI1 'unclassified' industry CI005000 (added 20200227)
        print('before drop ci1 CI005000 code, shape is', label_factor_data.shape)
        label_factor_data = label_factor_data[label_factor_data['ci1_code'] != 'CI005000']
        print('after drop ci1 CI005000 code, shape is', label_factor_data.shape)

        print('first-step factor data filteration done!')

        '''factor preprocessing'''
        factor_name_list = [factor_name]

        # TODO - fill missing values with the cross-sectional market median (q=0.5)
        print('before drop nan factor data shape is', label_factor_data.shape)
        label_factor_data = data_process_api.filled_with_market_stats_value(df=label_factor_data,
                                                                            columns=factor_name_list,
                                                                            q=0.5)
        print('after drop nan factor data shape is', label_factor_data.dropna().shape)
        print('concat factor data market fill nan done!')

        # TODO - clip outliers (median absolute deviation)
        label_factor_data = data_process_api.cs_mad_outlier_process(df=label_factor_data,
                                                                    columns=factor_name_list,
                                                                    drop=False,
                                                                    copy=True,
                                                                    verbose=False)
        print('concat factor data mad outlier processed!')

        '''normalization + neutralization'''
        # NOTE(review): these are the MODULE-LEVEL switches, not the
        # self.is_cap_neutral / self.is_ci1_ind_neutral attributes set in
        # __init__ — presumably intentional, but worth confirming.
        # (nested if/else flattened to an equivalent if/elif chain)
        if is_ci1_ind_cap_neutral:
            # 3. z-score normalization
            label_factor_data = data_process_api.cs_z_score_normalization_process(df=label_factor_data,
                                                                                  columns=factor_name_list,
                                                                                  copy=True,
                                                                                  verbose=False)
            print('concat factor data normalized!')

            # 4. industry and cap neutralization
            label_factor_data = data_process_api.cs_cap_ci1_ind_neutral_process(df=label_factor_data,
                                                                                factor_columns=factor_name_list)
            print('concat factor data cap and ci1-ind neutralization done!')
        elif is_cap_neutral:
            # 3. z-score normalization
            label_factor_data = data_process_api.cs_z_score_normalization_process(df=label_factor_data,
                                                                                  columns=factor_name_list,
                                                                                  copy=True,
                                                                                  verbose=False)
            print('concat factor data normalized!')

            # 4. cap neutralization
            label_factor_data = data_process_api.cs_cap_neutral_process(df=label_factor_data,
                                                                        columns=factor_name_list)
            print('concat factor data cap neutralization done!')
        elif is_ci1_ind_neutral:
            # 3. z-score normalization
            label_factor_data = data_process_api.cs_z_score_normalization_process(df=label_factor_data,
                                                                                  columns=factor_name_list,
                                                                                  copy=True,
                                                                                  verbose=False)
            print('concat factor data normalized!')

            # 4. industry neutralization
            label_factor_data = data_process_api.cs_ci1_ind_neutral_process(df=label_factor_data,
                                                                            factor_columns=factor_name_list)
            print('concat factor data ci1-ind neutralization done!')

        # TODO - merge duplicates (mainly rows duplicated by the CI1 label join)
        label_factor_data = data_process_api.duplicates_merge(df=label_factor_data,
                                                              columns=factor_name_list,
                                                              rule='mean')

        print('second-step factor data preprocess done!')

        '''further stock pool filtering'''
        # TODO - drop suspended (paused) stocks
        label_factor_data = data_process_api.paused_stock_filtration(df=label_factor_data, drop_label=True)

        # TODO - drop one-line (limit) board stocks
        label_factor_data = data_process_api.oneline_stock_filtration(df=label_factor_data, drop_label=True)

        # TODO - optionally restrict to CSI500 constituents
        if is_select_csi500_pool:
            label_factor_data = data_process_api.get_csi500_component_data(df=label_factor_data, drop_label=True)

        # TODO - final re-normalization after filtering
        label_factor_data = data_process_api.cs_z_score_normalization_process(df=label_factor_data,
                                                                              columns=factor_name_list,
                                                                              copy=True,
                                                                              verbose=False)
        print('final-step factor data filteration done!')

        et = time()
        print('factor data process all done, time is %.4f sec...' % (et - st))

        return label_factor_data

    # TODO - 计算因子IC和ICIR
    def cal_factor_icir(self,
                        label_factor_data,
                        factor_name,
                        method='rank'):
        '''
        Compute the daily IC series and its rolling ICIR for one factor.

        The factor is shifted forward by `ic_period + 1` rows per stock so
        that each date's factor value is matched against the realized
        `ic_period`-day forward return (taken from `self.ic_ret_data`).
        NOTE(review): the row shift assumes rows are date-sorted within each
        stock — confirm against the upstream data order.

        :param label_factor_data: DataFrame with columns
            ['date', 'stock_code', factor_name, ...].
        :param factor_name: factor column name.
        :param method: 'rank' (Spearman), 'weight' (rank-weighted Spearman),
            anything else falls back to Pearson.

        :return: (ic, ic_ir) — DataFrames indexed by date with columns
            ['ic'] and ['ic_ir'], restricted to [start_date, end_date].
        '''
        label_factor_data = label_factor_data.copy()
        cal_data = label_factor_data.set_index(['date', 'stock_code']).copy()

        ## TODO - attach the ic_period-day forward return
        cal_data['yret'] = self.ic_ret_data.loc[cal_data.index].copy()

        cal_data = cal_data[['yret', factor_name]]

        # Line factor(t) up with the return realized over (t, t + ic_period].
        cal_data[factor_name] = cal_data.groupby('stock_code')[factor_name].shift(self.ic_period + 1)
        cal_data = cal_data.dropna()

        # TODO - rank-weighted IC: stocks ranked higher (after applying the
        # factor direction) receive exponentially larger weights.
        def weight_ic(data, direction):
            temp = data.copy()

            temp[factor_name] = temp[factor_name] * direction

            temp['weight'] = temp[factor_name].rank().apply(lambda x: 2 ** (2 * x / temp.shape[0] - 2))

            temp['weight_x'] = temp[factor_name] * temp['weight']

            temp['weight_r'] = temp['yret'] * temp['weight']

            return stats.spearmanr(temp['weight_x'], temp['weight_r'])[0]

        if method == 'rank':
            ic = cal_data.groupby('date').apply(lambda x: stats.spearmanr(x[factor_name], x['yret'])[0])

        elif method == 'weight':
            # First pass decides the factor direction from the plain rank IC.
            ic = cal_data.groupby('date').apply(lambda x: stats.spearmanr(x[factor_name], x['yret'])[0])

            direction = 1.0 if ic.mean() > 0 else -1.0

            ic = cal_data.groupby('date').apply(lambda x: weight_ic(x, direction))
        else:
            ic = cal_data.groupby('date').apply(lambda x: stats.pearsonr(x[factor_name], x['yret'])[0])

        ic.name = factor_name

        # ICIR = rolling mean(IC) / rolling std(IC) over icir_period days.
        ic_mean = ic.rolling(self.icir_period).mean()

        ic_std = ic.rolling(self.icir_period).std()

        ic_ir = ic_mean / ic_std

        ic = pd.DataFrame(ic)
        ic.columns = ['ic']

        ic_ir = pd.DataFrame(ic_ir)
        ic_ir.columns = ['ic_ir']

        # TODO - restrict output to the requested analysis window
        ic = ic[(ic.index >= self.start_date) &
                (ic.index <= self.end_date)]

        ic_ir = ic_ir[(ic_ir.index >= self.start_date) &
                      (ic_ir.index <= self.end_date)]

        return ic, ic_ir

    # TODO - 组合回测
    def _portfolio_backtest(self, trade_plan):
        """Simulate the grouped portfolios day by day and return group returns.

        Walks every trade date: on a selection date the plan's new weights
        are adopted (no return is accrued that day); on every following day
        each position earns its daily return, weights drift with prices and
        are renormalized within each group.

        :param trade_plan: DataFrame with at least columns
            ['selection_date', 'stock_code', 'group', 'weight'].
        :return: DataFrame of daily group returns, indexed by date with
            columns g1..gK (one per factor group).
        """
        cal_data_list = []
        current_data = pd.DataFrame()  # positions carried into the next day
        for td in self.trade_date_data:
            # TODO - stocks selected on this date (empty if not a selection date)
            temp = trade_plan[trade_plan['selection_date'] == td].copy()

            # TODO - from the second day on, accrue returns on current holdings
            if current_data.shape[0] > 0:
                current_data['date'] = td
                current_data = current_data.set_index(['date', 'stock_code'])

                # TODO - daily return per position (missing quotes count as 0)
                current_data['quote_rate'] = self.ret_data.loc[current_data.index].copy()
                current_data['quote_rate'] = current_data['quote_rate'].fillna(0.0)

                current_data['weight_return'] = current_data['weight'] * current_data['quote_rate']
                cal_data_list.append(current_data.copy())

                # TODO - let weights drift with prices, then renormalize
                # within each group so every group sums to 1 again
                current_data['weight'] = current_data['weight'] * (1+current_data['quote_rate'])

                group_sum = current_data.groupby('group')['weight'].sum()

                current_data['group_sum'] = group_sum.loc[current_data['group']].values
                current_data['weight'] = current_data['weight'] / current_data['group_sum']  # normalize

                current_data = current_data.reset_index()

            # TODO - on a selection date switch to the new plan; the selection
            # day itself accrues no return for the new positions
            if temp.shape[0] > 0:
                current_data = temp.copy()

        daily_cal_data = pd.concat(cal_data_list, axis=0)
        daily_cal_data = daily_cal_data.drop('group_sum', axis=1)

        cal_data = daily_cal_data.copy()

        # TODO - aggregate weighted returns into per-group daily returns
        abs_ret = cal_data.groupby(['date', 'group'])['weight_return'].sum()

        abs_ret = abs_ret.reset_index().pivot(index='date', columns='group', values='weight_return')

        abs_ret.columns = ['g%d' % gn for gn in range(1, abs_ret.shape[1]+1)]

        return abs_ret

    # TODO - 构建组合
    # Portfolio construction: grouping + weighting on rebalance dates.
    def _portfolio_construction(self,
                               labeled_alpha_data,
                               factor_name,
                               is_cap_neutral=True,
                               is_ci1_ind_neutral=True,
                               weight_method='ew',
                               neutral_group_num=5,
                               factor_group_num=5):
        """Build the trade plan (stock groups + weights) on each selection date.

        Parameters
        ----------
        labeled_alpha_data : pandas.DataFrame
            Factor data with at least a 'date' column and the ``factor_name``
            column.
        factor_name : str
            Name of the factor column used for grouping.
        is_cap_neutral : bool
            Apply market-cap neutralization before grouping.
        is_ci1_ind_neutral : bool
            Apply CI1 industry neutralization; takes precedence over cap
            neutralization when both flags are True.
        weight_method : str
            'ew' -> equal weighting; 'ci1' -> benchmark-industry weighting
            (only with industry neutralization); anything else -> cap weighting.
        neutral_group_num : int
            Number of buckets used by the cap-neutral grouping.
        factor_group_num : int
            Number of factor groups.

        Returns
        -------
        Trade plan produced by the PortfolioGroupAndWeight API.
        """
        labeled_alpha_data = labeled_alpha_data.copy()

        # Keep only rows whose date is a selection date in the rebalance calendar.
        labeled_alpha_data = labeled_alpha_data[labeled_alpha_data['date'].isin(self.rebalance_date.index)]

        labeled_alpha_data['selection_date'] = labeled_alpha_data['date']
        # Attach the rebalance window columns mapped from each selection date.
        for window_col in ('rebalance_start', 'rebalance_end', 'next_rebalance_start'):
            labeled_alpha_data[window_col] = \
                self.rebalance_date.loc[labeled_alpha_data['selection_date'], window_col].values

        if not is_cap_neutral:
            # No neutralization: group on the raw factor.
            if weight_method == 'ew':
                trade_plan_on_rebalance_date = \
                    group_weighting_api.no_neutral_group_and_equally_weighting(df=labeled_alpha_data,
                                                                               factor_name=factor_name,
                                                                               group_num=factor_group_num,
                                                                               is_filter=True)
            else:
                trade_plan_on_rebalance_date = \
                    group_weighting_api.no_neutral_group_and_cap_weighting(df=labeled_alpha_data,
                                                                           factor_name=factor_name,
                                                                           group_num=factor_group_num,
                                                                           is_filter=True)
        elif not is_ci1_ind_neutral:
            # Market-cap neutralization only.
            if weight_method == 'ew':
                trade_plan_on_rebalance_date = \
                    group_weighting_api.cap_neutral_group_and_equally_weighting(df=labeled_alpha_data,
                                                                                factor_name=factor_name,
                                                                                neutral_factor='scale_total_market_size',
                                                                                neutral_group_num=neutral_group_num,
                                                                                group_num=factor_group_num,
                                                                                is_filter=True)
            else:
                # NOTE(review): this kwarg was previously misspelled
                # 'neutal_group_num'; corrected to 'neutral_group_num' to match
                # the equal-weight call above -- confirm against the
                # PortfolioGroupAndWeight signature.
                trade_plan_on_rebalance_date = \
                    group_weighting_api.cap_neutral_group_and_cap_weighting(df=labeled_alpha_data,
                                                                            factor_name=factor_name,
                                                                            group_num=factor_group_num,
                                                                            neutral_factor='scale_total_market_size',
                                                                            neutral_group_num=neutral_group_num,
                                                                            is_filter=True)
        else:
            # CI1 industry neutralization (wins when both neutral flags are True).
            if weight_method == 'ew':
                trade_plan_on_rebalance_date = \
                    group_weighting_api.ci1_neutral_group_and_equally_weighting(df=labeled_alpha_data,
                                                                                factor_name=factor_name,
                                                                                group_num=factor_group_num,
                                                                                is_filter=True)
            elif weight_method == 'ci1':
                trade_plan_on_rebalance_date = \
                    group_weighting_api.ci1_neutral_group_and_benchmark_ci1_neutral_weighting(df=labeled_alpha_data,
                                                                                              factor_name=factor_name,
                                                                                              group_num=factor_group_num,
                                                                                              is_filter=True)
            else:
                trade_plan_on_rebalance_date = \
                    group_weighting_api.ci1_neutral_group_and_cap_weighting(df=labeled_alpha_data,
                                                                            factor_name=factor_name,
                                                                            group_num=factor_group_num,
                                                                            is_filter=True)

        return trade_plan_on_rebalance_date

    # TODO - 计算因子组合构建和回测
    # Full single-factor pipeline: process -> IC/ICIR -> grouping -> backtest -> save.
    def factor_portfolio_basic_calc(self, factor_name, ic_method='rank'):
        """Run the basic evaluation pipeline for one factor and persist results.

        Saves IC, ICIR, group absolute returns (Excel) and the trade plan
        (HDF5) under ``self.result_save_dir/<factor_name>``.
        """
        # --- factor data preparation ---
        raw_factor = self.load_factor_data(factor_name)
        labeled_factor = self.factor_data_process(raw_factor, factor_name)

        # --- IC / ICIR ---
        ic, icir = self.cal_factor_icir(label_factor_data=labeled_factor,
                                        factor_name=factor_name,
                                        method=ic_method)

        # --- portfolio construction & weighting ---
        trade_plan = self._portfolio_construction(labeled_alpha_data=labeled_factor,
                                                  factor_name=factor_name,
                                                  is_cap_neutral=self.is_cap_neutral,
                                                  is_ci1_ind_neutral=self.is_ci1_ind_neutral,
                                                  weight_method=self.weight_method,
                                                  neutral_group_num=self.neutral_group_num,
                                                  factor_group_num=self.factor_group_num)

        # --- group return backtest ---
        abs_ret = self._portfolio_backtest(trade_plan)

        out_dir = os.path.join(self.result_save_dir, factor_name)
        os.makedirs(out_dir, exist_ok=True)

        # --- persist results ---
        ic.to_excel(os.path.join(out_dir, factor_name+'_ic_data.xlsx'))
        icir.to_excel(os.path.join(out_dir, factor_name+'_icir_data.xlsx'))
        abs_ret.to_excel(os.path.join(out_dir, factor_name+'_group_abs_ret_data.xlsx'))
        trade_plan.to_hdf(os.path.join(out_dir, factor_name+'_group_stock.h5'),
                          key=factor_name+'_group_stock')

        print(factor_name, '- ic & icir & group info saved!\n')

    ##
    # TODO - 计算组合盈利
    def _get_portfolio_value(self, df, cap_weight):
        stock_pool_add_value = df.copy()

        stock_pool_add_value = stock_pool_add_value.set_index(['stock_code', 'selection_date'])

        stock_pool_add_value['bp'] = self.bp.loc[stock_pool_add_value.index]

        if cap_weight:
            stock_pool_add_value['size'] = self.label_data.loc[stock_pool_add_value.index, 'scale_total_market_size']

        stock_pool_add_value['bp'] = stock_pool_add_value['bp'].fillna(0.0)

        stock_pool_add_value = stock_pool_add_value.dropna()

        stock_pool_add_value = stock_pool_add_value.reset_index()

        def get_group_ind(df):
            df = df.copy()

            # df['weighted_ep'] = df['ep'] * df['weight']
            df['weighted_bp'] = df['bp'] * df['weight']
            if not cap_weight:
                df['weighted_bp'] = df['bp'] * df['weight']
            else:
                df['weighted_bp'] = df['bp'] * df['size'] / df['size'].sum()

            return df['weighted_bp'].sum()

        stock_pool_value = stock_pool_add_value.groupby(['selection_date', 'group']).apply(get_group_ind)

        stock_pool_value = pd.DataFrame({'bp': stock_pool_value})

        stock_pool_value = stock_pool_value.reset_index()

        return stock_pool_value

    def _get_portfolio_profit(self, df, cap_weight):
        stock_pool_add_profit = df.copy()

        stock_pool_add_profit = stock_pool_add_profit.set_index(['stock_code', 'selection_date'])
        stock_pool_add_profit['profit'] = self.profit_ind.loc[stock_pool_add_profit.index]

        if cap_weight:
            stock_pool_add_profit['size'] = self.label_data.loc[stock_pool_add_profit.index, 'scale_total_market_size']

        stock_pool_add_profit['profit'] = stock_pool_add_profit['profit'].fillna(0.0)

        stock_pool_add_profit = stock_pool_add_profit.dropna()

        stock_pool_add_profit = stock_pool_add_profit.reset_index()

        def get_group_ind(df):
            df = df.copy()

            if not cap_weight:
                df['weighted_profit'] = df['profit'] * df['weight']
            else:
                df['weighted_profit'] = df['profit'] * df['size'] / df['size'].sum()

            return df['weighted_profit'].sum()

        stock_pool_profit = stock_pool_add_profit.groupby(['selection_date', 'group']).apply(get_group_ind)

        stock_pool_profit = pd.DataFrame({'profit': stock_pool_profit})

        stock_pool_profit = stock_pool_profit.reset_index()

        return stock_pool_profit

    def _get_portfolio_growth(self, df, cap_weight):
        stock_pool_add_growth = df.copy()

        stock_pool_add_growth = stock_pool_add_growth.set_index(['stock_code', 'selection_date'])
        stock_pool_add_growth['growth'] = self.growth_ind.loc[stock_pool_add_growth.index]

        if cap_weight:
            stock_pool_add_growth['size'] = self.label_data.loc[stock_pool_add_growth.index, 'scale_total_market_size']

        stock_pool_add_growth['growth'] = stock_pool_add_growth['growth'].fillna(0.0)

        stock_pool_add_growth = stock_pool_add_growth.dropna()

        stock_pool_add_growth = stock_pool_add_growth.reset_index()

        def get_group_ind(df):
            df = df.copy()

            if not cap_weight:
                df['weighted_growth'] = df['growth'] * df['weight']
            else:
                df['weighted_growth'] = df['growth'] * df['size'] / df['size'].sum()

            return df['weighted_growth'].sum()

        stock_pool_growth = stock_pool_add_growth.groupby(['selection_date', 'group']).apply(get_group_ind)

        stock_pool_growth = pd.DataFrame({'growth': stock_pool_growth})

        stock_pool_growth = stock_pool_growth.reset_index()

        return stock_pool_growth

    def _get_portfolio_momentum(self, df, cap_weight):
        stock_pool_add_mom = df.copy()

        stock_pool_add_mom = stock_pool_add_mom.set_index(['stock_code', 'selection_date'])
        stock_pool_add_mom['mom'] = self.mom.loc[stock_pool_add_mom.index]

        if cap_weight:
            stock_pool_add_mom['size'] = self.label_data.loc[stock_pool_add_mom.index, 'scale_total_market_size']

        stock_pool_add_mom['mom'] = stock_pool_add_mom['mom'].fillna(0.0)

        stock_pool_add_mom = stock_pool_add_mom.dropna()

        stock_pool_add_mom = stock_pool_add_mom.reset_index()

        def get_group_ind(df):
            df = df.copy()

            if not cap_weight:
                df['weighted_mom'] = df['mom'] * df['weight']
            else:
                df['weighted_mom'] = df['mom'] * df['size'] / df['size'].sum()

            return df['weighted_mom'].sum()

        stock_pool_mom = stock_pool_add_mom.groupby(['selection_date', 'group']).apply(get_group_ind)

        stock_pool_mom = pd.DataFrame({'mom': stock_pool_mom})

        stock_pool_mom = stock_pool_mom.reset_index()

        return stock_pool_mom

    def _get_portfolio_beta(self, df, cap_weight):
        stock_pool_add_beta = df.copy()

        stock_pool_add_beta = stock_pool_add_beta.set_index(['stock_code', 'selection_date'])
        stock_pool_add_beta['beta'] = self.barra_style_exposure.loc[stock_pool_add_beta.index, 'beta']

        if cap_weight:
            stock_pool_add_beta['size'] = self.label_data.loc[stock_pool_add_beta.index, 'scale_total_market_size']

        stock_pool_add_beta['beta'] = stock_pool_add_beta['beta'].fillna(0.0)

        stock_pool_add_beta = stock_pool_add_beta.dropna()

        stock_pool_add_beta = stock_pool_add_beta.reset_index()

        def get_group_ind(df):
            df = df.copy()

            if not cap_weight:
                df['weighted_mom'] = df['beta'] * df['weight']
            else:
                df['weighted_mom'] = df['beta'] * df['size'] / df['size'].sum()

            return df['weighted_mom'].sum()

        stock_pool_beta = stock_pool_add_beta.groupby(['selection_date', 'group']).apply(get_group_ind)

        stock_pool_beta = pd.DataFrame({'beta': stock_pool_beta})

        stock_pool_beta = stock_pool_beta.reset_index()

        return stock_pool_beta

    def _get_portfolio_turnover(self, df, cap_weight):
        stock_pool_add_turnover = df.copy()

        stock_pool_add_turnover = stock_pool_add_turnover.set_index(['stock_code', 'selection_date'])
        stock_pool_add_turnover['turnover'] = self.barra_style_exposure.loc[stock_pool_add_turnover.index, 'liquidity']

        if cap_weight:
            stock_pool_add_turnover['size'] = self.label_data.loc[stock_pool_add_turnover.index, 'scale_total_market_size']

        stock_pool_add_turnover['turnover'] = stock_pool_add_turnover['turnover'].fillna(0.0)

        stock_pool_add_turnover = stock_pool_add_turnover.dropna()

        stock_pool_add_turnover = stock_pool_add_turnover.reset_index()

        def get_group_ind(df):
            df = df.copy()

            if not cap_weight:
                df['weighted_turnover'] = df['turnover'] * df['weight']
            else:
                df['weighted_turnover'] = df['turnover'] * df['size'] / df['size'].sum()

            return df['weighted_turnover'].sum()

        stock_pool_turnover = stock_pool_add_turnover.groupby(['selection_date', 'group']).apply(get_group_ind)

        stock_pool_turnover = pd.DataFrame({'turnover': stock_pool_turnover})

        stock_pool_turnover = stock_pool_turnover.reset_index()

        return stock_pool_turnover

    def _get_portfolio_volatility(self, df, cap_weight):
        stock_pool_add_volatility = df.copy()

        stock_pool_add_volatility = stock_pool_add_volatility.set_index(['stock_code', 'selection_date'])
        stock_pool_add_volatility['volatility'] = self.barra_style_exposure.loc[stock_pool_add_volatility.index, 'volatility']

        if cap_weight:
            stock_pool_add_volatility['size'] = self.label_data.loc[stock_pool_add_volatility.index, 'scale_total_market_size']

        stock_pool_add_volatility['volatility'] = stock_pool_add_volatility['volatility'].fillna(0.0)

        stock_pool_add_volatility = stock_pool_add_volatility.dropna()

        stock_pool_add_volatility = stock_pool_add_volatility.reset_index()

        def get_group_ind(df):
            df = df.copy()

            if not cap_weight:
                df['weighted_volatility'] = df['volatility'] * df['weight']
            else:
                df['weighted_volatility'] = df['volatility'] * df['size'] / df['size'].sum()

            return df['weighted_volatility'].sum()

        stock_pool_volatility = stock_pool_add_volatility.groupby(['selection_date', 'group']).apply(get_group_ind)

        stock_pool_volatility = pd.DataFrame({'volatility': stock_pool_volatility})

        stock_pool_volatility = stock_pool_volatility.reset_index()

        return stock_pool_volatility

    # TODO - 计算因子指标V1
    # Group-level indicator analysis (valuation / fundamentals / risk styles).
    def factor_portfolio_advanced_calc_v1(self, factor_name, cap_weight=True):
        """Compute and save per-group indicator analyses for one factor.

        Reads the trade plan saved by ``factor_portfolio_basic_calc`` and
        writes one Excel file per indicator into
        ``self.result_save_dir/<factor_name>/indicator``.
        """
        group_dir = os.path.join(self.result_save_dir, factor_name)
        indicator_dir = os.path.join(self.result_save_dir, factor_name, 'indicator')
        os.makedirs(indicator_dir, exist_ok=True)

        # Trade plan produced by the basic calculation step.
        trade_plan = pd.read_hdf(os.path.join(group_dir, factor_name+'_group_stock.h5'))

        # Map each output file suffix to the indicator routine that fills it.
        analyses = [
            ('_group_valuation_analysis.xlsx', self._get_portfolio_value),
            ('_group_profitability_analysis.xlsx', self._get_portfolio_profit),
            ('_group_growth_analysis.xlsx', self._get_portfolio_growth),
            ('_group_mom_analysis.xlsx', self._get_portfolio_momentum),
            ('_group_beta_analysis.xlsx', self._get_portfolio_beta),
            ('_group_volatility_analysis.xlsx', self._get_portfolio_volatility),
            ('_group_turnover_analysis.xlsx', self._get_portfolio_turnover),
        ]
        for suffix, calc in analyses:
            result = calc(df=trade_plan, cap_weight=cap_weight)
            result.to_excel(os.path.join(indicator_dir, factor_name + suffix))


    # TODO - 计算因子指标V2
    def factor_portfolio_advanced_calc_v2(self, factor_name, direction=None):
        save_dir = os.path.join(self.result_save_dir, factor_name)
        result_save_dir = os.path.join(self.result_save_dir, factor_name, 'indicator')

        if not os.path.exists(result_save_dir):
            os.makedirs(result_save_dir)

        # TODO - 读取数据
        ic = pd.read_excel(os.path.join(save_dir, factor_name+'_ic_data.xlsx'))
        # icir = pd.read_excel(os.path.join(save_dir, factor_name+'_icir_data.xlsx'))
        abs_ret = pd.read_excel(os.path.join(save_dir, factor_name+'_group_abs_ret_data.xlsx'))
        trade_plan_on_rebalance_date = pd.read_hdf(os.path.join(save_dir, factor_name+'_group_stock.h5'))

        # direction
        if direction is None:
            direction = 1.0 if ic['ic'].mean() > 0 else -1.0

        # TODO - add benchmark return
        abs_ret_add_benchmark = abs_ret.copy()

        abs_ret_add_benchmark['bench'] = self.stock_index_data.loc[abs_ret_add_benchmark['date'], 'return'].values

        if direction == 1:
            abs_ret_add_benchmark['long_short'] = abs_ret_add_benchmark['g5'] - abs_ret_add_benchmark['g1']
            abs_ret_add_benchmark['excess'] = abs_ret_add_benchmark['g5'] - abs_ret_add_benchmark['bench']
        else:
            abs_ret_add_benchmark['long_short'] = abs_ret_add_benchmark['g1'] - abs_ret_add_benchmark['g5']
            abs_ret_add_benchmark['excess'] = abs_ret_add_benchmark['g1'] - abs_ret_add_benchmark['bench']

        abs_ret_add_benchmark['direction'] = direction

        abs_ret_add_benchmark = abs_ret_add_benchmark.set_index('date')

        # TODO - 计算超额收益统计
        excess_return_stats = abs_ret_add_benchmark[['excess']].copy()

        excess_return_stats['excess_std_20d'] = excess_return_stats['excess'].rolling(20).std() * ((250/20)**0.5)
        excess_return_stats['excess_std_60d'] = excess_return_stats['excess'].rolling(60).std() * ((250/60)**0.5)
        excess_return_stats['excess_std_120d'] = excess_return_stats['excess'].rolling(120).std() * ((250/120)**0.5)
        excess_return_stats['excess_std_250d'] = excess_return_stats['excess'].rolling(250).std()

        excess_return_stats['excess_cum_20d'] = excess_return_stats['excess'].rolling(20).sum()
        excess_return_stats['excess_cum_60d'] = excess_return_stats['excess'].rolling(60).sum()
        excess_return_stats['excess_cum_120d'] = excess_return_stats['excess'].rolling(120).sum()
        excess_return_stats['excess_cum_250d'] = excess_return_stats['excess'].rolling(250).sum()

        # excess_return_stats['excess_ir_20d'] = \
        #     excess_return_stats['excess'].rolling(20).mean() / excess_return_stats['excess'].rolling(20).std()
        excess_return_stats['excess_ir_60d'] = \
            excess_return_stats['excess'].rolling(60).mean() / excess_return_stats['excess'].rolling(60).std()
        excess_return_stats['excess_ir_120d'] = \
            excess_return_stats['excess'].rolling(120).mean() / excess_return_stats['excess'].rolling(120).std()
        excess_return_stats['excess_ir_250d'] = \
            excess_return_stats['excess'].rolling(250).mean() / excess_return_stats['excess'].rolling(250).std()


        # TODO - 计算配对相关性

        # abs_ret_add_benchmark['direction']

        # TODO - 计算多空波动率




        # TODO - 计算横截面离散度


        # TODO - 计算集中度

