#!/usr/bin/python
# -*-coding:utf-8-*-

'''Batch factor analysis.'''
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
# plt.switch_backend('agg')

# plt.style.use('seaborn')
# plt.style.use('seaborn-white')
plt.rcParams['axes.unicode_minus'] = False

import os
import gc
import pandas as pd
import numpy as np
from time import time
import datetime

from zg_data_process.zg_data_concat import DataConcat

from zg_factor_analysis_module.base.FactorAnalysisLib import FactorAnalysisLib

from zg_factor_analysis_module.factor_analysis_class import FactorAnalysis

from zg_factor_analysis_module.support_functions import transform_float64_to_float32

from zg_factor_analysis_module.base.dir_info import factor_lib_dir

from zg_factor_analysis_module.trade_date_data_generation import get_monthly_first_monday_trade_date

class FactorAnalysisPipeline(FactorAnalysis):
    def __init__(self,
                 start_date,
                 end_date,
                 group_quantile = 5,
                 neutral_group_quantile = 5,
                 rebalance_periods=21,
                 rebalance_type='fixed',
                 universe_pool='000905',
                 benchmark_index_code='000905',
                 liquidity_filter=None,
                 liquidity_filter_period=21,
                 group_method='cap',
                 weight_method='ew',
                 industry_type='sw1',
                 factor_database='public',
                 factor_lib_dir=factor_lib_dir,
                 is_plot_quantile_abs_return=False,
                 is_save_quantile_detail=False,
                 is_do_regress_analysis=False):

        '''
        :param start_date: analysis start date
        :param end_date: analysis end date
        :param group_quantile: number of quantile groups
        :param neutral_group_quantile: number of groups used for market-cap neutralization
        :param rebalance_periods: rebalance frequency; used when rebalance_type != 'strategy'
        :param rebalance_type: rebalance mode, 'strategy' or 'fixed'
        :param universe_pool: candidate stock universe (index-code string, 'all', or a custom pool)
        :param liquidity_filter: liquidity-based universe filter, 'money' or 'turnover'
        :param liquidity_filter_period: rolling window used for avg_money / avg_turnover
        :param benchmark_index_code: benchmark index code, used for excess-return computation
        :param group_method: grouping method: 'sw1', 'cap', or other (no neutralization)
        :param weight_method: weighting scheme: 'ew', 'cir_cap', 'total_cap'
        :param factor_database: name of the factor database
        :param factor_lib_dir: directory the factor library is read from
        :param is_plot_quantile_abs_return: whether to plot cumulative absolute-return curves
        :param is_save_quantile_detail: whether to save per-quantile detail data
        :param is_do_regress_analysis: whether to run the regression analysis
        '''
        super(FactorAnalysisPipeline, self).__init__()

        # API used to read factor data from the factor library.
        self.factor_data_reader = FactorAnalysisLib(factor_lib_dir=factor_lib_dir,
                                                    db=factor_database)

        # Analysis window.
        self.start_date = start_date
        self.end_date = end_date

        # Grouping / weighting configuration.
        self.group_quantile = group_quantile
        self.neutral_group_quantile = neutral_group_quantile
        self.group_method = group_method
        self.weight_method = weight_method
        self.industry_type = industry_type

        # Rebalancing configuration.
        self.rebalance_periods = rebalance_periods
        self.rebalance_type = rebalance_type

        # Universe and benchmark configuration.
        self.universe_pool = universe_pool
        self.benchmark_index_code = benchmark_index_code

        # Liquidity filtering configuration.
        self.liquidity_filter = liquidity_filter
        self.liquidity_filter_period = liquidity_filter_period
        # Absolute floor on the average liquidity metric.
        self.liquidity_filter_threshold = 5e6
        # Keep only this top fraction of the cross-section by liquidity.
        self.liquidity_top_pct = 0.9

        # Output switches.
        self.is_save_quantile_detail = is_save_quantile_detail
        # Whether to plot absolute-return quantile curves.
        self.is_plot_quantile_abs_return = is_plot_quantile_abs_return
        # Whether to run the regression analysis.
        self.is_do_regress_analysis = is_do_regress_analysis

    # TODO - fetch circulating market-cap data
    def get_circulate_size_data(self):
        '''Read circulating market-cap data for the analysis window, indexed
        by ('stock_code', 'date').
        '''
        raw = self.data_reader.read_factor_table(filename='scale_circulate_market_size')

        # Restrict to the analysis window.
        in_window = (raw['date'] >= self.start_date) & (raw['date'] <= self.end_date)
        sized = raw[in_window].set_index(['stock_code', 'date'])

        # TODO - dtype conversion to reduce memory usage
        return transform_float64_to_float32(sized)

    def get_total_size_data(self):
        '''Read total market-cap data for the analysis window, indexed by
        ('stock_code', 'date').
        '''
        raw = self.data_reader.read_factor_table(filename='scale_total_market_size')

        # Restrict to the analysis window.
        in_window = (raw['date'] >= self.start_date) & (raw['date'] <= self.end_date)
        sized = raw[in_window].set_index(['stock_code', 'date'])

        # TODO - dtype conversion to reduce memory usage
        return transform_float64_to_float32(sized)

    # TODO - fetch daily trade data, indexed by (code, date)
    def get_trade_data(self):
        '''Read daily stock trade data and derive back-adjusted prices, daily
        returns and (optionally) a 0/1 liquidity-filter label.

        The read window starts ``rebalance_periods * 3`` calendar days before
        ``self.start_date`` so rolling liquidity averages are warmed up; rows
        before ``self.start_date`` are dropped again at the end.  Also caches
        the daily returns on ``self.ret_data``, indexed by ('date', 'code').
        '''
        start_date = pd.to_datetime(self.start_date) - pd.Timedelta(days=self.rebalance_periods*3)
        trade_data = self.data_reader.read_basic_data_table(filename='processed_daily_stock_trade_data',
                                                            columns=['code',
                                                               'open',
                                                               'high',
                                                               'low',
                                                               'date',
                                                               'close',
                                                               'back_adjfactor',
                                                               'money',
                                                               'turnover'],
                                                            filter_list=["date >= '%s' and date <= '%s'" %
                                                                   (start_date, self.end_date)])

        # Back-adjusted close/open prices.
        trade_data['close_back'] =trade_data['close'] * trade_data['back_adjfactor']
        trade_data['open_back'] =trade_data['open'] * trade_data['back_adjfactor']
        trade_data.set_index(['code','date'], inplace=True)

        # Per-stock daily returns from back-adjusted closes.
        ret_data = trade_data.groupby('code')['close_back'].pct_change(1)
        self.ret_data = ret_data.copy()

        trade_data['quote_rate'] = ret_data.loc[trade_data.index]
        # Keep a (date, code)-indexed copy of the returns for later use.
        self.ret_data = self.ret_data.reset_index()
        self.ret_data = self.ret_data.set_index(['date', 'code'])
        del ret_data

        if self.liquidity_filter is not None:
            # Rolling mean of the chosen liquidity metric (money / turnover).
            avg_liquidity_item = \
                trade_data.groupby('code', as_index=False)[[self.liquidity_filter]].rolling(self.liquidity_filter_period).mean()
            avg_liquidity_item = avg_liquidity_item.reset_index(['code','date'])
            avg_liquidity_item.set_index(['code','date'], inplace=True)

            trade_data['avg_liquidity'] = avg_liquidity_item.loc[trade_data.index].copy()
            trade_data['avg_liquidity'] = trade_data['avg_liquidity'].fillna(0.0)

            del avg_liquidity_item

            # Per-date cross-sectional filter: keep the top `top` fraction of
            # stocks by average liquidity, subject to the absolute floor
            # `min_value`.  NOTE: the defaults bind self.liquidity_top_pct and
            # self.liquidity_filter_threshold at function-definition time.
            def filter_universe_label(df,
                                      key='avg_liquidity',
                                      top=self.liquidity_top_pct,
                                      min_value=self.liquidity_filter_threshold):
                df_used = df.copy()
                df_used = df_used[df_used[key] > 0.0]

                df_used = df_used.sort_values(key, ascending=False)

                df_used = df_used.iloc[:int(top*df_used.shape[0]), :].copy()

                df_used = df_used[df_used[key] >= min_value]

                df_used['filter_label'] = 1.0

                # Re-align to the full cross-section; excluded stocks get 0.0.
                df_used = df_used.reindex(df.index)

                df_used['filter_label'] = df_used['filter_label'].fillna(0.0)

                return df_used[['filter_label']]

            liquidity_label = trade_data.groupby('date').apply(filter_universe_label)

            trade_data['liquidity_label'] = liquidity_label.loc[trade_data.index].copy()
            trade_data['liquidity_label'] = trade_data['liquidity_label'].fillna(0.0)

        # Drop the warm-up rows that precede the requested start date.
        trade_data = trade_data.reset_index('date')
        trade_data = trade_data[trade_data['date'] >= self.start_date]
        trade_data = trade_data.set_index('date', append=True)

        # TODO - dtype conversion to reduce memory usage
        trade_data = transform_float64_to_float32(trade_data)

        return trade_data

    # TODO - fetch daily index quote data
    def get_daily_index_trade_data(self):
        '''Read the benchmark index's daily trade data and compute its daily
        return ('quote_rate'); the result is indexed by datetime date.
        '''
        index_trade_data = self.data_reader.read_basic_data_table('processed_daily_index_trade_data')

        # .copy() so the column assignments below operate on an independent
        # frame rather than a slice of the full table (avoids pandas'
        # SettingWithCopyWarning and potentially lost writes).
        index_trade_data = index_trade_data[index_trade_data['index_code'] == self.benchmark_index_code].copy()
        index_trade_data['quote_rate'] = index_trade_data['close'].pct_change()
        index_trade_data.index = pd.to_datetime(index_trade_data.date)

        # TODO - dtype conversion to reduce memory usage
        index_trade_data = transform_float64_to_float32(index_trade_data)

        return index_trade_data

    # TODO - fetch label data and concatenate
    def load_analysis_data(self, trade_data):
        '''Attach industry/status labels and log market-cap columns to the
        trade data.

        :param trade_data: DataFrame indexed by ('code', 'date'), as returned
                           by get_trade_data().
        :return: labeled DataFrame indexed by ('stock_code', 'date').
        '''
        st = time()
        trade_data = trade_data.reset_index()

        # The labeling API expects a 'stock_code' column.
        trade_data['stock_code'] = trade_data['code'].copy()
        if self.industry_type == 'sw1':
            data_concat_api = DataConcat(label_data_filename='processed_new_stock_label_sw1_data')
        else:
            data_concat_api = DataConcat(label_data_filename='processed_new_stock_label_ci1_data')

        start = trade_data['date'].min().strftime('%Y-%m-%d')
        end = trade_data['date'].max().strftime('%Y-%m-%d')

        # TODO attach label data
        labeled_factor_data = data_concat_api.label_factor_data(df=trade_data,
                                                                start_date=start,
                                                                end_date=end,
                                                                cache=False,
                                                                cache_filename=None,
                                                                add_size_factor=False,
                                                                refresh=True,
                                                                verbose=True)

        labeled_factor_data = labeled_factor_data.set_index(['stock_code', 'date'])

        # TODO join market-cap data, aligned on the labeled data's index.
        # pd.concat's join_axes argument was removed in pandas 1.0; the
        # documented replacement is concat followed by reindex.
        labeled_factor_data = pd.concat([labeled_factor_data, self.circulate_size, self.total_size],
                                        axis=1).reindex(labeled_factor_data.index)

        # Downstream analysis uses log market caps.
        labeled_factor_data['scale_total_market_size'] = np.log(labeled_factor_data['scale_total_market_size'])
        labeled_factor_data['scale_circulate_market_size'] = np.log(labeled_factor_data['scale_circulate_market_size'])

        # TODO - dtype conversion to reduce memory usage
        labeled_factor_data = transform_float64_to_float32(labeled_factor_data)

        # TODO - reclaim memory from the intermediates dropped above
        gc.collect()

        et = time()
        print('load analysis data finish, time spent is %.5f sec.\n' % (et - st))

        return labeled_factor_data

    # TODO - load shared data (initialization)
    def initialization(self):
        '''Load every shared analysis input: trade data, market caps,
        benchmark quotes, the trade-date calendar, the rebalance-date list
        and the labeled analysis data.
        '''
        trade_data = self.get_trade_data()

        self.circulate_size = self.get_circulate_size_data()
        self.total_size = self.get_total_size_data()
        self.index_data = self.get_daily_index_trade_data()

        # Sorted calendar of all trade dates present in the data.
        all_dates = trade_data.index.get_level_values('date').unique().tolist()
        self.trade_date = sorted(all_dates)

        # TODO build the rebalance-date list
        if self.rebalance_type == 'strategy':
            # Monthly rebalance on the first Monday-style trade date.
            start = min(self.trade_date).strftime('%Y-%m-%d')
            end = max(self.trade_date).strftime('%Y-%m-%d')

            monthly = get_monthly_first_monday_trade_date(start_date=start,
                                                          end_date=end,
                                                          filter_date='rebalance_start')
            self.rebalance_date_data = monthly['selection_date'].tolist()
        else:
            # Fixed-frequency rebalance every `rebalance_periods` trade days.
            self.rebalance_date_data = self.trade_date[::self.rebalance_periods]

        print('rebalance type is %s, rebalance date are' % (self.rebalance_type))
        print(self.rebalance_date_data[-5:])

        # TODO load labels and other shared data
        self.labeled_analysis_data = self.load_analysis_data(trade_data)

        del trade_data
        gc.collect()

    # TODO - factor data extraction
    def get_factor_data(self, factor_name):  # read factor data, indexed by (stock_code, date)
        '''Read one factor's data for the analysis window, deduplicated and
        indexed by ('stock_code', 'date').
        '''
        date_filter = ["date >= '%s' and date <= '%s'" % (self.start_date, self.end_date)]
        raw = self.factor_data_reader.read_factor_table(filename=factor_name,
                                                        filter_list=date_filter)

        # Normalize the stock-code column name across factor tables.
        raw = raw.rename(columns={'stkcode': 'stock_code', 'code': 'stock_code'})

        # Keep only the latest row per (date, stock).
        deduped = raw.drop_duplicates(subset=['date', 'stock_code'], keep='last')

        return deduped.set_index(['stock_code', 'date'])

    # TODO - attach factor data
    def load_and_concat_factor_data(self, factor_name):
        '''Join one factor's data onto the shared labeled analysis data,
        aligned on the labeled data's index.

        :param factor_name: factor table name to load.
        :return: combined DataFrame with a flat (reset) index.
        '''
        factor_data = self.get_factor_data(factor_name)

        # pd.concat's join_axes argument was removed in pandas 1.0; the
        # documented replacement is concat followed by reindex.
        concat_data = pd.concat([self.labeled_analysis_data, factor_data],
                                axis=1).reindex(self.labeled_analysis_data.index)

        print('load factor data finish!')

        return concat_data.reset_index()

    # TODO - data filter
    def fillter_analysis_data(self,
                              concat_data,
                              filter_st=True,
                              filter_oneline=True,
                              filter_sub_new=True,
                              filter_paused=True):
        '''Filter the analysis data: ST stocks, paused stocks, (sub-)new
        stocks, one-line (limit-locked) stocks, the configured universe pool
        and (optionally) the liquidity filter.

        :param concat_data: labeled analysis data with flat 'stock_code' and
                            'date' columns.
        :param filter_st: drop ST stocks.
        :param filter_oneline: drop one-line (limit-locked) stocks.
        :param filter_sub_new: drop new / sub-new stocks.
        :param filter_paused: drop suspended stocks.
        :return: filtered DataFrame with a flat (reset) index.
        '''
        concat_data = concat_data.set_index(['stock_code', 'date'])

        # ST filter
        if filter_st:
            concat_data = self.data_process.st_filteration(concat_data, drop_label=False)

        # Suspended-stock filter
        if filter_paused:
            concat_data = self.data_process.paused_stock_filtration(concat_data, drop_label=False)

        # New / sub-new stock filter
        if filter_sub_new:
            concat_data = self.data_process.new_subnew_stock_filtration(concat_data, drop_label=False)

        # One-line (limit-locked) stock filter
        if filter_oneline:
            concat_data = self.data_process.oneline_stock_filtration(concat_data, drop_label=False)

        # Universe filter
        if isinstance(self.universe_pool, str):
            # Dispatch table instead of a long if/elif chain; 'all' (whole
            # A-share market) and unknown codes leave the data untouched.
            pool_filters = {
                '000905': self.data_process.get_csi500_component_data,
                '000300': self.data_process.get_csi300_component_data,
                '399102': self.data_process.get_cybz_component_data,
                '399006': self.data_process.get_cyb100_component_data,
                '399673': self.data_process.get_cyb50_component_data,
                '000852': self.data_process.get_csi1000_component_data,
                '000903': self.data_process.get_csi100_component_data,
                '000016': self.data_process.get_sh50_component_data,
            }
            pool_filter = pool_filters.get(self.universe_pool)
            if pool_filter is not None:
                concat_data = pool_filter(concat_data, drop_label=False)
        else:
            # Custom universe: a frame whose index marks member stocks.
            self.universe_pool['label'] = 1.0

            concat_data['is_basic'] = self.universe_pool.loc[concat_data.index, 'label']
            concat_data['is_basic'] = concat_data['is_basic'].fillna(0.0)

            concat_data = concat_data[concat_data['is_basic'] == 1.0]

            concat_data.drop('is_basic', axis=1, inplace=True)

        # Liquidity filter (label computed in get_trade_data)
        if self.liquidity_filter is not None:
            concat_data = concat_data[concat_data['liquidity_label'] == 1.0]

        print('filter analysis data finish!')
        return concat_data.reset_index()

    #TODO - data preprocessing
    def analysis_data_processed(self,
                                concat_data,
                                factor_name,
                                fillna=True,
                                winsorize=True,
                                normalization=True,
                                drop_duplicates=True):
        '''Preprocess the factor column: industry-stats fill of missing
        values, NaN removal, winsorization, z-score normalization and
        deduplication.
        '''
        # Fill missing factor values with industry-level statistics.
        if fillna:
            filler = (self.data_process.filled_with_sw1_stats_value
                      if self.industry_type == 'sw1'
                      else self.data_process.filled_with_ci1_stats_value)
            concat_data = filler(concat_data, [factor_name])

        # Drop any remaining rows with missing values.
        concat_data = concat_data.dropna()

        # Winsorize outliers (medcouple-based).
        if winsorize:
            concat_data = self.data_process.cs_medcouple_outlier_process(concat_data, [factor_name])

        # Cross-sectional z-score normalization.
        if normalization:
            concat_data = self.data_process.cs_z_score_normalization_process(concat_data, [factor_name])

        # Deduplicate on (stock_code, date), keeping the latest row.
        if drop_duplicates:
            concat_data = concat_data.drop_duplicates(['stock_code', 'date'], keep='last')

        print('processed analysis data finish!')

        return concat_data

    #TODO add quantile-group labels
    def add_group_lebel(self, df, factor_name, method='cap', group_num=5):
        '''Assign 1-based quantile-group labels for `factor_name`, optionally
        neutralized within market-cap buckets ('cap') or industries
        ('sw1' / 'ci1'); any other method uses plain cross-sectional
        quantiles.
        '''
        df = df.reset_index('date').copy()

        # rank(method='first') breaks ties so pd.qcut gets unique edges.
        factor_rank = df[factor_name].rank(method='first')

        if method == 'cap':
            # Neutralize by market cap: bucket by total cap first.
            buckets = pd.qcut(df['scale_total_market_size'].rank(method='first'),
                              group_num, labels=False)
        elif method == 'sw1':
            buckets = df['sw1_name']
        elif method == 'ci1':
            buckets = df['ci1_name']
        else:
            # No neutralization: plain cross-sectional quantiles.
            return pd.qcut(factor_rank, self.group_quantile, labels=False) + 1

        # Quantile-cut the factor rank within each neutralization bucket.
        return factor_rank.groupby(buckets).apply(pd.qcut, self.group_quantile, labels=False) + 1

    #TODO compute per-stock portfolio weights
    def cal_stock_weight(self, df, method='ew'):
        '''Compute within-group stock weights.

        'cir_cap' weights by circulating market cap, 'total_cap' by total
        market cap; anything else is equal weight.  NOTE: the equal-weight
        branch adds a 'weight' column to the caller's frame.
        '''
        if method == 'cir_cap':
            weight = df.groupby('group').apply(
                lambda g: g['scale_circulate_market_size'] / g['scale_circulate_market_size'].sum()
            )
        elif method == 'total_cap':
            weight = df.groupby('group').apply(
                lambda g: g['scale_total_market_size'] / g['scale_total_market_size'].sum()
            )
        else:
            df['weight'] = 1
            weight = df.groupby('group').apply(lambda g: g['weight'] / g.shape[0])

        # Drop the group level so the index matches the input rows.
        weight.index = weight.index.droplevel('group')
        return weight.sort_index()

    #TODO - plot cumulative absolute quantile-return curves
    def plot_periods_abs_ret(self, ret, factor_name, plot_save_dir):
        '''Plot the per-quantile cumulative absolute-return curves and save
        the figure under `plot_save_dir`.
        '''
        cumulative = (ret + 1).cumprod()

        cumulative.plot(legend=True, fontsize=12, lw=1.5, alpha=0.8, figsize=(20, 10))
        plt.title(factor_name +' quantile return', fontsize=12)
        plt.legend(loc=2, bbox_to_anchor=(1, 1), borderaxespad=0, fontsize=12)

        # Custom (non-string) universes get a generic tag in the file name.
        pool_tag = self.universe_pool if type(self.universe_pool) == str else 'self_def_pool'
        save_filename = '%s_%s_bench_%s_pool_quantile_abs_return_%s_to_%s.png' % (factor_name,
                                                                                  self.benchmark_index_code,
                                                                                  pool_tag,
                                                                                  self.start_date,
                                                                                  self.end_date)

        plt.savefig(os.path.join(plot_save_dir, save_filename), dpi=200)
        plt.close()

    # TODO - plot cumulative excess quantile-return curves
    def plot_periods_excess_ret(self, ret, factor_name, plot_save_dir):
        '''Plot the per-quantile cumulative excess-return curves (relative to
        the benchmark) and save the figure under `plot_save_dir`.
        '''
        cumulative = (ret + 1).cumprod()

        cumulative.plot(legend=True, fontsize=12, lw=1.5, alpha=0.8, figsize=(20, 10))
        plt.title(factor_name +' quantile excess return', fontsize=12)
        plt.legend(loc=2, bbox_to_anchor=(1, 1), borderaxespad=0, fontsize=12)

        # Custom (non-string) universes get a generic tag in the file name.
        pool_tag = self.universe_pool if type(self.universe_pool) == str else 'self_def_pool'
        save_filename = '%s_%s_bench_%s_pool_quantile_excess_return_%s_to_%s.png' % (factor_name,
                                                                                     self.benchmark_index_code,
                                                                                     pool_tag,
                                                                                     self.start_date,
                                                                                     self.end_date)

        plt.savefig(os.path.join(plot_save_dir, save_filename), dpi=200)
        plt.close()

    # TODO - plot the long-short return curve
    def plot_periods_top_bottom_ret(self, cum_abs_ret, factor_name, plot_save_dir):
        '''Plot the top-minus-bottom (long-short) cumulative return curve and
        save it under `plot_save_dir`/long_short, flipping to bottom-minus-top
        when the factor direction is inverted.
        '''
        long_short_plot_save_dir = os.path.join(plot_save_dir, 'long_short')
        if not os.path.exists(long_short_plot_save_dir):
            os.makedirs(long_short_plot_save_dir)

        # Use whichever long-short direction ends up profitable.
        series = cum_abs_ret[['top-bot']].copy()
        if series.iloc[-1, 0] < 1.0:
            series = cum_abs_ret[['bot-top']].copy()

        series.plot(legend=True, fontsize=12, lw=1.5, alpha=0.8, figsize=(20, 10))
        plt.title(factor_name +' quantile excess return', fontsize=12)
        plt.legend(loc=2, bbox_to_anchor=(1, 1), borderaxespad=0, fontsize=12)

        # Custom (non-string) universes get a generic tag in the file name.
        pool_tag = self.universe_pool if type(self.universe_pool) == str else 'self_def_pool'
        save_filename = '%s_%s_bench_%s_pool_quantile_long_short_return_%s_to_%s.png' % (factor_name,
                                                                                         self.benchmark_index_code,
                                                                                         pool_tag,
                                                                                         self.start_date,
                                                                                         self.end_date)

        plt.savefig(os.path.join(long_short_plot_save_dir, save_filename), dpi=200)
        plt.close()

    # TODO - 单因子汇总分析
    def one_factor_analysis(self,
                            factor_name,
                            analysis_result_cache_dir,
                            fillna=False,
                            winsorize=False,
                            normalization=False,
                            drop_duplicates=True):
        # TODO - 加载分析数据
        st = time()
        labeled_analysis_data = self.load_and_concat_factor_data(factor_name)
        et = time()
        print(factor_name, 'load and concat factor data done, time spent is %.5f sec.' % (et-st))

        # TODO - 数据过滤
        st = time()
        filltered_analysis_data = self.fillter_analysis_data(labeled_analysis_data)

        # TODO - 释放一些空间
        del labeled_analysis_data

        et = time()
        print(factor_name, 'filter data done, time spent is %.5f sec.' % (et-st))

        # TODO - 数据预处理
        st = time()
        processed_analysis_data = \
            self.analysis_data_processed(filltered_analysis_data,
                                         factor_name=factor_name,
                                         fillna=fillna,
                                         winsorize=winsorize,
                                         normalization=normalization,
                                         drop_duplicates=drop_duplicates)

        # TODO - 释放一些空间
        del filltered_analysis_data

        et = time()
        print(factor_name, 'process data done, time spent is %.5f sec.' % (et-st))

        ######
        if type(self.universe_pool) == str:
            writer = pd.ExcelWriter(os.path.join(analysis_result_cache_dir,
                                                 factor_name + '_' +
                                                 self.universe_pool + '_pool_' +
                                                 self.benchmark_index_code + '_bench_' +
                                                 '_analysis_result.xlsx'))
        else:
            writer = pd.ExcelWriter(os.path.join(analysis_result_cache_dir,
                                                 factor_name +
                                                 '_self_def_pool_' +
                                                 self.benchmark_index_code + '_bench_' +
                                                 '_analysis_result.xlsx'))

        # TODO - 回归分析
        st = time()
        if self.is_do_regress_analysis:
            reg_result, reg_stats = self.factor_reg_return(processed_analysis_data,
                                                           factor_name=factor_name,
                                                           industry_type=self.industry_type,
                                                           method='OLS')

            # 保存回归统计数据
            reg_stats = pd.DataFrame(pd.Series(reg_stats), columns=['value'])
            reg_stats.index.name = 'statstics'

            reg_stats.to_excel(writer, 'reg_stats')
            reg_result.to_excel(writer, 'reg_result')

            et = time()
            print(factor_name, 'factor regression analysis finish, time spent is %.5f sec.' % (et-st))
        else:
            reg_stats = None
            reg_result = None

        # TODO - IC分析
        st = time()
        ic,\
        ic_stats = self.cal_factor_ic(processed_analysis_data, factor_name=factor_name)

        # 保存IC统计数据
        ic_stats = pd.DataFrame(pd.Series(ic_stats), columns=['value'])
        ic_stats.index.name = 'statstics'

        ic_stats.to_excel(writer, 'ic_stats')
        ic.to_excel(writer, 'ic_result')

        et = time()
        print(factor_name, 'factor IC analysis finish, time spent is %.5f sec.' % (et-st))

        # TODO - 分组收益分析
        st = time()

        abs_ret, \
        excess_ret,\
        cum_abs_ret, \
        cum_excess_ret, \
        position_data, \
        quantile_result_stats = self.cal_quantile_return(label_factor_data=processed_analysis_data,
                                                         benchmark_index_ret=self.index_data,
                                                         factor_name=factor_name,
                                                         rebalance_date=self.rebalance_date_data,
                                                         group_quantile=self.group_quantile,
                                                         neutral_group_quantile=self.neutral_group_quantile,
                                                         group_method=self.group_method,
                                                         weight_method=self.weight_method)

        # TODO - 释放一些空间
        del processed_analysis_data

        quantile_result_stats.to_excel(writer, 'quantile_result_stats')
        writer.save()
        writer.close()

        et = time()
        print(factor_name, 'factor quantile group analysis finish, time spent is %.5f sec.' % (et - st))

        # TODO - 保存分组收益分析数据
        if self.is_save_quantile_detail:
            st = time()
            # TODO - 保存分组收益细节数据
            if type(self.universe_pool) == str:
                writer_detail = pd.ExcelWriter(os.path.join(analysis_result_cache_dir,
                                                            factor_name + '_factor_groupby_' +
                                                            self.group_method + '_' +
                                                            self.weight_method + '_weight_' +
                                                            self.universe_pool + '_pool_' +
                                                            self.benchmark_index_code + '_bench_' +
                                                            str(self.rebalance_type) + '_quantile_detail_result.xlsx'))
            else:
                writer_detail = pd.ExcelWriter(os.path.join(analysis_result_cache_dir,
                                                            factor_name + '_factor_groupby_' +
                                                            self.group_method + '_' +
                                                            self.weight_method + '_weight_' +
                                                            '_self_def_pool_' +
                                                            self.benchmark_index_code + '_bench_' +
                                                            str(self.rebalance_type) + '_quantile_detail_result.xlsx'))

            abs_ret.to_excel(writer_detail, 'abs_return_result')
            cum_abs_ret.to_excel(writer_detail, 'cum_abs_return_result')
            excess_ret.to_excel(writer_detail, 'excess_return_result')
            cum_excess_ret.to_excel(writer_detail, 'cum_excess_return_result')
            position_data.to_excel(writer_detail,'holding_detail')
            writer_detail.save()
            writer_detail.close()

            et = time()
            print(factor_name, 'factor quantile group analysis result saved, time spent is %.5f sec.' % (et - st))
        ##################################

        del position_data
        # del cum_abs_ret
        del cum_excess_ret
        gc.collect()

        # print('factor quantile new_test finish!')
        print(factor_name, 'save single first-step factor analysis done!')

        return reg_stats, reg_result, ic, ic_stats, abs_ret, cum_abs_ret, excess_ret, quantile_result_stats

    # TODO - aggregate analysis over a list of factors
    def factor_list_analysis_main(self,
                                  factor_name_list,
                                  summary_result_filename_prefix,
                                  summary_result_save_dir,
                                  single_factor_analysis_result_cache_dir,
                                  single_factor_plot_save_dir=None,
                                  **kwargs):
        '''
        Run the single-factor analysis for every factor in *factor_name_list*,
        optionally plot per-factor quantile curves, and aggregate all per-factor
        statistics into summary Excel workbooks under *summary_result_save_dir*.

        :param factor_name_list: list of factor names to analyse
        :param summary_result_filename_prefix: filename prefix for the summary files
        :param summary_result_save_dir: directory receiving the aggregated summary files
        :param single_factor_analysis_result_cache_dir: directory receiving per-factor result files
        :param single_factor_plot_save_dir: directory receiving per-factor plots; no plots when None
        :param kwargs: optional preprocessing flags forwarded to one_factor_analysis:
                       fillna (default False), winsorize (default False),
                       normalization (default False), drop_duplicates (default True)
        :return: None; all results are written to disk.
        '''
        # Preprocessing options with their defaults; dict.get replaces the
        # original repetitive "if 'x' in kwargs" blocks.
        fillna = kwargs.get('fillna', False)
        winsorize = kwargs.get('winsorize', False)
        normalization = kwargs.get('normalization', False)
        drop_duplicates = kwargs.get('drop_duplicates', True)

        tst = time()

        # Aggregation containers: one column per factor while collecting.
        ic_data = pd.DataFrame()
        reg_return = pd.DataFrame()
        reg_stats_data = {}
        ic_stats_data = {}

        annual_return = pd.DataFrame()
        annual_vol = pd.DataFrame()
        SR = pd.DataFrame()
        winrate = pd.DataFrame()
        max_draw = pd.DataFrame()

        excess_annual_return = pd.DataFrame()
        TE = pd.DataFrame()
        IR = pd.DataFrame()
        excess_winrate = pd.DataFrame()
        excess_max_draw = pd.DataFrame()

        # Load the shared market/universe data once for all factors.
        self.initialization()

        for factor_name in factor_name_list:
            try:
                tmp_st = time()

                reg_stats, \
                reg_result, \
                ic, \
                ic_stats, \
                ret, \
                cum_abs_ret, \
                excess_ret, \
                quantile_result_stats = self.one_factor_analysis(factor_name,
                                                                 single_factor_analysis_result_cache_dir,
                                                                 fillna=fillna,
                                                                 winsorize=winsorize,
                                                                 normalization=normalization,
                                                                 drop_duplicates=drop_duplicates)

                # Per-factor quantile net-value curves.
                if single_factor_plot_save_dir is not None:
                    st = time()

                    if self.is_plot_quantile_abs_return:
                        self.plot_periods_abs_ret(ret, factor_name=factor_name, plot_save_dir=single_factor_plot_save_dir)

                    self.plot_periods_excess_ret(excess_ret, factor_name=factor_name, plot_save_dir=single_factor_plot_save_dir)

                    self.plot_periods_top_bottom_ret(cum_abs_ret, factor_name=factor_name, plot_save_dir=single_factor_plot_save_dir)

                    et = time()
                    print(factor_name, 'get figure done, time spent is %.5f sec.' % (et - st))

                # Collect this factor's statistics into the summary containers.
                ic_data[factor_name] = ic['ic'].copy()

                if self.is_do_regress_analysis:
                    reg_return[factor_name] = reg_result['factor_return'].copy()
                    # FIX: store the 'value' Series (mirroring ic_stats below)
                    # rather than the whole one-column DataFrame, so that
                    # pd.DataFrame(reg_stats_data).T later builds the intended
                    # factor-per-row statistics table.
                    reg_stats_data[factor_name] = reg_stats['value']

                ic_stats_data[factor_name] = ic_stats['value']
                annual_return[factor_name] = quantile_result_stats['annual_return'].copy()
                annual_vol[factor_name] = quantile_result_stats['annual_vol'].copy()
                SR[factor_name] = quantile_result_stats['SR'].copy()
                winrate[factor_name] = quantile_result_stats['winrate'].copy()
                max_draw[factor_name] = quantile_result_stats['max_draw'].copy()
                excess_annual_return[factor_name] = quantile_result_stats['excess_annual_return'].copy()
                TE[factor_name] = quantile_result_stats['TE'].copy()
                IR[factor_name] = quantile_result_stats['IR'].copy()
                excess_winrate[factor_name] = quantile_result_stats['excess_winrate'].copy()
                excess_max_draw[factor_name] = quantile_result_stats['excess_max_draw'].copy()

                # Free the per-factor intermediates before the next iteration.
                del cum_abs_ret
                del reg_stats
                del reg_result
                del ic
                del ic_stats
                del ret
                del excess_ret
                del quantile_result_stats
                gc.collect()

                tmp_et = time()
                print(factor_name + ' analysis all done, time spend is : %.4f sec' % (tmp_et - tmp_st))
            except Exception as e:
                # Best-effort batch: a failing factor is reported and skipped so
                # the remaining factors still get analysed.
                print(factor_name + ' analysis, error :')
                print(e)

            print()

        st = time()
        save_filename_prefix = summary_result_filename_prefix + '_' + datetime.datetime.now().strftime('%Y-%m-%d')

        ic_data.to_excel(os.path.join(summary_result_save_dir, save_filename_prefix + '_factor_analysis_ic_data.xlsx'))

        if self.is_do_regress_analysis:
            reg_return.to_excel(os.path.join(summary_result_save_dir, save_filename_prefix + '_factor_analysis_reg_return.xlsx'))

        writer = pd.ExcelWriter(os.path.join(summary_result_save_dir, save_filename_prefix + '_factor_analysis_statistic.xlsx'))

        def _format_group_stats(stats):
            # Transpose to one row per factor and rename the leading
            # group_quantile columns to g1..gN, keeping any trailing
            # summary columns (e.g. long-short) unchanged.
            stats = pd.DataFrame(stats).T
            stats.index.name = 'factor_name'
            stats.columns = \
                ['g%d' % (g + 1) for g in range(self.group_quantile)] + stats.columns[self.group_quantile:].tolist()
            return stats

        if self.is_do_regress_analysis:
            reg_stats_data = pd.DataFrame(reg_stats_data).T
            reg_stats_data.index.name = 'factor_name'
            reg_stats_data.to_excel(writer, '回归分析统计量')

        ic_stats_data = pd.DataFrame(ic_stats_data).T
        ic_stats_data.index.name = 'factor_name'
        ic_stats_data.columns.name = None
        ic_stats_data.to_excel(writer, 'IC检验统计量')

        annual_return = _format_group_stats(annual_return)
        annual_return.to_excel(writer, '分组年化收益率')

        annual_vol = _format_group_stats(annual_vol)
        annual_vol.to_excel(writer, '分组年化波动率')

        SR = _format_group_stats(SR)
        SR.to_excel(writer, '夏普比率')

        winrate = _format_group_stats(winrate)
        winrate.to_excel(writer, '胜率')

        # FIX: the original wrote max_draw.T here, which (unlike every other
        # sheet) re-transposed the table after the g1..gN renaming and emitted
        # factors in columns. Write it factor-per-row like its siblings.
        max_draw = _format_group_stats(max_draw)
        max_draw.to_excel(writer, '最大回撤')

        excess_annual_return = _format_group_stats(excess_annual_return)
        excess_annual_return.to_excel(writer, '年化超额收益率')

        track_error = _format_group_stats(TE)
        track_error.to_excel(writer, '分组超额收益波动率')

        IR = _format_group_stats(IR)
        IR.to_excel(writer, '分组信息比率')

        excess_winrate = _format_group_stats(excess_winrate)
        excess_winrate.to_excel(writer, '超额胜率')

        excess_max_draw = _format_group_stats(excess_max_draw)
        excess_max_draw.to_excel(writer, '超额最大回撤')

        writer.save()
        writer.close()

        # Release the large summary tables before returning.
        del ic_stats_data
        del annual_return
        del annual_vol
        del SR
        del winrate
        del max_draw
        del excess_annual_return
        del track_error
        del IR
        del excess_winrate
        del excess_max_draw
        gc.collect()

        et = time()
        print('save final analysis data done, time spent is %.5f sec.' % (et - st))

        tet = time()
        print('analysis, total time spend is : %.4f sec.\n' % (tet - tst))

if __name__ == '__main__':
    # Analysis window (an earlier run used start_date = '2008-01-01').
    start_date = '2010-01-01'
    end_date = '2019-05-31'

    # FIX: the original passed rebalance_type='startegy', a typo that matches
    # neither documented option ('strategy' or 'fixed', per the constructor
    # docstring), so the strategy-based rebalance branch could never trigger.
    analysis_api = FactorAnalysisPipeline(start_date=start_date,
                                          end_date=end_date,
                                          group_quantile=10,
                                          liquidity_filter='money',
                                          liquidity_filter_period=21,
                                          rebalance_type='strategy',
                                          universe_pool='all',
                                          benchmark_index_code='000905',
                                          factor_database='public',
                                          group_method='sw1',
                                          weight_method='cir_cap')

    # Growth-style factors analysed in this batch.
    factor_name_list = ['growth_roic_yoy',
                        'growth_total_liability_yoy',
                        'growth_marginal_roe',
                        'growth_marginal_roic',
                        'growth_roic_change',
                        'growth_gross_margin_delta']

    # analysis_name = 'csi500_20100101_20190701'
    analysis_name = 'all_20190101_20190531'
    summary_result_filename_prefix = 'growth'
    summary_result_save_dir = '/db/zg_data/zbc/factor_analysis/new_test/' + analysis_name
    single_factor_analysis_result_cache_dir = os.path.join(summary_result_save_dir, 'single_factor_result')
    single_factor_plot_save_dir = os.path.join(single_factor_analysis_result_cache_dir, 'plot')

    # The plot directory is the deepest path; makedirs(exist_ok=True) creates
    # it and all missing parents in one call, replacing the three separate
    # exists()/makedirs() checks.
    os.makedirs(single_factor_plot_save_dir, exist_ok=True)

    # Preprocessing flags forwarded to factor_list_analysis_main.
    kwargs = dict(
        fillna=False,
        winsorize=False,
        normalization=False,
        drop_duplicates=True,
    )

    analysis_api.factor_list_analysis_main(factor_name_list=factor_name_list,
                                           summary_result_filename_prefix=summary_result_filename_prefix,
                                           summary_result_save_dir=summary_result_save_dir,
                                           single_factor_analysis_result_cache_dir=single_factor_analysis_result_cache_dir,
                                           single_factor_plot_save_dir=single_factor_plot_save_dir,
                                           **kwargs)
