#!/usr/bin/python
# -*-coding:utf-8-*-

'''Batch factor analysis V2: adds descriptive-statistics plotting.'''

import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
# plt.switch_backend('agg')

plt.rcParams['axes.unicode_minus'] = False
# plt.style.use('ggplot')

import os
import gc
import pandas as pd
import numpy as np
from time import time
import datetime

from zg_factor_analysis_module.pipeline_template.zbc_factor_analysis_pipeline import FactorAnalysisPipeline

class FactorAnalysisPipelineV2(FactorAnalysisPipeline):
    # TODO - 画多空收益曲线图
    def plot_periods_top_bottom_ret(self, cum_abs_ret, factor_name, plot_save_dir):
        """Plot the cumulative long-short (top-bottom quantile) return curve.

        The plotted direction is chosen so the curve shows the profitable
        leg: if 'top-bot' ends below 1.0 the factor is inverted and
        'bot-top' is plotted instead.

        :param cum_abs_ret: DataFrame of cumulative absolute returns; must
            contain 'top-bot' and 'bot-top' columns.
        :param factor_name: factor name used in the title and file name.
        :param plot_save_dir: directory the PNG is written to.
        """
        long_short_plot_save_dir = plot_save_dir

        # Default to top-minus-bottom; flip when the final cumulative value
        # is below 1.0 (i.e. the short leg outperformed the long leg).
        top_bottom = cum_abs_ret[['top-bot']].copy()
        if top_bottom.iloc[-1, 0] < 1.0:
            top_bottom = cum_abs_ret[['bot-top']].copy()

        top_bottom.plot(legend=True, fontsize=12, lw=1.5, alpha=0.8, figsize=(20, 10))
        plt.title(factor_name + ' quantile excess return', fontsize=12)
        plt.legend(loc=2, bbox_to_anchor=(1, 1), borderaxespad=0, fontsize=12)

        # A non-str universe_pool denotes a user-defined pool; it gets a
        # fixed label so the file name stays well-formed.
        if isinstance(self.universe_pool, str):
            pool_label = self.universe_pool
        else:
            pool_label = 'self_def_pool'

        save_filename = '%s_%s_bench_%s_pool_quantile_long_short_return_%s_to_%s.png' % (
            factor_name,
            self.benchmark_index_code,
            pool_label,
            self.start_date,
            self.end_date)

        plt.savefig(os.path.join(long_short_plot_save_dir, save_filename), dpi=200)

        plt.close()

    ####
    # TODO - 描述性统计
    # pool = '000905'
    # save_dir = '/db/zg_data/zbc/buffer'
    # factor_data = processed_analysis_data.copy()
    #
    # ind_name = 'sw1_name'

    # TODO - 画因子hist分布情况
    # plot the factor's cross-sectional distribution
    def _plot_factor_hist(self, df, factor_name, plot_save_dir, pool=None):
        """Plot a histogram of the factor values and save it as a PNG.

        NaN rows are dropped and values outside the 1%-99% quantile range
        are trimmed so outliers do not dominate the bins.

        :param df: DataFrame containing a `factor_name` column.
        :param factor_name: name of the factor column; also used in the file name.
        :param plot_save_dir: directory the PNG is written to.
        :param pool: pool label for the title; defaults to 'whole_market'.
        """
        df = df.copy()
        df = df.dropna()

        # Trim extreme values (outside the 1%/99% quantiles).
        df = df[(df[factor_name] >= df[factor_name].quantile(0.01)) &
                (df[factor_name] <= df[factor_name].quantile(0.99))]

        plt.figure(figsize=(16, 10))

        # Roughly 10 observations per bin, capped at 100 bins; at least one
        # bin so plt.hist does not fail on very small samples.
        plt.hist(df[factor_name],
                 bins=max(min(df.shape[0] // 10, 100), 1),
                 color='steelblue',
                 edgecolor='k',
                 alpha=0.9)

        # Booleans are required here: the previous string form ('off') is
        # truthy, so it never actually hid the top/right ticks.
        plt.tick_params(top=False, right=False)

        if pool is None:
            pool = 'whole_market'

        plt.title('factor distribution in pool ' + pool, fontsize=20)

        plt.xlabel('factor value', fontsize=15)

        plt.ylabel('count', fontsize=15)

        plt.tick_params(labelsize=10)

        plt.savefig(os.path.join(plot_save_dir, factor_name + '_factor_value_distribution.png'))

        plt.close()

    # TODO - 画因子覆盖率情况
    # plot the factor's coverage over time
    def _plot_factor_coverage(self, df, factor_name, plot_save_dir, pool=None):
        """Plot the number of stocks with a non-NaN factor value per date.

        :param df: DataFrame with 'date' and 'stock_code' columns plus the factor.
        :param factor_name: factor name, used only in the output file name.
        :param plot_save_dir: directory the PNG is written to.
        :param pool: pool label for the title/file name; defaults to 'whole_market'.
        """
        df = df.copy()
        df = df.dropna()

        # Daily count of stocks that still carry a factor value.
        data_count = df.groupby('date')['stock_code'].count()

        plt.figure(figsize=(16, 10))

        # legend must be a real bool: the previous string 'False' is truthy
        # and therefore displayed the legend anyway.
        data_count.plot(legend=False, fontsize=10, alpha=0.9)

        # Booleans required; the 'off' strings were truthy and had no effect.
        plt.tick_params(top=False, right=False)

        if pool is None:
            pool = 'whole_market'

        plt.title('factor filled number in pool %s' % pool, fontsize=20)

        plt.xlabel('date', fontsize=15)

        plt.ylabel('stock number', fontsize=15)

        plt.tick_params(labelsize=10)

        plt.savefig(os.path.join(plot_save_dir, factor_name + '_factor_%s_pool_coverage.png' % pool))

        plt.close()

    # TODO - 画因子行业分布情况
    # plot the factor's per-industry distribution
    def _plot_factor_ind_stats(self, df, factor_name, plot_save_dir, pool=None, ind_name='ci1_name'):
        """Plot per-industry factor statistics (mean / median / Q1 / Q3) as bars.

        Industries are sorted by mean factor value so any systematic
        industry bias of the factor is easy to see.

        :param df: DataFrame with an `ind_name` industry column and the factor column.
        :param factor_name: factor column name; also used in the file name.
        :param plot_save_dir: directory the PNG is written to.
        :param pool: pool label for the title/file name; defaults to 'whole_market'.
        :param ind_name: industry classification column (e.g. 'ci1_name', 'sw1_name').
        """
        df = df.copy()

        grouped = df.groupby([ind_name])[factor_name]

        # One column per statistic, indexed by industry.
        stats = pd.concat([grouped.mean().rename('ind_mean'),
                           grouped.median().rename('ind_median'),
                           grouped.quantile(0.25).rename('ind_q1'),
                           grouped.quantile(0.75).rename('ind_q3')],
                          axis=1)
        stats = stats.sort_values(by='ind_mean')

        stats.plot(kind='bar', edgecolor='k', figsize=(40, 20), fontsize=30, alpha=0.9)

        # Booleans required; the previous 'off' strings are truthy and never
        # actually hid the top/right ticks.
        plt.tick_params(top=False, right=False, labelsize=25)

        plt.legend(loc=2, bbox_to_anchor=(1, 1), borderaxespad=0, fontsize=25)

        if pool is None:
            pool = 'whole_market'

        plt.title('factor ind stats in pool ' + pool, fontsize=40)

        plt.xlabel('', fontsize=35)

        plt.ylabel('factor value', fontsize=35)

        plt.savefig(os.path.join(plot_save_dir, factor_name + '_factor_%s_pool_ind_stats.png' % pool), dpi=200)

        plt.close()

    # TODO - 画因子市值分布情况
    # plot the factor's market-cap distribution per quantile group
    def _plot_factor_cap_stats(self, df, factor_name, plot_save_dir, pool=None, group_num=10):
        """Plot market-cap statistics per factor-quantile bucket.

        Stocks are bucketed into `group_num` equal-size factor quantiles per
        date; mean / median / Q1 / Q3 of circulating market cap are then
        plotted per bucket to reveal any size bias in the factor.

        :param df: DataFrame with 'date', 'stock_code',
            'scale_circulate_market_size' (log-scaled cap) and the factor column.
        :param factor_name: factor column name; also used in the file name.
        :param plot_save_dir: directory the PNG is written to.
        :param pool: pool label; defaults to 'whole_market'.
        :param group_num: number of factor quantile buckets.
        """
        df = df.copy()

        df = df.set_index(['date', 'stock_code'])

        df = df.rename(columns={'scale_circulate_market_size': 'cap'})

        # The stored cap is log-scaled; undo the log for readable magnitudes.
        df['cap'] = np.exp(df['cap'])

        # Per-date quantile bucket (0..group_num-1); rank(method='first')
        # breaks ties so qcut never fails on duplicate factor values.
        df['factor_q'] = df.groupby('date')[factor_name].apply(
            lambda x: pd.qcut(x.rank(method='first'), group_num, labels=False))

        df = df.reset_index()

        grouped = df.groupby(['factor_q'])['cap']

        # One column per statistic, indexed by factor bucket.
        df_plot = pd.concat([grouped.mean().rename('cap_mean'),
                             grouped.median().rename('cap_median'),
                             grouped.quantile(0.25).rename('cap_q1'),
                             grouped.quantile(0.75).rename('cap_q3')],
                            axis=1)

        df_plot.plot(kind='bar', edgecolor='k', alpha=0.9, figsize=(25, 12), fontsize=15)

        # Booleans required; the previous 'off' strings are truthy and never
        # actually hid the top/right ticks.
        plt.tick_params(top=False, right=False)

        plt.legend(loc=2, bbox_to_anchor=(1, 1), borderaxespad=0, fontsize=15)

        if pool is None:
            pool = 'whole_market'

        plt.title('factor cap stats in pool ' + pool, fontsize=20)

        plt.xlabel('factor group', fontsize=15)

        plt.ylabel('stock market cap', fontsize=15)

        plt.tick_params(labelsize=10)

        plt.savefig(os.path.join(plot_save_dir, factor_name + '_factor_%s_pool_cap_stats.png' % pool))

        plt.close()

    def factor_describe_analysis(self, factor_data, factor_name, plot_save_dir, pool=None, cap_group_num=10, ind_name='ci1_name'):
        """Run every descriptive-statistics plot for one factor.

        Produces the value histogram, the coverage curve, the per-industry
        statistics and the per-quantile market-cap statistics, each saved as
        a PNG under `plot_save_dir`.

        :param factor_data: raw analysis DataFrame for the factor.
        :param factor_name: factor column name.
        :param plot_save_dir: directory all PNGs are written to.
        :param pool: pool label forwarded to the plot helpers.
        :param cap_group_num: number of quantile buckets for the cap plot.
        :param ind_name: industry classification column for the industry plot.
        """
        # Work on a NaN-free copy shared by all four plots.
        data = factor_data.copy()
        data = data.dropna()

        self._plot_factor_hist(df=data, factor_name=factor_name,
                               pool=pool, plot_save_dir=plot_save_dir)

        self._plot_factor_coverage(df=data, factor_name=factor_name,
                                   pool=pool, plot_save_dir=plot_save_dir)

        self._plot_factor_ind_stats(df=data, factor_name=factor_name,
                                    pool=pool, plot_save_dir=plot_save_dir,
                                    ind_name=ind_name)

        self._plot_factor_cap_stats(df=data, factor_name=factor_name,
                                    pool=pool, plot_save_dir=plot_save_dir,
                                    group_num=cap_group_num)

    #TODO 单因子汇总分析
    def one_factor_analysis(self,
                            factor_name,
                            analysis_result_cache_dir,
                            descibes_analysis_result_cache_dir,
                            fillna=False,
                            winsorize=False,
                            normalization=False,
                            drop_duplicates=True,
                            is_save_position_data=False):
        # TODO - 加载分析数据
        st = time()
        labeled_analysis_data = self.load_and_concat_factor_data(factor_name)
        et = time()
        print(factor_name, 'load and concat factor data done, time spent is %.5f sec.' % (et-st))

        # TODO - 数据过滤
        st = time()
        filltered_analysis_data = self.fillter_analysis_data(labeled_analysis_data)

        # TODO - 释放一些空间
        del labeled_analysis_data

        et = time()
        print(factor_name, 'filter data done, time spent is %.5f sec.' % (et-st))

        # TODO - 描述性统计(new added)
        st = time()
        self.factor_describe_analysis(factor_data=filltered_analysis_data,
                                      factor_name=factor_name,
                                      plot_save_dir=descibes_analysis_result_cache_dir,
                                      pool=self.universe_pool,
                                      cap_group_num=10,
                                      ind_name=self.industry_type+'_name')
        et = time()
        print(factor_name, 'describe statistics analysis done, time spent is %.5f sec.' % (et-st))

        # TODO - 数据预处理
        st = time()
        processed_analysis_data = \
            self.analysis_data_processed(filltered_analysis_data,
                                         factor_name=factor_name,
                                         fillna=fillna,
                                         winsorize=winsorize,
                                         normalization=normalization,
                                         drop_duplicates=drop_duplicates)

        # TODO - 释放一些空间
        del filltered_analysis_data

        et = time()
        print(factor_name, 'process data done, time spent is %.5f sec.' % (et-st))

        ######
        if type(self.universe_pool) == str:
            writer = pd.ExcelWriter(os.path.join(analysis_result_cache_dir,
                                                 factor_name + '_' +
                                                 self.universe_pool + '_pool_' +
                                                 self.benchmark_index_code + '_bench_' +
                                                 'analysis_result.xlsx'))
        else:
            writer = pd.ExcelWriter(os.path.join(analysis_result_cache_dir,
                                                 factor_name +
                                                 '_self_def_pool_' +
                                                 self.benchmark_index_code + '_bench_' +
                                                 'analysis_result.xlsx'))

        # TODO - 回归分析
        st = time()
        if self.is_do_regress_analysis:
            reg_result, reg_stats = self.factor_reg_return(processed_analysis_data,
                                                           factor_name=factor_name,
                                                           industry_type=self.industry_type,
                                                           method='OLS')

            # 保存回归统计数据
            reg_stats = pd.DataFrame(pd.Series(reg_stats), columns=['value'])
            reg_stats.index.name = 'statstics'

            reg_stats.to_excel(writer, 'reg_stats')
            reg_result.to_excel(writer, 'reg_result')

            et = time()
            print(factor_name, 'factor regression analysis finish, time spent is %.5f sec.' % (et-st))
        else:
            reg_stats = None
            reg_result = None

        # TODO - IC分析
        st = time()
        ic,\
        ic_stats = self.cal_factor_ic(processed_analysis_data,
                                      factor_name=factor_name,
                                      periods=self.rebalance_periods,   # unit: days
                                      method='rank')

        # 保存IC统计数据
        ic_stats = pd.DataFrame(pd.Series(ic_stats), columns=['value'])
        ic_stats.index.name = 'statstics'

        ic_stats.to_excel(writer, 'ic_stats')
        ic.to_excel(writer, 'ic_result')

        et = time()
        print(factor_name, 'factor IC analysis finish, time spent is %.5f sec.' % (et-st))

        # TODO - 分组收益分析
        st = time()

        abs_ret, \
        excess_ret,\
        cum_abs_ret, \
        cum_excess_ret, \
        position_data, \
        quantile_result_stats = self.cal_quantile_return(label_factor_data=processed_analysis_data,
                                                         benchmark_index_ret=self.index_data,
                                                         factor_name=factor_name,
                                                         rebalance_date=self.rebalance_date_data,
                                                         group_quantile=self.group_quantile,
                                                         neutral_group_quantile=self.neutral_group_quantile,
                                                         group_method=self.group_method,
                                                         weight_method=self.weight_method)

        # TODO - 释放一些空间
        del processed_analysis_data

        quantile_result_stats.to_excel(writer, 'quantile_result_stats')
        writer.save()
        writer.close()

        # TODO - 保存每日的超额收益率数据
        excess_ret.to_excel(os.path.join(analysis_result_cache_dir, factor_name + '_daily_excess_return.xlsx'))

        # TODO - 保存每日的绝对收益率数据
        abs_ret.to_excel(os.path.join(analysis_result_cache_dir, factor_name + '_daily_abs_return.xlsx'))

        # TODO - 保存持仓数据
        if is_save_position_data:
            pos_analysis_result_cache_dir = os.path.join(analysis_result_cache_dir, 'position')

            if not os.path.exists(pos_analysis_result_cache_dir):
                os.makedirs(pos_analysis_result_cache_dir)

            tmp_st = time()
            position_data.reset_index().to_hdf(os.path.join(pos_analysis_result_cache_dir, factor_name + '_pos_data.h5'),
                                               key=factor_name + '_pos_data')
            tmp_et = time()
            print('save position time is %.4f sec.' % (tmp_et-tmp_st))

        et = time()
        print(factor_name, 'factor quantile group analysis finish, time spent is %.5f sec.' % (et - st))

        # TODO - 保存分组收益分析数据
        if self.is_save_quantile_detail:
            st = time()
            # TODO - 保存分组收益细节数据
            if type(self.universe_pool) == str:
                writer_detail = pd.ExcelWriter(os.path.join(analysis_result_cache_dir,
                                                            factor_name + '_factor_groupby_' +
                                                            self.group_method + '_' +
                                                            self.weight_method + '_weight_' +
                                                            self.universe_pool + '_pool_' +
                                                            self.benchmark_index_code + '_bench_' +
                                                            str(self.rebalance_type) + '_quantile_detail_result.xlsx'))
            else:
                writer_detail = pd.ExcelWriter(os.path.join(analysis_result_cache_dir,
                                                            factor_name + '_factor_groupby_' +
                                                            self.group_method + '_' +
                                                            self.weight_method + '_weight_' +
                                                            '_self_def_pool_' +
                                                            self.benchmark_index_code + '_bench_' +
                                                            str(self.rebalance_type) + '_quantile_detail_result.xlsx'))

            abs_ret.to_excel(writer_detail, 'abs_return_result')
            cum_abs_ret.to_excel(writer_detail, 'cum_abs_return_result')
            # excess_ret.to_excel(writer_detail, 'excess_return_result')
            cum_excess_ret.to_excel(writer_detail, 'cum_excess_return_result')

            position_data.to_excel(writer_detail,'holding_detail')

            writer_detail.save()
            writer_detail.close()

            et = time()
            print(factor_name, 'factor quantile group analysis result saved, time spent is %.5f sec.' % (et - st))
        ##################################

        del position_data
        # del cum_abs_ret
        del cum_excess_ret
        gc.collect()

        # print('factor quantile new_test finish!')
        print(factor_name, 'save single first-step factor analysis done!')

        return reg_stats, reg_result, ic, ic_stats, abs_ret, cum_abs_ret, excess_ret, quantile_result_stats

    # TODO - 因子列表汇总分析
    def factor_list_analysis_main(self,
                                  factor_name_list,
                                  summary_result_filename_prefix,
                                  summary_result_save_dir,
                                  single_factor_analysis_result_cache_dir,
                                  single_factor_plot_save_dir=None,
                                  **kwargs):
        '''
        :param factor_name_list: 因子名称列表
        :param summary_result_filename_prefix: 所有因子分析信息汇总文件名前缀
        :param summary_result_save_dir: 所有因子分析信息汇总文件夹
        :param single_factor_analysis_result_cache_dir: 单个因子信息输出文件夹
        :param single_factor_plot_save_dir: 单个因子画图信息输出文件夹
        :return:
        '''

        if 'fillna' in kwargs:
            fillna = kwargs['fillna']
        else:
            fillna = False

        if 'winsorize' in kwargs:
            winsorize = kwargs['winsorize']
        else:
            winsorize = False

        if 'normalization' in kwargs:
            normalization = kwargs['normalization']
        else:
            normalization = False

        if 'drop_duplicates' in kwargs:
            drop_duplicates = kwargs['drop_duplicates']
        else:
            drop_duplicates = True

        # TODO - 是否保存持仓数据
        if 'is_save_position_data' in kwargs:
            is_save_position_data = kwargs['is_save_position_data']
        else:
            is_save_position_data = False

        tst = time()

        ic_data=pd.DataFrame()
        reg_return=pd.DataFrame()
        reg_stats_data={}
        ic_stats_data={}

        annual_return=pd.DataFrame()
        annual_vol=pd.DataFrame()
        SR=pd.DataFrame()
        winrate=pd.DataFrame()
        max_draw=pd.DataFrame()

        excess_annual_return=pd.DataFrame()
        TE=pd.DataFrame()
        IR=pd.DataFrame()
        excess_winrate=pd.DataFrame()
        excess_max_draw=pd.DataFrame()

        #TODO 数据加载
        self.initialization()

        for factor_name in factor_name_list:
            # self.factor_name=factor_name
            # factor_name = factor_name_list[2]
            try:
                tmp_st = time()
                # print(factor_name)

                if single_factor_plot_save_dir is not None:
                    all_plot_save_dir = os.path.join(single_factor_plot_save_dir, factor_name)

                    if not os.path.exists(all_plot_save_dir):
                        os.makedirs(all_plot_save_dir)
                else:
                    all_plot_save_dir = None

                reg_stats, \
                reg_result, \
                ic, \
                ic_stats, \
                ret, \
                cum_abs_ret, \
                excess_ret, \
                quantile_result_stats = self.one_factor_analysis(factor_name=factor_name,
                                                                 analysis_result_cache_dir=single_factor_analysis_result_cache_dir,
                                                                 descibes_analysis_result_cache_dir=all_plot_save_dir,
                                                                 fillna=fillna,
                                                                 winsorize=winsorize,
                                                                 normalization=normalization,
                                                                 drop_duplicates=drop_duplicates,
                                                                 is_save_position_data=is_save_position_data)

                # TODO 分组收益净值曲线图
                if all_plot_save_dir is not None:
                    st = time()

                    if self.is_plot_quantile_abs_return:
                        self.plot_periods_abs_ret(ret, factor_name=factor_name, plot_save_dir=all_plot_save_dir)

                    self.plot_periods_excess_ret(excess_ret, factor_name=factor_name, plot_save_dir=all_plot_save_dir)

                    self.plot_periods_top_bottom_ret(cum_abs_ret, factor_name=factor_name, plot_save_dir=all_plot_save_dir)

                    et = time()
                    print(factor_name, 'get figure done, time spent is %.5f sec.' % (et-st))

                # TODO 分组收益净值曲线图
                ic_data[factor_name] = ic['ic'].copy()

                if self.is_do_regress_analysis:
                    reg_return[factor_name] = reg_result['factor_return'].copy()
                    reg_stats_data[factor_name] = reg_stats

                ic_stats_data[factor_name] = ic_stats['value']
                annual_return[factor_name] = quantile_result_stats['annual_return'].copy()
                annual_vol[factor_name] = quantile_result_stats['annual_vol'].copy()
                SR[factor_name] = quantile_result_stats['SR'].copy()
                winrate[factor_name] = quantile_result_stats['winrate'].copy()
                max_draw[factor_name] = quantile_result_stats['max_draw'].copy()
                excess_annual_return[factor_name] = quantile_result_stats['excess_annual_return'].copy()
                TE[factor_name] = quantile_result_stats['TE'].copy()
                IR[factor_name] = quantile_result_stats['IR'].copy()
                excess_winrate[factor_name] = quantile_result_stats['excess_winrate'].copy()
                excess_max_draw[factor_name] = quantile_result_stats['excess_max_draw'].copy()

                # TODO - 释放一些空间
                del cum_abs_ret
                del reg_stats
                del reg_result
                del ic
                del ic_stats
                del ret
                del excess_ret
                del quantile_result_stats
                gc.collect()

                tmp_et = time()
                print(factor_name+' analysis all done, time spend is : %.4f sec' % (tmp_et-tmp_st))
            except Exception as e:
                print(factor_name+ ' analysis, error :')
                print(e)
                pass

            print()

        st = time()
        save_filename_prefix = summary_result_filename_prefix + '_' + datetime.datetime.now().strftime('%Y-%m-%d')

        ic_data.to_excel(os.path.join(summary_result_save_dir, save_filename_prefix + '_factor_analysis_ic_data.xlsx'))

        if self.is_do_regress_analysis:
            reg_return.to_excel(os.path.join(summary_result_save_dir, save_filename_prefix + '_factor_analysis_reg_return.xlsx'))

        writer = pd.ExcelWriter(os.path.join(summary_result_save_dir, save_filename_prefix + '_factor_analysis_statistic.xlsx'))

        if self.is_do_regress_analysis:
            reg_stats_data = pd.DataFrame(reg_stats_data).T
            reg_stats_data.index.name = 'factor_name'
            reg_stats_data.to_excel(writer, '回归分析统计量')

        ######
        ic_stats_data = pd.DataFrame(ic_stats_data).T
        ic_stats_data.index.name = 'factor_name'
        ic_stats_data.columns.name =  None
        ic_stats_data.to_excel(writer, 'IC检验统计量')

        annual_return = pd.DataFrame(annual_return).T
        annual_return.index.name = 'factor_name'
        annual_return.columns = \
            ['g%d' % (g+1) for g in range(self.group_quantile)] + annual_return.columns[self.group_quantile:].tolist()
        annual_return.to_excel(writer, '分组年化收益率')

        annual_vol = pd.DataFrame(annual_vol).T
        annual_vol.index.name = 'factor_name'
        annual_vol.columns = \
            ['g%d' % (g+1) for g in range(self.group_quantile)] + annual_vol.columns[self.group_quantile:].tolist()
        annual_vol.to_excel(writer, '分组年化波动率')

        SR = pd.DataFrame(SR).T
        SR.index.name = 'factor_name'
        SR.columns = \
            ['g%d' % (g+1) for g in range(self.group_quantile)] + SR.columns[self.group_quantile:].tolist()
        SR.to_excel(writer, '夏普比率')

        winrate = pd.DataFrame(winrate).T
        winrate.index.name = 'factor_name'
        winrate.columns = \
            ['g%d' % (g+1) for g in range(self.group_quantile)] + winrate.columns[self.group_quantile:].tolist()
        winrate.to_excel(writer, '胜率')

        max_draw = pd.DataFrame(max_draw).T
        max_draw.index.name = 'factor_name'
        max_draw.columns = \
            ['g%d' % (g+1) for g in range(self.group_quantile)] + max_draw.columns[self.group_quantile:].tolist()
        max_draw.T.to_excel(writer, '最大回撤')

        excess_annual_return = pd.DataFrame(excess_annual_return).T
        excess_annual_return.index.name = 'factor_name'
        excess_annual_return.columns = \
            ['g%d' % (g+1) for g in range(self.group_quantile)] + excess_annual_return.columns[self.group_quantile:].tolist()
        excess_annual_return.to_excel(writer, '年化超额收益率')

        track_error = pd.DataFrame(TE).T
        track_error.index.name = 'factor_name'
        track_error.columns = \
            ['g%d' % (g+1) for g in range(self.group_quantile)] + track_error.columns[self.group_quantile:].tolist()
        track_error.to_excel(writer, '分组超额收益波动率')

        IR = pd.DataFrame(IR).T
        IR.index.name = 'factor_name'
        IR.columns = \
            ['g%d' % (g+1) for g in range(self.group_quantile)] + IR.columns[self.group_quantile:].tolist()
        IR.to_excel(writer, '分组信息比率')

        excess_winrate = pd.DataFrame(excess_winrate).T
        excess_winrate.index.name = 'factor_name'
        excess_winrate.columns = \
            ['g%d' % (g+1) for g in range(self.group_quantile)] + excess_winrate.columns[self.group_quantile:].tolist()
        excess_winrate.to_excel(writer, '超额胜率')

        excess_max_draw = pd.DataFrame(excess_max_draw).T
        excess_max_draw.index.name = 'factor_name'
        excess_max_draw.columns = \
            ['g%d' % (g + 1) for g in range(self.group_quantile)] + excess_max_draw.columns[self.group_quantile:].tolist()
        excess_max_draw.to_excel(writer, '超额最大回撤')
        writer.save()
        writer.close()

        del ic_stats_data
        del annual_return
        del annual_vol
        del SR
        del winrate
        del max_draw
        del excess_annual_return
        del track_error
        del IR
        del excess_winrate
        del excess_max_draw
        gc.collect()

        et = time()
        print('save final analysis data done, time spent is %.5f sec.' % (et - st))

        tet = time()
        print('analysis, total time spend is : %.4f sec.\n' % (tet-tst))


if __name__ == '__main__':
    # Analysis window.
    # start_date = '2008-01-01'
    start_date = '2010-01-01'
    end_date = '2019-05-31'

    # NOTE(review): rebalance_type='startegy' looks like a typo of 'strategy',
    # but it must match whatever key the parent pipeline expects — confirm
    # against FactorAnalysisPipeline before changing it.
    analysis_api = FactorAnalysisPipelineV2(start_date=start_date,
                                            end_date=end_date,
                                            group_quantile=10,
                                            liquidity_filter='money',
                                            liquidity_filter_period=21,
                                            rebalance_type='startegy',
                                            universe_pool='all',
                                            benchmark_index_code='000905',
                                            factor_database='public',
                                            group_method='sw1',
                                            weight_method='cir_cap')

    # Growth factors analysed in this batch.
    factor_name_list = ['growth_roic_yoy',
                        'growth_total_liability_yoy',
                        'growth_marginal_roe',
                        'growth_marginal_roic',
                        'growth_roic_change',
                        'growth_gross_margin_delta']

    # Output layout: summary dir > per-factor results > per-factor plots.
    analysis_name = 'all_20190101_20190531'
    summary_result_filename_prefix = 'growth'
    summary_result_save_dir = './factor_analysis/new_test/' + analysis_name
    single_factor_analysis_result_cache_dir = os.path.join(summary_result_save_dir, 'single_factor_result')
    single_factor_plot_save_dir = os.path.join(single_factor_analysis_result_cache_dir, 'plot')

    # exist_ok avoids the check-then-create race of os.path.exists + makedirs.
    os.makedirs(summary_result_save_dir, exist_ok=True)
    os.makedirs(single_factor_analysis_result_cache_dir, exist_ok=True)
    os.makedirs(single_factor_plot_save_dir, exist_ok=True)

    # Preprocessing switches forwarded to one_factor_analysis.
    kwargs = dict(
        fillna=False,
        winsorize=False,
        normalization=False,
        drop_duplicates=True,
    )

    analysis_api.factor_list_analysis_main(factor_name_list=factor_name_list,
                                           summary_result_filename_prefix=summary_result_filename_prefix,
                                           summary_result_save_dir=summary_result_save_dir,
                                           single_factor_analysis_result_cache_dir=single_factor_analysis_result_cache_dir,
                                           single_factor_plot_save_dir=single_factor_plot_save_dir,
                                           **kwargs)
