#!/usr/bin/python
# -*-coding:utf-8-*-
import os
import pandas as pd
import gc
from time import time

from zg_factor_analysis_module.pipeline_template.zbc_factor_analysis_pipeline import FactorAnalysisPipeline

class FactorTest(FactorAnalysisPipeline):
    """Single-factor analysis pipeline.

    Merges raw factor data with the pipeline's labeled analysis data,
    assigns stocks to quantile groups on each rebalance date, simulates
    the daily drift of group portfolios, and reports per-group absolute
    and excess-return statistics.
    """

    def load_and_concat_factor_data(self, factor_data):
        """Column-concatenate *factor_data* onto the labeled analysis data.

        :param factor_data: DataFrame whose index is compatible with
            ``self.labeled_analysis_data`` (e.g. ['stock_code', 'date'] —
            see the caller `one_factor_analysis`).
        :return: the concatenated DataFrame, restricted to the labeled
            data's index, with the index reset into columns.
        """
        # `join_axes` was removed from pd.concat in pandas 1.0; an outer
        # concat followed by .reindex() on the labeled data's index
        # reproduces the old `join_axes=[index]` behaviour exactly.
        concat_data = pd.concat([self.labeled_analysis_data, factor_data],
                                axis=1).reindex(self.labeled_analysis_data.index)

        print('load factor data finish!')

        return concat_data.reset_index()

    def cal_quantile_return(self,
                            label_factor_data,
                            benchmark_index_ret,
                            factor_name,
                            rebalance_date,
                            group_quantile=5,
                            neutral_group_quantile=5,
                            group_method='cap',
                            weight_method='ew'):
        '''
        Compute per-quantile portfolio returns and summary statistics.

        :param label_factor_data: labeled factor data with 'date' and
            'stock_code' columns (plus 'paused', 'up_one_line',
            'down_one_line' tradability flags and the factor column)
        :param benchmark_index_ret: benchmark index returns; must expose a
            'quote_rate' column indexed by date
        :param factor_name: name of the factor column
        :param rebalance_date: rebalance dates, list
        :param group_quantile: number of quantile groups
        :param neutral_group_quantile: number of sub-groups used inside
            each neutralization bucket
        :param group_method: grouping method: sw1, ci1, cap, other (no neutralization)
        :param weight_method: weighting method: ew, cir_cap, total_cap

        :return:
        quantile_result_stats: DataFrame of per-group statistics
            (annual_return, annual_vol, SR, excess_annual_return, TE, IR)
        '''

        # Filtering step; redundant if the concatenated data is already filtered.
        label_factor_data = label_factor_data.set_index(['date', 'stock_code']).copy()
        cal_data = label_factor_data.copy()
        trade_date = sorted(cal_data.index.get_level_values('date').unique().tolist())

        # Quantile-group labels are assigned on rebalance dates only.
        cal_data = cal_data.reset_index()
        cal_data = cal_data[cal_data['date'].isin(rebalance_date)]
        cal_data = cal_data.set_index(['date', 'stock_code'])

        # NOTE: `_add_group_lebel` (sic) is defined on the base pipeline class.
        grouped_data = cal_data.groupby('date').apply(
            lambda x: self._add_group_lebel(x,
                                            factor_name,
                                            method=group_method,
                                            quantile=group_quantile,
                                            neutral_quantile=neutral_group_quantile)
        )
        cal_data['group'] = grouped_data.loc[cal_data.index]

        # Per-date stock weights (rows with any NaN are excluded from weighting).
        cal_data['dt'] = cal_data.index.get_level_values('date')
        w_data = cal_data.dropna()
        weights = w_data.groupby('dt').apply(lambda x: self._cal_stock_weight(x, method=weight_method))
        weights.index = weights.index.droplevel('dt')
        cal_data['weight'] = weights.loc[cal_data.index]

        # Build the tradable universe on rebalance dates: drop suspended
        # stocks and stocks that opened limit-up / limit-down.
        daily_cal_data = cal_data.reset_index().copy()

        daily_cal_data = daily_cal_data[daily_cal_data['paused'] == 0.0]
        daily_cal_data = daily_cal_data[daily_cal_data['up_one_line'] == 0.0]
        daily_cal_data = daily_cal_data[daily_cal_data['down_one_line'] == 0.0]
        daily_cal_data = daily_cal_data[['date', 'stock_code', 'group', 'weight']]

        # Walk every trade date, drifting the held portfolio and replacing
        # it on rebalance dates.
        cal_data_list = []
        current_data = pd.DataFrame()
        for td in trade_date:
            # Stocks (re)selected on this date — non-empty only on rebalance dates.
            temp = daily_cal_data[daily_cal_data['date'] == td].copy()

            # Returns start accruing the day after the position is opened.
            if current_data.shape[0] > 0:
                current_data['date'] = td
                current_data = current_data.set_index(['date', 'stock_code'])

                # Daily stock return; missing quotes count as zero return.
                current_data['quote_rate'] = self.ret_data.loc[current_data.index].copy()
                current_data['quote_rate'] = current_data['quote_rate'].fillna(0.0)

                current_data['weight_return'] = current_data['weight'] * current_data['quote_rate']
                cal_data_list.append(current_data.copy())

                # Drift the weights with realized returns, then renormalize
                # within each group so weights sum to 1 again.
                current_data['weight'] = current_data['weight'] * (1+current_data['quote_rate'])

                group_sum = current_data.groupby('group')['weight'].sum()

                current_data['group_sum'] = group_sum.loc[current_data['group']].values
                current_data['weight'] = current_data['weight'] / current_data['group_sum']  # normalize

                current_data = current_data.reset_index()

            # On a selection date the new position replaces the old one and
            # earns no return that day.
            if temp.shape[0] > 0:
                current_data = temp.copy()

        daily_cal_data = pd.concat(cal_data_list, axis=0)
        daily_cal_data = daily_cal_data.drop('group_sum', axis=1)

        cal_data = daily_cal_data.copy()
        # Release the large intermediates before the aggregation below.
        del weights, grouped_data, cal_data_list, daily_cal_data

        # Per-group daily return = sum of weighted stock returns.
        abs_ret = cal_data.groupby(['date', 'group'])['weight_return'].sum()
        abs_ret = abs_ret.reset_index().pivot(index='date', columns='group', values='weight_return')
        # Long-short spreads between the top and bottom quantile groups.
        abs_ret['top-bot'] = abs_ret[group_quantile] - abs_ret[1]
        abs_ret['bot-top'] = abs_ret[1] - abs_ret[group_quantile]

        abs_ret['bench'] = benchmark_index_ret.loc[abs_ret.index, 'quote_rate']

        # Excess return of each quantile group over the benchmark.
        excess_ret = abs_ret.iloc[:, :group_quantile].apply(lambda x: x - abs_ret['bench'])

        # Summary statistics per group (the _calc_* helpers come from the base class).
        quantile_result_stats = {}
        quantile_result_stats['annual_return'] = abs_ret.apply(lambda x: self._calc_annual_return(x))
        quantile_result_stats['annual_vol'] = abs_ret.apply(lambda x: self._calc_annual_volatility(x))
        quantile_result_stats['SR'] = abs_ret.apply(lambda x: self._calc_sharpe_ratio(x))

        quantile_result_stats['excess_annual_return'] = excess_ret.apply(lambda x: self._calc_annual_return(x))
        quantile_result_stats['TE'] = excess_ret.apply(lambda x: self._calc_annual_volatility(x))
        quantile_result_stats['IR'] = excess_ret.apply(lambda x: self._calc_sharpe_ratio(x))

        quantile_result_stats = pd.DataFrame(quantile_result_stats)
        # NOTE: 'statstics' is a historic typo, kept so downstream readers
        # of the output keep working.
        quantile_result_stats.index.name = 'statstics'

        return quantile_result_stats

    def one_factor_analysis(self,
                            factor_data,
                            factor_name,
                            fillna=False,
                            winsorize=False,
                            normalization=False,
                            drop_duplicates=True,
                            long_group_excess_cond_threshold=0.1):
        """Run the full single-factor analysis: load, filter, preprocess,
        quantile-group return analysis, and a simple long-group screen.

        :param factor_data: raw factor DataFrame with 'stock_code' and
            'date' columns
        :param factor_name: name of the factor column
        :param fillna: forwarded to ``analysis_data_processed``
        :param winsorize: forwarded to ``analysis_data_processed``
        :param normalization: forwarded to ``analysis_data_processed``
        :param drop_duplicates: forwarded to ``analysis_data_processed``
        :param long_group_excess_cond_threshold: minimum annualized excess
            return the long group must exceed for the factor to pass
        :return: (long_group_excess, long_group_excess_cond, direction,
            quantile_result_stats)
        """
        factor_data = factor_data.set_index(['stock_code', 'date'])

        # Load and merge factor data with the labeled analysis data.
        st = time()
        labeled_analysis_data = self.load_and_concat_factor_data(factor_data)
        et = time()
        print(factor_name, 'load and concat factor data done, time spent is %.5f sec.' % (et-st))

        # Filter (the base-class method name `fillter_...` is kept as-is).
        st = time()
        filtered_analysis_data = self.fillter_analysis_data(labeled_analysis_data)

        # Release the merged frame early to keep peak memory down.
        del labeled_analysis_data

        et = time()
        print(factor_name, 'filter data done, time spent is %.5f sec.' % (et-st))

        # Preprocess: fillna / winsorize / normalize / de-duplicate.
        st = time()
        processed_analysis_data = \
            self.analysis_data_processed(filtered_analysis_data,
                                         factor_name=factor_name,
                                         fillna=fillna,
                                         winsorize=winsorize,
                                         normalization=normalization,
                                         drop_duplicates=drop_duplicates)

        del filtered_analysis_data

        et = time()
        print(factor_name, 'process data done, time spent is %.5f sec.' % (et-st))

        # Quantile-group return analysis.
        st = time()

        quantile_result_stats = self.cal_quantile_return(label_factor_data=processed_analysis_data,
                                                         benchmark_index_ret=self.index_data,
                                                         factor_name=factor_name,
                                                         rebalance_date=self.rebalance_date_data,
                                                         group_quantile=self.group_quantile,
                                                         neutral_group_quantile=self.neutral_group_quantile,
                                                         group_method=self.group_method,
                                                         weight_method=self.weight_method)

        group_excess_annual_return = quantile_result_stats['excess_annual_return'].dropna()

        # Factor direction: +1 if the top group outperforms the bottom
        # group, else -1 (ties count as -1).
        direction = group_excess_annual_return.iloc[-1] - group_excess_annual_return.iloc[0]
        direction = 1.0 if direction > 0 else -1.0

        # Annualized excess return of the long (better-performing) group.
        long_group_excess = group_excess_annual_return.iloc[-1] if direction > 0 else group_excess_annual_return.iloc[0]

        long_group_excess_cond = long_group_excess > long_group_excess_cond_threshold

        et = time()
        print(factor_name, 'factor quantile group analysis done, time spent is %.5f sec.' % (et - st))

        del processed_analysis_data

        return long_group_excess, long_group_excess_cond, direction, quantile_result_stats

    # # TODO - 因子列表汇总分析
    # def factor_list_analysis_main(self,
    #                               factor_name_list,
    #                               summary_result_filename_prefix,
    #                               summary_result_save_dir,
    #                               single_factor_analysis_result_cache_dir,
    #                               single_factor_plot_save_dir=None,
    #                               **kwargs):
    #     '''
    #     :param factor_name_list: 因子名称列表
    #     :param summary_result_filename_prefix: 所有因子分析信息汇总文件名前缀
    #     :param summary_result_save_dir: 所有因子分析信息汇总文件夹
    #     :param single_factor_analysis_result_cache_dir: 单个因子信息输出文件夹
    #     :param single_factor_plot_save_dir: 单个因子画图信息输出文件夹
    #     :return:
    #     '''
    #
    #     if 'fillna' in kwargs:
    #         fillna = kwargs['fillna']
    #     else:
    #         fillna = False
    #
    #     if 'winsorize' in kwargs:
    #         winsorize = kwargs['winsorize']
    #     else:
    #         winsorize = False
    #
    #     if 'normalization' in kwargs:
    #         normalization = kwargs['normalization']
    #     else:
    #         normalization = False
    #
    #     if 'drop_duplicates' in kwargs:
    #         drop_duplicates = kwargs['drop_duplicates']
    #     else:
    #         drop_duplicates = True
    #
    #     # TODO - 是否保存持仓数据
    #     if 'is_save_position_data' in kwargs:
    #         is_save_position_data = kwargs['is_save_position_data']
    #     else:
    #         is_save_position_data = False
    #
    #     tst = time()
    #
    #     # TODO - 数据加载
    #     # self.initialization()
    #
    #     for factor_name in factor_name_list:
    #         try:
    #             tmp_st = time()
    #
    #             if single_factor_plot_save_dir is not None:
    #                 all_plot_save_dir = os.path.join(single_factor_plot_save_dir, factor_name)
    #
    #                 if not os.path.exists(all_plot_save_dir):
    #                     os.makedirs(all_plot_save_dir)
    #             else:
    #                 all_plot_save_dir = None
    #
    #             ret, \
    #             cum_abs_ret, \
    #             excess_ret, \
    #             quantile_result_stats = self.one_factor_analysis(factor_name=factor_name,
    #                                                              analysis_result_cache_dir=single_factor_analysis_result_cache_dir,
    #                                                              descibes_analysis_result_cache_dir=all_plot_save_dir,
    #                                                              fillna=fillna,
    #                                                              winsorize=winsorize,
    #                                                              normalization=normalization,
    #                                                              drop_duplicates=drop_duplicates,
    #                                                              is_save_position_data=is_save_position_data)
    #
    #             tmp_et = time()
    #             print(factor_name+' analysis all done, time spend is : %.4f sec' % (tmp_et-tmp_st))
    #         except Exception as e:
    #             print(factor_name+ ' analysis, error :')
    #             print(e)
    #             pass
    #
    #
    #     tet = time()
    #     print('analysis, total time spend is : %.4f sec.\n' % (tet-tst))


