#!/usr/bin/python3
# -*- coding: utf-8 -*-
import os
import sys
current_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(current_path)
import time
import math
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import glob
import itertools
from utils import reader, tools, ind
import plotly
from matplotlib import pyplot
import plotly as py
import plotly.graph_objects as go
from plotly.subplots import make_subplots
from datetime import datetime, timedelta
import matplotlib.pyplot as plt
from program.config import *

pd.set_option('expand_frame_repr', False)  # print wide DataFrames on one line instead of wrapping
pd.set_option('display.max_rows', 1000)  # maximum number of rows shown when printing a DataFrame
# Silence pandas' SettingWithCopyWarning (chained assignment is used deliberately below).
pd.set_option('mode.chained_assignment', None)


def transfrom_df(df, col_name, value_name):
    """Reshape long-format *df* into a wide frame.

    One output column per distinct value of *col_name*, indexed by
    'candle_begin_time' and filled with *value_name*.

    NOTE: the first group encountered fixes the row index; later groups
    are aligned to it, so timestamps absent from the first group are
    dropped (this differs from a plain pivot on ragged data).
    """
    wide = pd.DataFrame()
    for key, chunk in df.groupby(col_name):
        column = chunk.set_index('candle_begin_time')[value_name]
        if wide.empty:
            wide = column.to_frame(key)
        else:
            wide[key] = column
    return wide


def calculate_quantile_edges(series, n_quantiles):
    """Return unique, ascending quantile cut points for *series*.

    Splits the value range into *n_quantiles* buckets; duplicate edges
    (caused by heavily tied values) collapse, so fewer than
    ``n_quantiles + 1`` edges may be returned.
    """
    probs = np.linspace(0.0, 1.0, n_quantiles + 1)
    return np.sort(series.quantile(probs).unique())


class FactorAnalysis(object):
    """Cross-sectional factor analysis over a universe of perpetual-swap symbols.

    For every factor column (factor class x look-back window) the pipeline:
      * cross-sectionally percent-ranks the factor every candle,
      * slices symbols into ``group_num`` quantile buckets with ``pd.qcut``,
      * computes per-offset IC mean / ICIR and per-bucket mean returns / IR,
      * optionally dumps CSV files and renders one summary PNG per offset.

    Call :meth:`run` to execute the whole pipeline.
    """

    def __init__(self, start_date, end_date, hold_hour, factor_class_list,
                 is_save_csv=False, group_num=10, back_hour_list=None):
        """
        :param start_date: analysis window start (string or datetime-like)
        :param end_date: analysis window end
        :param hold_hour: holding period such as '6H'; the trailing letter is
            stripped wherever the integer hour count is needed
        :param factor_class_list: factor class names to expand into columns
        :param is_save_csv: whether to dump CSV results
        :param group_num: number of quantile buckets (commonly 5, 10 or 20)
        :param back_hour_list: look-back windows; defaults to
            [3, 4, 6, 8, 12, 24, 48, 72] (None default avoids sharing a
            mutable default list across instances)
        """
        # Resolve the output root relative to this file's location.
        _ = os.path.abspath(os.path.dirname(__file__))
        abs_path = os.path.abspath(os.path.join(_))
        sys.path.append(os.getcwd())
        self.res_path = os.path.join(abs_path, 'data/回测结果/因子分析结果')

        plt.rcParams['font.sans-serif'] = ['SimHei']  # CJK-capable font for chart text
        plt.rcParams['axes.unicode_minus'] = False
        # Analysis window.
        self.begD = start_date
        self.endD = end_date
        self.c_rate = 6 / 10000  # commission rate (kept for reference; not applied below)
        self.hold_hour = hold_hour  # holding period, e.g. '6H'
        self.group_num = group_num  # number of quantile buckets
        self.back_hour_list = ([3, 4, 6, 8, 12, 24, 48, 72]
                               if back_hour_list is None else back_hour_list)
        self.diff_list = [0, ]
        self.black_list = ['UNFIUSDT', 'LUNAUSDT']  # symbols excluded from the universe
        self.factor_class_list = factor_class_list
        self.feature_list = self._init_factor(self.factor_class_list)
        self.data = self.get_data()
        print('获取数据完成')
        self.group = self.get_group_by_factor()
        # BTCUSDT serves as the benchmark net-value series.
        self.benchmark_ret = self.data[self.data.symbol == 'BTCUSDT'][['candle_begin_time', 'avg_price']].copy()
        self.benchmark_ret['ret'] = self.benchmark_ret['avg_price'].pct_change()
        self.benchmark_ret['ret'] = self.benchmark_ret['ret'].fillna(0)
        self.benchmark_ret['net_value'] = self.benchmark_ret.ret.add(1).cumprod()
        self.is_save_csv = is_save_csv  # whether to dump CSV output
        # TODO: build a constant candle_begin_time -> offset lookup table.
        # Offset = whole hours since 2017-01-01, modulo the holding period.
        self.time_series = self.data[self.data.symbol == 'BTCUSDT'][['candle_begin_time']]
        self.time_series = self.time_series.reset_index(drop=True)
        self.time_series['offset'] = self.time_series['candle_begin_time'].apply(lambda x: int(
            ((x.to_pydatetime() - pd.to_datetime('2017-01-01')).total_seconds() / 3600) % int(hold_hour[:-1])))

        self.make_path()

        # Result holders, filled by calc_group_IC / calc_group_ret.
        self.group_ret = None
        self.ret_mean = None
        self.ret_IR = None
        self.ic = None
        self.icir = None
        self.all_ic = None

    def make_path(self):
        """Create the picture/csv output directory tree (idempotent).

        Fix: the original appended the same two paths once per holding hour
        and re-checked existence after ``makedirs(exist_ok=True)``; each
        feature now contributes its two directories exactly once.
        """
        self.picture_path = os.path.join(self.res_path, self.hold_hour, 'picture')
        self.csv_path = os.path.join(self.res_path, self.hold_hour, 'csv')

        paths = [self.res_path, self.picture_path, self.csv_path]
        for feature in self.feature_list:
            factor_dir = feature.split('_bh_')[0]
            paths.append(os.path.join(self.picture_path, factor_dir, feature))
            paths.append(os.path.join(self.csv_path, factor_dir, feature))

        for p in paths:
            os.makedirs(p, exist_ok=True)  # exist_ok makes re-runs safe

        print('输出文件地址已生成 =================')

    def _init_factor(self, factor_class_list):
        """Expand factor classes x look-back hours x diff levels into the
        full list of factor column names, e.g. 'Bias_bh_4' or
        'Bias_bh_4_diff_0.5'."""
        factor_list = []
        for factor_name, back_hour, d_num in itertools.product(factor_class_list, self.back_hour_list,
                                                               self.diff_list):
            if d_num == 0:
                _factor = f'{factor_name}_bh_{back_hour}'
            else:
                _factor = f'{factor_name}_bh_{back_hour}_diff_{d_num}'
            factor_list.append(_factor)
        print(factor_list)
        return factor_list

    def get_data(self):
        """Load, filter and preprocess the raw candle/factor data.

        :return: long-format DataFrame with per-candle percent-ranked factor
            columns and the next-period return in ``ret_next``.
        """
        df = reader.readall('swap', self.hold_hour, self.factor_class_list, filter_class_list=[],
                            date_range=(self.begD, self.endD))
        # Drop rows that cannot be traded or scored.
        df = df[df['volume'] > 0]  # candles with no trading volume
        df.dropna(subset=['下个周期_avg_price'], inplace=True)  # trailing rows lack the next-period price
        # Restrict to the requested date range.
        df = df[df['candle_begin_time'] >= pd.to_datetime(self.begD)]
        df = df[df['candle_begin_time'] <= pd.to_datetime(self.endD)]
        # === preprocessing
        df = df.sort_values('candle_begin_time')
        df['ret'] = (df['下个周期_avg_price'] / df['avg_price'] - 1)
        df['ret_next'] = df.groupby('symbol')['ret'].shift(-1)
        df = df[df.ret_next.notnull()]
        # Remove blacklisted symbols.
        df = df[~df.symbol.isin(self.black_list)]
        df = df.replace([np.inf, -np.inf], np.nan)
        df = df.set_index(['candle_begin_time', 'symbol']).sort_index()
        # Cross-sectional percent rank per candle; NaNs then take the median rank.
        df[self.feature_list] = df.groupby('candle_begin_time')[self.feature_list].apply(
            lambda x: x.rank(pct=True, ascending=True))
        df[self.feature_list] = df.groupby('candle_begin_time')[self.feature_list].apply(lambda x: x.fillna(x.median()))
        df.reset_index(inplace=True)
        return df

    def get_group_by_factor(self):
        """Bucket every factor into ``group_num`` quantile groups per candle.

        :return: dict mapping factor name -> DataFrame with a 'num' bucket
            column (0 = lowest factor rank).

        NOTE(review): ``pd.qcut`` raises ``ValueError: Bin edges must be
        unique`` when a candle has too many tied factor values; the module's
        ``calculate_quantile_edges`` + ``pd.cut`` is the intended fallback
        for that case.
        """
        result = {}
        for factor_name in self.feature_list:
            if '_bh_' in factor_name:
                print(f'{factor_name}:正在切片')
                df = self.data[['candle_begin_time', 'symbol', 'ret_next', factor_name]].copy()
                df['num'] = df.groupby('candle_begin_time')[factor_name].apply(
                    lambda x: pd.qcut(x, self.group_num, labels=range(0, self.group_num)))
                result[factor_name] = df
        return result

    def calc_group_IC(self):
        """Compute per-candle IC and per-offset IC mean / ICIR for every factor.

        Layout of the returned frames (one row per offset)::

               factor1_IC_mean | factor2_IC_mean | ...

        :return: (ic_mean_df, icir_df); also stored on ``self.ic``,
            ``self.icir`` and the per-candle series on ``self.all_ic``.
        """
        res = {}
        res1 = pd.DataFrame()
        res2 = pd.DataFrame()
        for factor_name, df in self.group.items():
            print(f'======{factor_name}-groupIC======')
            # (Fix: dropped a redundant merge of time_series onto df here —
            # the merged offset column was never used by the groupby below.)
            # IC = Pearson correlation between factor rank and next-period
            # return within each candle.
            temp = df.groupby(['candle_begin_time']).apply(
                lambda x: np.corrcoef(x[factor_name], x['ret_next'])[0, 1]).to_frame(f'{factor_name}_IC')
            temp = temp.reset_index()
            temp = temp.merge(self.time_series, how='left')  # attach the offset column
            res[factor_name] = temp
            if res1.empty:
                res1 = temp.groupby(['offset']).apply(
                    lambda x: x[f'{factor_name}_IC'].mean()).to_frame(f'{factor_name}_IC_mean')
            else:
                res1[f'{factor_name}_IC_mean'] = temp.groupby(['offset']).apply(
                    lambda x: x[f'{factor_name}_IC'].mean())

            if res2.empty:
                res2 = temp.groupby(['offset']).apply(
                    lambda x: x[f'{factor_name}_IC'].mean() / x[f'{factor_name}_IC'].std()).to_frame(
                    f'{factor_name}_ICIR')
            else:
                res2[f'{factor_name}_ICIR'] = temp.groupby(['offset']).apply(
                    lambda x: x[f'{factor_name}_IC'].mean() / x[f'{factor_name}_IC'].std())
        self.ic = res1
        self.icir = res2
        self.all_ic = res
        return res1, res2

    def calc_group_ret(self):
        """Compute per-bucket returns for every factor.

        Layout of the aggregate frames (MultiIndex (offset, num) rows)::

               factor1_ret_mean | factor2_ret_mean | ...

        :return: (per-factor wide return frames, mean-return frame,
            return-IR frame); also stored on ``self.group_ret``,
            ``self.ret_mean`` and ``self.ret_IR``.
        """
        res = {}
        res1 = pd.DataFrame()
        res2 = pd.DataFrame()
        for factor_name, df in self.group.items():
            print(f'======{factor_name}-groupRet======')
            df = df.merge(self.time_series, how='left')  # attach the offset column
            if res1.empty:
                res1 = df.groupby(['offset', 'num']).agg({'ret_next': 'mean'})
                res1.rename(columns={'ret_next': f'{factor_name}_ret_mean'}, inplace=True)
            else:
                res1[f'{factor_name}_ret_mean'] = df.groupby(['offset', 'num']).agg({'ret_next': 'mean'})
            # Per-candle mean return of each bucket, reshaped to one column
            # per bucket; offset is re-attached from the time table.
            temp = df.groupby(['candle_begin_time', 'num']).agg({'ret_next': 'mean', 'offset': 'last'})
            temp0 = transfrom_df(temp.reset_index(), col_name='num', value_name='ret_next')
            temp0['offset'] = self.time_series.set_index('candle_begin_time')['offset']
            res[factor_name] = temp0
            if res2.empty:
                res2 = temp.groupby(['offset', 'num']).apply(lambda x: x.ret_next.mean() / x.ret_next.std()).to_frame(
                    f'{factor_name}_ret_IR')
            else:
                res2[f'{factor_name}_ret_IR'] = temp.groupby(['offset', 'num']).apply(
                    lambda x: x.ret_next.mean() / x.ret_next.std())
        self.group_ret = res
        self.ret_mean = res1
        self.ret_IR = res2
        print(f'calc_group_ret ===========完成')
        return res, res1, res2

    def _save_csv_group_ret(self):
        """Dump one cumulative-net-value CSV per feature per offset."""
        for f in self.feature_list:
            df = self.group_ret[f]
            p0 = os.path.join(self.csv_path, f.split('_bh_')[0], f)
            for n in range(int(self.hold_hour[:-1])):
                temp = df[df.offset == n].copy()
                temp = temp.add(1).cumprod()  # simple returns -> equity curve
                p = os.path.join(p0, f'{f}_资金曲线_offset_{n}.csv')
                temp.to_csv(p, index=True, encoding='gbk')

    def _save_csv_all_ic(self):
        """Dump the per-candle IC time series per feature per offset."""
        for f in self.feature_list:
            df = self.all_ic[f]
            p0 = os.path.join(self.csv_path, f.split('_bh_')[0], f)
            for n in range(int(self.hold_hour[:-1])):
                temp = df[df.offset == n].copy()
                p = os.path.join(p0, f'{f}_时间序列IC_offset_{n}.csv')
                temp.to_csv(p, index=True, encoding='gbk')

    def _save_csv_ret_mean(self):
        """Dump the per-offset/per-bucket mean-return table per factor class."""
        for f in self.factor_class_list:
            p0 = os.path.join(self.csv_path, f)
            p = os.path.join(p0, f"{f}_ret_mean.csv")
            self.ret_mean.to_csv(p, index=True, encoding='gbk')

    def _save_csv_ret_IR(self):
        """Dump the per-offset/per-bucket return-IR table per factor class.

        Fix: originally wrote ``self.ret_mean`` (copy-paste bug); the file
        name promises the IR table, so save ``self.ret_IR``.
        """
        for f in self.factor_class_list:
            p0 = os.path.join(self.csv_path, f)
            p = os.path.join(p0, f"{f}_ret_IR.csv")
            self.ret_IR.to_csv(p, index=True, encoding='gbk')

    def _save_csv_IC(self):
        """Dump the per-offset IC-mean table per factor class.

        Fix: originally wrote ``self.ret_mean`` (copy-paste bug); the file
        name promises the IC table, so save ``self.ic``.
        """
        for f in self.factor_class_list:
            p0 = os.path.join(self.csv_path, f)
            p = os.path.join(p0, f"{f}_IC_mean.csv")
            self.ic.to_csv(p, index=True, encoding='gbk')

    def _save_csv_ICIR(self):
        """Dump the per-offset ICIR table per factor class."""
        for f in self.factor_class_list:
            p0 = os.path.join(self.csv_path, f)
            p = os.path.join(p0, f"{f}_ICIR.csv")
            self.icir.to_csv(p, index=True, encoding='gbk')

    def save_csv(self):
        """Write all CSV outputs if ``is_save_csv`` was requested."""
        if self.is_save_csv:
            self._save_csv_group_ret()
            self._save_csv_ret_mean()
            self._save_csv_ret_IR()
            self._save_csv_IC()
            self._save_csv_ICIR()
            self._save_csv_all_ic()

    def save_png_once(self, feature, offset):
        """Render and save the 7-panel analysis chart for one feature/offset.

        Panels: per-offset IC, per-offset ICIR, IC time series, bucket equity
        curves, per-bucket mean return, per-bucket return IR, long/short vs
        benchmark with drawdown shading.
        """
        print(f'{feature}-offset={offset}: 正在保存图片')
        fig = plt.figure(figsize=(10, 16))
        ax_ic = fig.add_subplot(521)        # per-offset IC
        ax_icir = fig.add_subplot(522)      # per-offset ICIR
        ax_group_ic = fig.add_subplot(512)  # IC time series
        ax_ret_m = fig.add_subplot(525)     # per-bucket mean return
        ax_ret_ir = fig.add_subplot(526)    # per-bucket return IR
        ax_group_ret = fig.add_subplot(514) # bucket equity curves vs benchmark
        ax_LS_ret = fig.add_subplot(515)    # long/short panel

        # Per-offset IC bars with mean reference line.
        ic = self.ic[f'{feature}_IC_mean']
        ax_ic.title.set_text('分offset-IC')
        ax_ic.axhline(ic.mean(), linestyle='dashed', linewidth=1, color='grey')
        ax_ic.text(x=ic.index[-1] / 2, y=ic.mean() * 1.005, s=f'ic均值：{round(ic.mean(), 4)}')
        ax_ic.bar(x=ic.index, height=ic, width=0.3)
        # Per-offset ICIR bars with mean reference line.
        icir = self.icir[f'{feature}_ICIR']
        ax_icir.title.set_text('分offset-ICIR')
        ax_icir.axhline(icir.mean(), linestyle='dashed', linewidth=1, color='grey')
        ax_icir.text(x=icir.index[-1] / 2, y=icir.mean() * 1.005, s=f'icir均值：{round(icir.mean(), 3)}')
        ax_icir.bar(x=icir.index, height=icir, width=0.3, )
        # IC time series, positive and negative bars drawn separately
        # so they pick up different colors.
        df_group_ic = self.all_ic[feature]
        group_ic = df_group_ic[df_group_ic.offset == offset].copy().set_index('candle_begin_time')
        ic_g = group_ic[group_ic[f'{feature}_IC'] >= 0]
        ic_r = group_ic[group_ic[f'{feature}_IC'] < 0]
        ax_group_ic.title.set_text('时间序列-IC')
        ax_group_ic.bar(x=ic_g.index, height=ic_g[f'{feature}_IC'])
        ax_group_ic.bar(x=ic_r.index, height=ic_r[f'{feature}_IC'])
        # Bucket equity curves.
        df_group_ret = self.group_ret[feature]
        temp = df_group_ret[df_group_ret.offset == offset].copy()
        temp = temp.add(1).cumprod()
        temp = temp.drop('offset', axis=1)
        ax_group_ret.title.set_text('分组收益')
        ax_group_ret.plot(temp, label=temp.columns)
        # TODO consider ax_group_ret.semilogy for a log-scale equity axis.
        ncol = max(1, int(round(self.group_num / 10, 0)))  # legend columns scale with buckets
        ax_group_ret.legend(ncol=ncol)
        # Per-bucket mean return.
        ret_mean = self.ret_mean.loc[(offset), f'{feature}_ret_mean']
        ax_ret_m.title.set_text('分组平均收益')
        ax_ret_m.bar(x=ret_mean.index, height=ret_mean, width=0.3, )
        # Per-bucket return IR.
        ret_ir = self.ret_IR.loc[(offset), f'{feature}_ret_IR']
        ax_ret_ir.title.set_text('分组平均收益IR')
        ax_ret_ir.bar(x=ret_ir.index, height=ret_ir, width=0.3, )
        # Long/short and benchmark: first bucket = long leg, last = short leg,
        # benchmark = equal-weight mean across buckets, LS = long minus short.
        t0 = df_group_ret[df_group_ret.offset == offset].copy()
        t0 = t0.drop('offset', axis=1)
        cols = t0.columns
        t0['benchmark'] = t0.mean(axis=1)
        t0['LS'] = t0[cols[0]] - t0[cols[-1]]
        t0 = t0[[cols[0], cols[-1], 'benchmark', 'LS']]
        t0 = t0.rename(columns={cols[0]: '多头', cols[-1]: '空头'})
        t0 = t0.add(1).cumprod()

        ax_LS_ret.title.set_text('多头-空头-多空收益和基准收益')
        ax_LS_ret.plot(t0, label=t0.columns)
        ax_LS_ret.legend()

        # Max drawdown of the LS equity curve, shaded on a twin axis.
        cum_max = t0['LS'].cummax()
        drawdown = 100 * (t0['LS'] - cum_max) / cum_max
        max_drawdown = drawdown.min()
        ax2 = ax_LS_ret.twinx()
        ax2.fill_between(drawdown.index, 0, drawdown.values, color='#808080', alpha=0.5)
        ax2.set_ylim(int(max_drawdown) - 1, 0)
        ax2.legend(['最大回撤: %.1f%%' % max_drawdown], loc=1, frameon=True, framealpha=0.5)

        plt.suptitle(f'{feature}-offset={offset}-因子分析图', )  # figure title

        fig.tight_layout()
        p = os.path.join(self.picture_path, feature.split('_bh_')[0], feature,
                         f'{feature}-offset={offset}-因子分析图.png')
        plt.savefig(p)
        print(f'{feature}-offset={offset}: 保存成功，图片地址-{p}')

    def save_png(self):
        """Render the analysis chart for every feature and every offset."""
        for f in self.feature_list:
            for n in range(int(self.hold_hour[:-1])):
                self.save_png_once(f, n)

    def run(self):
        """Execute the full pipeline: IC stats, return stats, CSVs, charts."""
        self.calc_group_IC()
        self.calc_group_ret()
        self.save_csv()
        self.save_png()


if __name__ == '__main__':
    # start_date / end_date / hold_hour_num / factor_name_list /
    # factor_params_list all come from `program.config` via the star import
    # at the top of the file.  (Fix: removed the redundant
    # `start_date = start_date` / `end_date = end_date` self-assignments.)
    fa = FactorAnalysis(
        start_date=start_date,
        end_date=end_date,
        hold_hour=f'{hold_hour_num}H',   # holding period, e.g. '6H'
        factor_class_list=factor_name_list,
        is_save_csv=True,                # dump CSV results as well as charts
        group_num=10,                    # quantile buckets; typically 5, 10 or 20
        back_hour_list=factor_params_list,  # factor look-back parameters
    )
    fa.run()
