# -*- coding: utf-8 -*-
# @time: 2024/1/19 15:38
# @file: factor_test
# @author: tyshixi08

import pandas as pd
import numpy as np
import empyrical as ep
import matplotlib.pyplot as plt
from matplotlib.pyplot import MultipleLocator
from matplotlib.dates import MonthLocator
from collections import OrderedDict
from sqlalchemy import create_engine
from tqdm import tqdm
from iFinDPy import *
import Dfactor_get_data as get_data
import rqdatac
import datetime
import time
import math
import os

plt.rcParams['font.sans-serif'] = ['SimHei']  # CJK-capable font so Chinese titles/labels render
plt.rcParams['axes.unicode_minus'] = False  # draw the minus sign correctly when using a CJK font
plt.style.use('ggplot')


#%% Data Processing

# Fetch factor values from the database
def get_factordata(start, end, fn):  # fetch factor values
    """
    Load one factor's values over [start, end] from the MySQL table
    ``conv_factor_new``.

    Parameters
    ----------
    start, end : str
        Inclusive date bounds, e.g. '2022-06-30'.
    fn : str
        Factor name to filter on.

    Returns
    -------
    pd.DataFrame
        Raw table rows (includes at least t_date, c_code, value).
    """
    from sqlalchemy import text  # local import: file only imports create_engine at top level
    # NOTE(review): credentials are hard-coded; consider moving them to config/env.
    engine = create_engine('mysql+pymysql://hs_wangwenjie:hs_wangwenjie#A0@192.168.201.185:3306/wangwj?charset=utf8')
    # Bound parameters instead of %-string interpolation: avoids SQL injection
    # and quoting bugs if a factor name ever contains special characters.
    query = text("SELECT * FROM conv_factor_new "
                 "WHERE t_date >= :start AND t_date <= :end AND factor_name = :fn")
    data = pd.read_sql_query(query, engine, params={'start': start, 'end': end, 'fn': fn})
    return data

# Quantile grouping
def set_quantile(df_slice, field, q):
    """
    Assign each row of one date's cross-section to an equal-count quantile
    bucket (1..q) based on ``field``.

    Rows with a missing factor value are dropped. If the cross-section cannot
    be split into the full q buckets (too few distinct values), the whole
    date is marked NaN so the caller can carry the previous grouping forward.

    Parameters
    ----------
    df_slice : pd.DataFrame
        One date's rows; must contain a ``value`` column.
    field : str
        Column to rank on (normally 'value').
    q : int
        Number of buckets.

    Returns
    -------
    pd.DataFrame
        Input rows with an added ``quantile`` column (1..q, or NaN).
    """
    if df_slice['value'].count() == 0:
        # Entire cross-section is NaN: keep the rows but flag "no grouping".
        df_slice['quantile'] = np.nan
    else:
        df_slice = df_slice.dropna(subset=['value'], axis=0)  # drop rows with missing factor values
        # labels=False yields 0-based bin codes; +1 makes buckets 1-based.
        df_slice['quantile'] = pd.qcut(df_slice[field], q=q, duplicates='drop', labels=False) + 1
        # duplicates='drop' can merge bins; if fewer than q buckets survive,
        # invalidate the date. (Fix: compare against the q parameter instead
        # of the hard-coded literal 5, so q != 5 works as advertised.)
        if df_slice['quantile'].max() < q:
            df_slice['quantile'] = np.nan
    return df_slice

# Data merging
def get_factor_data(fn, df_filtered, datetime_list):
    """
    Load one factor's values, attach next-day returns and assign quantile
    groups per date.

    Relies on the module-level globals ``start`` and ``end`` set in the
    __main__ section for the database query window.

    Parameters
    ----------
    fn : str
        Factor name.
    df_filtered : pd.Series or single-column pd.DataFrame
        Next-day returns indexed by (date, bond code).
    datetime_list : sequence
        Backtest dates; the result is trimmed to [first, last] of this list.

    Returns
    -------
    pd.DataFrame
        Columns t_date, c_code, value, ret_nextday, quantile; rows that never
        received a grouping are dropped.
    """
    print('提取因子值&因子分组')
    cal_time = time.time()
    df_factor = get_factordata(start, end, fn)
    df_factor = df_factor[['t_date', 'c_code', 'value']]
    df_factor = df_factor.set_index(['t_date', 'c_code'])
    # Align next-day returns on the (date, code) MultiIndex.
    df_factor['ret_nextday'] = df_filtered
    df_factor = df_factor.reset_index()
    df_factor = df_factor.groupby(['t_date']).apply(set_quantile, 'value', 5).reset_index(drop=True)
    df_count = df_factor.groupby('t_date')['quantile'].count()
    print('有{}个时点不换仓'.format(df_count[df_count == 0].count()))
    # Dates where no 5-way split was possible inherit the previous grouping
    # per bond. (.ffill() replaces fillna(method='ffill'), deprecated since
    # pandas 2.1.)
    df_factor['quantile'] = df_factor.groupby('c_code')['quantile'].ffill()
    # Trim to the backtest window. (Fix: use the datetime_list parameter the
    # caller already passes instead of reaching for the global Date_list —
    # the caller passes Date_list, so behavior is unchanged.)
    df_factor = df_factor[(df_factor.t_date >= datetime_list[0]) & (df_factor.t_date <= datetime_list[-1])]
    # Drop rows whose group is still NaN (never groupable, ffill cannot help).
    df_factor = df_factor.dropna(subset=['quantile'], axis=0)
    print('计算用时: {:.2f}s'.format(time.time() - cal_time))
    return df_factor

# Rebalance turnover
def cal_difference(fn, df_factor, method):
    """
    For each quantile group, compute every date's total absolute change in
    equal weights versus the previous date (a turnover measure).

    ``fn`` and ``method`` are unused; they are kept so the signature matches
    the other cal_* helpers.
    """

    def per_quantile(group):
        # Equal-weight every bond held on a given date: w = 1 / n_holdings.
        holdings = group[['t_date', 'c_code']].copy()
        holdings['weight'] = 1 / holdings.groupby('t_date')['c_code'].transform('size')
        # Wide table: one row per date, one column per bond; a bond absent on
        # a date carries zero weight.
        wide = holdings.pivot(index='t_date', columns='c_code', values='weight').fillna(0)
        # |w_t - w_{t-1}| summed over all bonds. The first date has no
        # predecessor, so its all-NaN diff row sums to 0.
        turnover = wide.diff().abs().sum(axis=1)
        return turnover.rename('difference').reset_index()

    frame = df_factor.copy()
    df_diff = frame.groupby('quantile').apply(per_quantile).reset_index()
    df_diff = df_diff[['t_date', 'quantile', 'difference']]
    return df_diff

# Return calculation
def cal_factor_data(fn, df_factor, method):
    """
    Compute per-quantile daily returns and NAVs net of transaction costs,
    and export each quantile's turnover to Excel.

    Returns are shifted by 2 periods before compounding
    (NOTE(review): presumably signal on T, trade T+1, earn T+2 — confirm the
    intended lag convention). A cost of 0.4 bp per unit of turnover is
    charged at each rebalance. Output files go under the module-level global
    ``new_dir`` set in __main__.

    Parameters
    ----------
    fn : str
        Factor name (used for the output file name).
    df_factor : pd.DataFrame
        Output of get_factor_data (t_date, c_code, ret_nextday, quantile).
    method : str
        Passed through to cal_difference (unused there).

    Returns
    -------
    (pd.DataFrame, pd.DataFrame)
        Net daily returns and NAVs, one column per quantile.
    """
    df = df_factor.copy()
    # Mean next-day return per (date, quantile), pivoted to wide form.
    # Fix: pivot() arguments are keyword-only since pandas 2.0.
    df_ret = df.groupby(['t_date', 'quantile'])['ret_nextday'].mean().reset_index()
    df_ret = df_ret.pivot(index='t_date', columns='quantile', values='ret_nextday').shift(2).fillna(0)

    df_diff = cal_difference(fn, df_factor, method)  # turnover per quantile/date
    df_diff = df_diff.pivot(index='t_date', columns='quantile', values='difference')
    df_diff.iloc[:2, :] = 0  # no cost before the first effective position

    df_change = df_diff.copy()  # daily turnover series for the export below
    df_change = df_change.iloc[2:, :]

    # Compound NAV per quantile, deducting 0.4 bp * turnover as cost.
    df_nav = pd.DataFrame(index=df_ret.index)
    df_nav.loc[df_ret.index[0], df_ret.columns] = 1
    for i in range(1, len(df_ret)):
        prev_dt, cur_dt = df_ret.index[i - 1], df_ret.index[i]
        for col in df_ret.columns:
            new_ret = df_ret.loc[cur_dt, col] - 0.00004 * df_diff.loc[cur_dt, col] * \
                      df_nav.loc[prev_dt, col]
            df_nav.loc[cur_dt, col] = df_nav.loc[prev_dt, col] * (1 + new_ret)
    df_ret_new = df_nav.pct_change()

    # Export daily turnover. Fix: ExcelWriter.save() was removed in
    # pandas 2.0 — use the writer as a context manager instead.
    exl_path = os.path.join(new_dir, '日换手率')
    if not os.path.exists(exl_path):
        os.makedirs(exl_path)
    with pd.ExcelWriter(os.path.join(exl_path, "{}.xlsx".format(fn))) as wb:
        df_change.to_excel(wb, sheet_name='ts_turnover')
        df_change.mean(0).to_excel(wb, sheet_name='avg_turnover')
    return df_ret_new, df_nav


# Benchmark index
def get_index(start, end, method):
    """
    Pull the RiceQuant convertible-bond equal-weight index (866005.RI) as
    the benchmark.

    Returns a one-column DataFrame named 'benchmark', indexed by date, built
    from the requested price field (e.g. 'close').
    """
    rqdatac.init('license', 'AcBHy5_JJ6wjZdu7Q-ey7dX-J3BmyEC_KblY2Q_hBeOuoBaeBbgXTNSe6XZvqKVESbyUf7vMpLLGuO_aqyb3w9fWGI7q4wdClE6cMp_Z3N4PqqTHJ0nr3CIuXtk-5XzSD1p7NTdNcrAfZlRVpMMtY_PDC9FYuXNmC_EnuQg4H-A=fGk9EhHcK3xN189iXYSWLyiMdGUeXXlVZqr2MxhBypSHxQYnIIyxyM8BR8oNnVUdWhKx-ZrFRIjSONd7uYpOvpcBab92P60iAR_JopX61emtrvsY1xG_uCfYhDPBdDSJKaniJhTPuoBIU4JZun8-8fMIxzx7lnwBm2kAUOA_Mpg=')
    px = rqdatac.get_price('866005.RI', start_date=start, end_date=end, frequency='1d', fields=None,
                           adjust_type='none', skip_suspended=False, market='cn', expect_df=True, time_slice=None)
    px = px.reset_index()
    # Re-shape the chosen price column into a date-indexed benchmark series.
    benchmark = pd.DataFrame(np.array(px[method].T), index=px['date'], columns=['benchmark'])
    return benchmark


#%% Result Output

# Plotting helper
def fig_nav(fn, Port, title, ncol, path):
    """
    Plot every column of ``Port`` as a line and save the chart as
    ``<path>/<fn>.png``.

    Parameters
    ----------
    fn : str
        Factor name; used as both chart title and file name.
    Port : pd.DataFrame
        NAV series, one line per column.
    title : str
        Unused (kept for interface compatibility with existing callers).
    ncol : int
        Number of legend columns.
    path : str
        Output directory (must already exist).
    """
    fig = plt.figure(figsize=(30, 15))
    ax1 = fig.add_subplot(111)
    ax1.set_title('{}'.format(fn), fontsize=40)
    for c in Port.columns:
        ax1.plot(Port[c], linewidth=5, label=c)
    # Enlarge tick labels on both axes for readability at this figure size.
    for ticks in ax1.xaxis.get_major_ticks():
        ticks.label1.set_fontsize(20)
        ticks.label1.set_color('black')
    for ticks in ax1.yaxis.get_major_ticks():
        ticks.label1.set_fontsize(20)
        ticks.label1.set_color('black')
    ax1.xaxis.set_major_locator(MultipleLocator(200))  # one x tick every 200 observations
    ax1.set_facecolor('white')
    # Deduplicate legend entries while preserving first-seen order.
    handles, labels = ax1.get_legend_handles_labels()
    by_label = OrderedDict(zip(labels, handles))
    ax1.legend(by_label.values(), by_label.keys(), loc='center left', bbox_to_anchor=(1, 0.5), ncol=ncol, fontsize=25)
    fig.savefig(os.path.join(path, '{}.png'.format(fn)), bbox_inches='tight')
    # Fix: close the figure. This function runs once per factor (~40 times);
    # unclosed figures accumulate and leak memory (matplotlib warns after 20
    # open figures).
    plt.close(fig)
    return

# IC calculation
def cal_ic(fn, df_factor):
    """
    Compute the daily rank IC (Spearman correlation between factor value and
    next-day return) of one factor.

    Parameters
    ----------
    fn : str
        Factor name; becomes the output column/index name.
    df_factor : pd.DataFrame
        Must contain t_date, value and ret_nextday columns.

    Returns
    -------
    (pd.Series, pd.DataFrame)
        Mean IC (indexed by factor name) and the cumulative-IC time series.
        Days whose IC is exactly 0 are treated as missing.
    """
    print('计算IC因子值')
    cal_time = time.time()
    factor_dateindex = list(df_factor.t_date.unique())
    # Fix: group once instead of re-filtering the whole frame for every date
    # (O(n) instead of O(n * dates)); iterate in first-appearance order like
    # the original. Series.corr replaces the deprecated chained .iloc[0][1].
    grouped = df_factor.groupby('t_date')
    rank_ic = []
    for dt in factor_dateindex:
        g = grouped.get_group(dt)
        rank_ic.append(g['value'].corr(g['ret_nextday'], method='spearman'))
    print('计算用时: {:.2f}s'.format(time.time() - cal_time))
    data_ic = pd.DataFrame()
    data_ic['{}'.format(fn)] = pd.Series(rank_ic, index=factor_dateindex)
    # An IC of exactly 0 is treated as "no signal that day" and excluded
    # from the mean and the cumulative sum.
    data_ic[data_ic == 0] = np.nan

    ic_avg = data_ic.mean(0)  # mean IC per factor
    ic_nav = np.cumsum(data_ic.copy())  # cumulative IC "NAV"
    ic_nav.name = fn
    return ic_avg, ic_nav
 

# Long / excess / long-short NAV
def cal_factor_nav(fn, df_nav, df_ret, reverse=False):
    """
    Build the factor's long-only, excess (vs. benchmark) and long-short daily
    NAV series, and export a per-quantile excess-NAV chart and table.

    Uses the module-level globals ``df_index`` (benchmark NAV) and
    ``new_dir`` (output directory) set in __main__.

    Parameters
    ----------
    fn : str
        Factor name (column name of all outputs).
    df_nav : pd.DataFrame
        Per-quantile NAVs from cal_factor_data; first column is group 1,
        last column is the top group.
    df_ret : pd.DataFrame
        Per-quantile daily returns from cal_factor_data.
    reverse : bool
        If True, treat the last quantile as the long leg.

    Returns
    -------
    (pd.DataFrame, pd.DataFrame, pd.DataFrame)
        Long, excess and long-short NAVs, each one column named ``fn``.
    """
    quantile_top = df_nav.columns[0]
    quantile_bot = df_nav.columns[-1]
    if reverse == False:
        top = quantile_top
        bot = quantile_bot
    else:
        top = quantile_bot
        bot = quantile_top

    # Long-short: half the top-minus-bottom return spread, compounded.
    df_long_short = pd.DataFrame(ep.cum_returns((df_ret[top] - df_ret[bot])/2, starting_value=1),columns=['{}'.format(fn)])
    df_long_short = df_long_short.iloc[1:,:]
    df_long_short.columns = [fn]

    df_long = pd.DataFrame(df_nav[top])
    df_long = df_long.iloc[1:,:]
    df_long.columns = [fn]

    # Excess NAV = long NAV / benchmark NAV, both rebased to 1.
    df_excess = df_long.copy()
    df_excess['benchmark'] = df_index
    df_excess = df_excess / df_excess.iloc[0,:]
    df_excess['ret'] = df_excess[fn] / df_excess['benchmark']
    df_excess = pd.DataFrame(df_excess['ret'])
    df_excess.columns = [fn]

    # Per-quantile excess NAVs for the chart/table export.
    nav_fig = df_nav.copy()
    nav_fig = nav_fig.iloc[1:,:]
    nav_fig['benchmark'] = df_index
    nav_fig = nav_fig / nav_fig.iloc[0,:]
    for i in df_nav.columns:
        nav_fig[i] = nav_fig[i] / nav_fig['benchmark']
    nav_fig = nav_fig[df_nav.columns]
    # Chart output
    fig_path = os.path.join(new_dir, '分组超额净值')
    if not os.path.exists(fig_path):
        os.makedirs(fig_path)
    fig_nav(fn, nav_fig, '{}_分组超额'.format(fn), 1, fig_path)
    # Table output. Fix: ExcelWriter.save() was removed in pandas 2.0 — use
    # the writer as a context manager instead.
    with pd.ExcelWriter(os.path.join(fig_path, "{}_分组超额.xlsx".format(fn))) as wb:
        nav_fig.to_excel(wb)
    return df_long, df_excess, df_long_short


#%% Execution

if __name__ == '__main__':
    # Backtest window and price field used for returns and the benchmark.
    start = '2022-06-30'
    end = '2024-06-28'
    method = 'close'
    freq = 1
    path = os.getcwd()
    new_dir = os.path.join(path, 'output/回测结果({}~{}_{})'.format(start, end, method))
    if not os.path.exists(new_dir):
        os.makedirs(new_dir)

    # Benchmark index, rebased to 1 at the first date.
    df_index = get_index(start, end, method)
    df_index = df_index / df_index.iloc[0,:]

    # Convertible-bond next-day returns (pre-filtered universe).
    df_filtered = pd.read_csv(os.path.join('data', 'data_after_filter.csv'))
    df_filtered['Date'] = pd.to_datetime(df_filtered['Date'])
    if method == 'close':
        df_filtered = df_filtered[['Date','c_bondCode','ret_nextday_close']]
    else:
        df_filtered = df_filtered[['Date','c_bondCode','ret_nextday_open']]
    # Load history from 2018 so quantile forward-fill has context before `start`.
    df_filtered = df_filtered[(df_filtered.Date >= '2018-01-02') & (df_filtered.Date <= end)]
    Date_list = df_filtered[df_filtered.Date >= start].Date.unique()
    df_filtered = df_filtered.set_index(['Date','c_bondCode'])

    factor_names = [
                    'sec_convprice',
                    'sec_convsize',
                    'sec_conv_stocksize',
                    'sec_ytm',
                    'sec_bond_premium',
                    'sec_IV_BS',
                    'sec_IV_delta',
                    'sec_weightskew_63D',
                    'sec_convturnover',
                    'sec_volatility5d',
                    'sec_conv_amplitude',
                    'sec_volstability_10d',
                    'sec_ret',
                    'sec_priceratio_5d',
                    'sec_convPremium',
                    'sec_modified_premium',
                    'sec_double_low',
                    'sec_amplitude_delta',
                    'sec_ret_delta',
                    'sec_stock_mv',
                    'sec_stock_ret_20D',
                    'sec_EP_net_ttm',
                    'sec_EP_deducted_ttm',
                    'sec_SUE',
                    'sec_SUE_after',

                    'ts_convprice_roll20d',
                    'ts_convturnover_roll20d',
                    'ts_volatility5d_roll20d',
                    'ts_amplitude_roll20d',
                    'ts_conv_premium_roll20d',
                    'ts_modified_premium_roll20d',
                    'ts_double_low_roll20d',
                    'ts_ret_delta_rol20d',
                    'ts_bond_premium_roll20d',
                    'ts_ret_roll20d',
                    'ts_IV_delta_roll20d',
                    'ts_IV_BS_roll20d',
                    'ts_ytm_roll20d',
                    'ts_volumerank_roll10d'
                    ]

    avg_ic = []
    nav_ic = []
    nav_long = []       # long-only NAVs
    nav_excess = []     # excess NAVs
    nav_longshort = []  # long-short NAVs
    for fn in factor_names:
        print(fn)
        # Factor values + quantile groups
        df_factor = get_factor_data(fn, df_filtered, Date_list)
        reverse = False  # factor direction
        # IC and NAVs
        df_ret, df_nav = cal_factor_data(fn, df_factor, method)  # group returns & NAVs
        ic_avg, ic_nav = cal_ic(fn, df_factor)  # rank IC
        df_long, df_excess, df_long_short = cal_factor_nav(fn, df_nav, df_ret, reverse)  # long/excess/long-short NAVs
        # Collect per-factor results
        avg_ic.append(ic_avg)
        nav_ic.append(ic_nav)
        nav_long.append(df_long)
        nav_excess.append(df_excess)
        nav_longshort.append(df_long_short)

    # Aggregate table output. Fix: ExcelWriter.save() was removed in
    # pandas 2.0 — use the writers as context managers.
    avg_ic = pd.DataFrame(pd.concat(avg_ic))
    nav_ic = pd.concat(nav_ic,axis=1)
    with pd.ExcelWriter(os.path.join(new_dir, "rank_ic.xlsx")) as wb1:
        avg_ic.to_excel(wb1, sheet_name = '均值')
        nav_ic.to_excel(wb1, sheet_name = '日均累计')

    nav_long = pd.concat(nav_long,axis=1)
    nav_excess = pd.concat(nav_excess,axis=1)
    nav_longshort = pd.concat(nav_longshort,axis=1)
    # NOTE(review): "facor_nav" looks like a typo for "factor_nav"; kept as-is
    # so any downstream consumer of the existing file name keeps working.
    with pd.ExcelWriter(os.path.join(new_dir, "facor_nav.xlsx")) as wb2:
        nav_long.to_excel(wb2, sheet_name = '多头')
        nav_excess.to_excel(wb2, sheet_name = '超额')
        nav_longshort.to_excel(wb2, sheet_name = '多空')