# -*- coding: utf-8 -*-
"""
Created on Tue Jun 11 15:29:42 2024

@author: wangwenjie
"""

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.pyplot import MultipleLocator
#from matplotlib.dates import MonthLocator
from collections import OrderedDict
from sqlalchemy import create_engine
from datetime import date
from tqdm import tqdm
import Dfactor_get_data
import Dfactor_data_filter as data_filter
import Dfactor_calculate as factor_calculate
import rqdatac
import datetime
import time
import os

# Global matplotlib style: SimHei font so Chinese axis/legend text renders,
# keep the minus sign renderable under a CJK font, and use the ggplot theme.
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False
plt.style.use('ggplot')

#%% 画图
def fig_nav(fn, Port, title, ncol, path):
    """
    Plot every column of `Port` as a NAV curve and save the chart as
    '<fn>.png' under `path`.

    Parameters
    ----------
    fn : str
        Factor name; used as both the chart title and the output file name.
    Port : pd.DataFrame
        One column per series to plot (e.g. quantile NAVs), indexed by date.
    title : str
        Currently unused — the title is taken from `fn`.
        NOTE(review): confirm whether `title` was meant to be the chart title.
    ncol : int
        Number of legend columns.
    path : str
        Output directory for the PNG file.
    """
    fig = plt.figure(figsize=(30, 15))
    ax1 = fig.add_subplot(111)
    ax1.set_title('{}'.format(fn), fontsize=40)
    for c in Port.columns:
        ax1.plot(Port[c], linewidth=5, label=c)
    # Enlarge and recolor the tick labels on both axes for readability.
    for ticks in ax1.xaxis.get_major_ticks():
        ticks.label1.set_fontsize(20)
        ticks.label1.set_color('black')
    for ticks in ax1.yaxis.get_major_ticks():
        ticks.label1.set_fontsize(20)
        ticks.label1.set_color('black')
    # One major x tick every 200 observations to keep the axis uncluttered.
    ax1.xaxis.set_major_locator(MultipleLocator(200))
    ax1.set_facecolor('white')
    # De-duplicate legend entries while preserving first-seen order.
    handles, labels = ax1.get_legend_handles_labels()
    by_label = OrderedDict(zip(labels, handles))
    plt.legend(by_label.values(), by_label.keys(), loc='center left', bbox_to_anchor=(1,0.5), ncol=ncol, fontsize=25)
    plt.savefig(os.path.join(path, '{}.png'.format(fn)), bbox_inches='tight')
    # Fix: close the figure so repeated calls (one per factor) don't
    # accumulate open figures — matplotlib warns and leaks memory otherwise.
    plt.close(fig)
    return
    

#%% 数据处理

def get_factordata(start, end, fn): # fetch factor values
    """
    Load the daily values of one factor from the `conv_factor_new` MySQL
    table for the inclusive date range [start, end].

    Parameters
    ----------
    start, end : str or datetime — date bounds applied to t_date.
    fn : str — factor_name to select.

    Returns
    -------
    pd.DataFrame with the raw query result.
    """
    # NOTE(review): credentials are hard-coded and the SQL is string-built;
    # inputs are internal constants here, but parameterized queries and a
    # shared config would be safer.
    engine = create_engine('mysql+pymysql://hs_wangwenjie:hs_wangwenjie#A0@192.168.201.185:3306/wangwj?charset=utf8')
    sql = (
        "SELECT * from conv_factor_new "
        "where t_date>='{}' and t_date<='{}' and factor_name = '{}'".format(start, end, fn)
    )
    return pd.read_sql_query(sql, engine)

def set_quantile(df_slice, field, q):
    """
    Assign each row of one date's cross-section to one of `q` equal-count
    quantile buckets based on column `field`.

    Parameters
    ----------
    df_slice : pd.DataFrame
        One date's rows; must contain column `field`.
    field : str
        Column holding the factor value to rank on.
    q : int
        Number of buckets (labels run 1..q).

    Returns
    -------
    pd.DataFrame — `df_slice` with a 'quantile' column added; rows whose
    factor value is NaN are dropped. If the section is all-NaN, or cannot be
    split into the full `q` buckets (duplicate bin edges), 'quantile' is set
    to NaN for the whole date so the caller keeps the previous holding.
    """
    if df_slice[field].count() == 0:
        # Nothing to rank on this date — mark the whole slice unassigned.
        df_slice['quantile'] = np.nan
    else:
        df_slice = df_slice.dropna(subset=[field], axis=0)  # drop rows with missing factor value
        df_slice['quantile'] = pd.qcut(df_slice[field], q=q, duplicates='drop', labels=False) + 1
        # Fix: compare against the requested bucket count instead of a
        # hard-coded 5, so the `q` parameter is honoured.
        if max(df_slice['quantile']) < q:
            df_slice['quantile'] = np.nan
    return df_slice

def get_factor_data(fn, df_filtered, datetime_list):
    """
    Fetch factor values, attach next-day returns, and assign 5 quantile
    groups per date.

    Parameters
    ----------
    fn : str
        Factor name to load from the database.
    df_filtered : pd.DataFrame/Series
        Next-day returns indexed by (t_date, c_code); aligned by index.
    datetime_list : array-like
        Trading dates; only rows within [first, last] are kept.

    Returns
    -------
    pd.DataFrame with columns t_date, c_code, value, ret_nextday, quantile.

    Notes
    -----
    Still reads the module-level globals `start` and `end` for the DB query
    window — NOTE(review): consider passing them in explicitly.
    """
    print('提取因子值&因子分组')
    cal_time = time.time()
    df_factor = get_factordata(start, end, fn)
    df_factor = df_factor[['t_date','c_code','value']]
    df_factor = df_factor.set_index(['t_date','c_code'])
    # Align next-day returns onto the factor rows by the (t_date, c_code) index.
    df_factor['ret_nextday'] = df_filtered
    df_factor = df_factor.reset_index()
    # Split each date's cross-section into 5 equal-count buckets.
    df_factor = df_factor.groupby(['t_date']).apply(set_quantile, 'value', 5).reset_index(drop=True)
    df_count = df_factor.groupby('t_date')['quantile'].count()
    print('有{}个时点不换仓'.format(df_count[df_count == 0].count()))
    # Dates that could not be split into 5 buckets keep the previous holding:
    # forward-fill each bond's bucket. (.ffill() replaces the deprecated
    # fillna(method='ffill') — identical result.)
    df_factor['quantile'] = df_factor.groupby('c_code')['quantile'].ffill()
    # Fix: use the datetime_list parameter (previously this read the global
    # Date_list and left the parameter unused; callers pass Date_list, so
    # behavior is unchanged).
    df_factor = df_factor[(df_factor.t_date >= datetime_list[0]) & (df_factor.t_date <= datetime_list[-1])]
    # Drop rows that never received a bucket (unassignable from the start,
    # so forward-fill cannot help either).
    df_factor = df_factor.dropna(subset=['quantile'], axis=0)
    print('计算用时: {:.2f}s'.format(time.time() - cal_time))
    return df_factor


def cal_difference(fn, df_factor, method): # turnover ratio
    """
    Compute, for every date and quantile bucket, the total absolute change in
    equal-portfolio weights versus the previous period (the traded fraction).

    Parameters
    ----------
    fn : str
        Factor name (unused inside this function).
    df_factor : pd.DataFrame
        Long-format frame with at least columns t_date, c_code, quantile.
    method : str
        Price method flag (unused inside this function).

    Returns
    -------
    pd.DataFrame with columns ['t_date', 'quantile', 'difference'].
    """
    def weight_calculate(df_slice):
        # Equal weight across the bonds held on one date within the bucket.
        df_slice['weight'] = 1 / len(df_slice)
        return df_slice

    def delta_calculate(df_slice):
        # Absolute weight change of one bond between consecutive dates
        # (first date yields NaN, which sums as 0 below).
        df_slice['difference'] = abs(df_slice['weight'] - df_slice['weight'].shift(1))
        return df_slice

    def quantile_difference(df_slice):
        diff = df_slice[['t_date', 'c_code']].copy()
        # Per-date equal weights within the bucket.
        diff = diff.groupby('t_date', as_index=False).apply(weight_calculate).reset_index(drop=True)
        # Pivot to a date x bond matrix so bonds absent on a date get weight 0.
        diff = diff.pivot(index='t_date', columns='c_code', values='weight')
        # Back to long format: one row per (bond, date).
        diff = diff.fillna(0).unstack().reset_index()
        diff.columns = ['c_code', 't_date', 'weight']
        # |w_t - w_{t-1}| per bond, then total traded weight per date.
        diff = diff.groupby('c_code', as_index=False).apply(delta_calculate).reset_index(drop=True)
        sum_diff = diff.groupby('t_date', as_index=False).difference.sum()
        return sum_diff
    
    df = df_factor.copy()  # work on a copy of the merged data
    df_diff = df.groupby('quantile').apply(quantile_difference).reset_index()
    df_diff = df_diff[['t_date','quantile','difference']]
    return df_diff


#%% 分组净值拟合
def get_index(start, end, method): # benchmark index
    """
    Pull the RiceQuant convertible-bond equal-weight index (866005.RI) daily
    bars and return the chosen price field as the benchmark series.

    Parameters
    ----------
    start, end : str or datetime — date range for the daily bars.
    method : str — price field to use as the benchmark (e.g. 'close').

    Returns
    -------
    pd.DataFrame indexed by date with a single 'benchmark' column.
    """
    # SECURITY NOTE(review): the rqdatac license key is hard-coded in source —
    # consider loading it from an environment variable or config file.
    rqdatac.init('license', 'AcBHy5_JJ6wjZdu7Q-ey7dX-J3BmyEC_KblY2Q_hBeOuoBaeBbgXTNSe6XZvqKVESbyUf7vMpLLGuO_aqyb3w9fWGI7q4wdClE6cMp_Z3N4PqqTHJ0nr3CIuXtk-5XzSD1p7NTdNcrAfZlRVpMMtY_PDC9FYuXNmC_EnuQg4H-A=fGk9EhHcK3xN189iXYSWLyiMdGUeXXlVZqr2MxhBypSHxQYnIIyxyM8BR8oNnVUdWhKx-ZrFRIjSONd7uYpOvpcBab92P60iAR_JopX61emtrvsY1xG_uCfYhDPBdDSJKaniJhTPuoBIU4JZun8-8fMIxzx7lnwBm2kAUOA_Mpg=')
    data = rqdatac.get_price('866005.RI', start_date=start, end_date=end, frequency='1d', fields=None,
                             adjust_type='none', skip_suspended=False, market='cn', expect_df=True, time_slice=None)
    data = data.reset_index()
    df_index = pd.DataFrame(np.array(data['{}'.format(method)].T), index=data['date'], columns=['benchmark'])
    return df_index

def cal_factor_data(fn, method):
    """
    Fit the NAV series of the 5 quantile portfolios for factor `fn`.

    Parameters
    ----------
    fn : str — factor name.
    method : str — price method flag, forwarded to cal_difference.

    Returns
    -------
    (df_nav, df_change):
        df_nav — one NAV column per quantile, seeded at 1, net of costs;
        df_change — daily turnover per quantile with the first two
        (cost-free) rows excluded.

    Notes
    -----
    Reads the module-level globals df_filtered and Date_list via
    get_factor_data.
    """
    df_factor = get_factor_data(fn, df_filtered, Date_list)
    df = df_factor.copy() # work on a copy of the merged data
    # Mean next-day return per (date, quantile); shift(2) delays the return
    # relative to the signal, and missing dates earn 0.
    df_ret = df.groupby(['t_date', 'quantile'])['ret_nextday'].mean().reset_index()
    # Fix: keyword arguments — positional DataFrame.pivot arguments were
    # removed in pandas 2.0 (identical behavior on earlier versions).
    df_ret = df_ret.pivot(index='t_date', columns='quantile', values='ret_nextday').shift(2).fillna(0)

    df_diff = cal_difference(fn, df_factor, method) # turnover ratio per date/bucket
    df_diff = df_diff.pivot(index='t_date', columns='quantile', values='difference')
    df_diff.iloc[:2,:] = 0  # no trading cost on the first two (shifted) rows

    df_change = df_diff.copy() # turnover series reported back to the caller
    df_change = df_change.iloc[2:,:]

    # Compound quantile NAVs, charging a cost of 0.00004 * turnover * previous
    # NAV against each period's return.
    df_nav = pd.DataFrame(index = df_ret.index)
    df_nav.loc[df_ret.index[0], df_ret.columns] = 1
    for i in range(1, len(df_ret)):
        for col in df_ret.columns:
            new_ret = df_ret.loc[df_ret.index[i], col] - 0.00004 * df_diff.loc[df_ret.index[i], col] * \
                      df_nav.loc[df_ret.index[i - 1], col]
            df_nav.loc[df_ret.index[i], col] = df_nav.loc[df_ret.index[i - 1], col] * (1 + new_ret)
    return df_nav, df_change


#%% 收益计算
def cal_return(fn, method):
    """
    Compute the long-only and excess (vs benchmark) returns of quantile
    groups 1-5 for factor `fn`, append the incremental rows to the MySQL
    tables convfactor_ret and convfactor_turnover, and return the latest-day
    summary frames.

    Parameters
    ----------
    fn : str — factor name.
    method : str — price method flag, forwarded downstream.

    Returns
    -------
    (df_long_ret, df_excess_ret, df_change):
        latest-day long return, latest-day excess return, and latest-day
        turnover, each a one-column DataFrame named after the factor.

    Notes
    -----
    Side effects: writes to the database. Reads the module-level global
    df_index (benchmark NAV).
    """
    df_nav, df_change = cal_factor_data(fn, method)
    
    # Long-only: drop the NAV seed row, take the latest one-day return.
    df_long = pd.DataFrame(df_nav)
    df_long = df_long.iloc[1:,:]
    df_long_ret = pd.DataFrame(df_long.pct_change().iloc[-1,:])
    df_long_ret.columns = [fn]

    # Excess: rebase all legs to 1, then divide each NAV by the benchmark.
    df_excess = df_long.copy()
    df_excess['benchmark'] = df_index
    df_excess = df_excess / df_excess.iloc[0,:]
    for i in df_long.columns:
        df_excess[i] = df_excess[i]  / df_excess['benchmark']
    df_excess = pd.DataFrame(df_excess[df_long.columns])
    df_excess_ret = pd.DataFrame(df_excess.pct_change().iloc[-1,:])
    df_excess_ret.columns = [fn]

    # Reshape both return panels to long format for storage.
    long_ret = pd.DataFrame(df_long.pct_change())
    long_ret = long_ret.stack().reset_index()
    long_ret.columns = ['t_date','class','ret']
    long_ret['style'] = 'long'
    
    excess_ret = pd.DataFrame(df_excess.pct_change())
    excess_ret = excess_ret.stack().reset_index()
    excess_ret.columns = ['t_date','class','ret']
    excess_ret['style'] = 'excess'
    
    # Persist long & excess returns to the MySQL store.
    # NOTE(review): credentials are hard-coded and duplicated in cal_ic —
    # consider a shared config / environment variables.
    conf = {'username': 'hs_wangwenjie',
            'password': 'hs_wangwenjie#A0',
            'host': '192.168.201.185',
            'port': 3306,
            'db': 'wangwj'}
    url = 'mysql+pymysql://%s:%s@%s:%s/%s' % (conf['username'], conf['password'], conf['host'], conf['port'], conf['db'])
    engine = create_engine(url)

    # first_time_to_sql (initial full load, kept for reference):
    #f_data = pd.concat([long_ret, excess_ret], axis=0)
    #f_data.insert(1, 'factor_name', fn)
    #f_data.to_sql('convfactor_ret', engine, index=False, if_exists='append')
    #f_data.insert(5, 't_entry_time', datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
    
    # Incremental update: append only rows newer than the latest stored date.
    query = ("""select DISTINCT t_date from convfactor_ret where factor_name = '%s' ORDER BY t_date""") % (fn)
    last_date = pd.read_sql_query(query, engine).iloc[-1].tolist()[0]
    f_data = pd.concat([long_ret, excess_ret], axis=0)
    f_data.insert(1, 'factor_name', fn)
    f_data = f_data[f_data.t_date > last_date]
    f_data.insert(5, 't_entry_time', datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
    f_data.to_sql('convfactor_ret', engine, index=False, if_exists='append')
    
    # Persist turnover to the MySQL store.
    # first_time_to_sql (initial full load, kept for reference):
    #f_data = pd.DataFrame(df_change.stack()).reset_index()
    #f_data.columns = ['t_date','class','turnover']
    #f_data = f_data[f_data.t_date > '2022-06-30']
    #f_data.insert(1, 'factor_name', fn)
    #f_data.insert(4, 't_entry_time', datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
    #f_data.to_sql('convfactor_turnover', engine, index=False, if_exists='append')
    
    # Incremental update: append only rows newer than the latest stored date.
    query = ("""select DISTINCT t_date from convfactor_turnover where factor_name = '%s' ORDER BY t_date""") % (fn)
    last_date = pd.read_sql_query(query, engine).iloc[-1].tolist()[0]
    #first_date = pd.read_sql_query(query, engine).iloc[0].tolist()[0]
    f_data = pd.DataFrame(df_change.stack()).reset_index()
    f_data.columns = ['t_date','class','turnover']
    f_data.insert(1, 'factor_name', fn)
    f_data = f_data[f_data.t_date > pd.to_datetime(last_date).strftime('%Y-%m-%d')]
    #f_data = f_data[f_data.t_date < pd.to_datetime(first_date).strftime('%Y-%m-%d')]
    f_data.insert(4, 't_entry_time', datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
    f_data.to_sql('convfactor_turnover', engine, index=False, if_exists='append')
    
    # Latest-day turnover as a one-column frame named after the factor.
    df_change = pd.DataFrame(df_change.iloc[-1,:])
    df_change.columns = [fn]
    
    return df_long_ret, df_excess_ret, df_change


#%% IC计算
def cal_ic(fn):
    """
    Compute the daily rank IC (Spearman correlation between factor value and
    next-day return) for factor `fn`, append new rows to the convfactor_ic
    MySQL table, and return the mean IC and the cumulative IC curve.

    Parameters
    ----------
    fn : str — factor name.

    Returns
    -------
    (ic_avg, ic_nav): mean IC over the window and the cumulative-sum IC
    series, both labelled with `fn`.

    Notes
    -----
    Side effects: writes to the database. Reads the module-level globals
    df_filtered and Date_list via get_factor_data.
    """
    df_factor = get_factor_data(fn, df_filtered, Date_list)
    rank_ic = list()
    #norm_ic = list()
    factor_dateindex = pd.DataFrame(df_factor.t_date.unique())
    factor_dateindex = factor_dateindex.iloc[:,0].tolist()
    print('计算IC因子值')
    cal_time = time.time()
    for dt in tqdm(factor_dateindex):
        # Spearman correlation of that date's cross-section.
        rank_ic.append(df_factor[df_factor.t_date == dt][['value','ret_nextday']].corr(method='spearman').iloc[0][1])
        #norm_ic.append(df_factor[df_factor.t_date == dt][['value','ret_next5m']].corr(method='pearson').iloc[0][1])
    print('计算用时: {:.2f}s'.format(time.time() - cal_time))
    data_ic = pd.DataFrame()
    data_ic['{}'.format(fn)] = pd.Series(rank_ic, index=factor_dateindex)
    #data_ic['norm_ic'] = pd.Series(norm_ic, index=factor_dateindex)
    # Treat exact-zero ICs as missing (degenerate cross-sections).
    data_ic[data_ic==0] = np.nan
    
    ic_avg = data_ic.mean(0) # mean IC over the window
    ic_nav= data_ic.copy()
    ic_nav = np.cumsum(ic_nav) # cumulative IC curve
    ic_nav.name = fn
    
    # Persist the IC series to the MySQL store.
    # NOTE(review): credentials duplicated from cal_return — consider sharing.
    conf = {'username': 'hs_wangwenjie',
            'password': 'hs_wangwenjie#A0',
            'host': '192.168.201.185',
            'port': 3306,
            'db': 'wangwj'}
    url = 'mysql+pymysql://%s:%s@%s:%s/%s' % (conf['username'], conf['password'], conf['host'], conf['port'], conf['db'])
    engine = create_engine(url)
    # first_time_to_sql (initial full load, kept for reference):
    #f_data = pd.DataFrame(data_ic.stack()).reset_index()
    #f_data.columns = ['t_date','factor_name','ic']
    #f_data.t_date = [datetime.date.strftime(x,'%Y-%m-%d') for x in pd.to_datetime(f_data.t_date)]
    #f_data.insert(3, 't_entry_time', datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
    #f_data.to_sql('convfactor_ic', engine, index=False, if_exists='append')
    
    # Incremental update: append only rows newer than the latest stored date.
    query = ("""select DISTINCT t_date from convfactor_ic where factor_name = '%s' ORDER BY t_date""") % (fn)
    last_date = pd.read_sql_query(query, engine).iloc[-1].tolist()[0]
    #first_date = pd.read_sql_query(query, engine).iloc[0].tolist()[0]
    f_data = pd.DataFrame(data_ic.stack()).reset_index()
    f_data.columns = ['t_date','factor_name','ic']
    f_data.t_date = [datetime.date.strftime(x,'%Y-%m-%d') for x in pd.to_datetime(f_data.t_date)]
    f_data = f_data[f_data.t_date > pd.to_datetime(last_date).strftime('%Y-%m-%d')]
    # NOTE(review): the line below re-converts t_date, which is already a
    # '%Y-%m-%d' string after the conversion above — appears redundant.
    f_data.t_date = [datetime.date.strftime(x,'%Y-%m-%d') for x in pd.to_datetime(f_data.t_date)]
    #f_data = f_data[f_data.t_date < first_date.strftime('%Y-%m-%d')]
    f_data.insert(3, 't_entry_time', datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
    f_data.to_sql('convfactor_ic', engine, index=False, if_exists='append')
    
    return ic_avg, ic_nav


#%% excute
if __name__ == '__main__':
    #end = '2025-01-08'
    end = datetime.datetime.strftime(datetime.datetime.today() - datetime.timedelta(days=1), '%Y-%m-%d')
    #start = '2022-06-30'
    start = datetime.datetime.strptime(end, '%Y-%m-%d') - datetime.timedelta(days=90)
    method = 'close'
    freq = 1
    path = os.getcwd()
    new_dir = os.path.join(path, 'output/daily_tracking')
    if not os.path.exists(new_dir):
        os.makedirs(new_dir)
    
    print('源数据初筛&合并处理')
    data_filter.data_filter()
    
    print('更新因子数据')
    lst_sec = [
               'sec_convprice',
               'sec_convsize',
               'sec_conv_stocksize',
               'sec_ytm',
               'sec_bond_premium',
               'sec_IV_BS',
               'sec_IV_delta',
               'sec_weightskew_63D',
               'sec_convturnover',
               'sec_volatility5d',
               'sec_conv_amplitude',
               'sec_volstability_10d',
               'sec_ret',
               'sec_priceratio_5d',
               'sec_convPremium',
               'sec_modified_premium',
               'sec_double_low',
               'sec_amplitude_delta',
               'sec_ret_delta',
               'sec_stock_mv',
               'sec_stock_ret_20D',
               'sec_EP_net_ttm',
               'sec_EP_deducted_ttm',
               'sec_SUE',
               'sec_SUE_after'
               ]
        
    lst_ts = [
              'ts_convprice_roll20d',
              'ts_convturnover_roll20d',
              'ts_volatility5d_roll20d',
              'ts_amplitude_roll20d',
              'ts_conv_premium_roll20d',
              'ts_modified_premium_roll20d',
              'ts_double_low_roll20d',
              'ts_ret_delta_rol20d',
              'ts_bond_premium_roll20d',
              'ts_ret_roll20d',
              'ts_IV_delta_roll20d',
              'ts_IV_BS_roll20d',
              'ts_ytm_roll20d',
              'ts_volumerank_roll10d',
              ]
    
    print('更新截面类因子')
    for i in lst_sec:
        print('正在更新{}因子'.format(i))
        factor_calculate.output_database(i, '2018-01-02', str(date.today()), freq, first_time_to_sql=False)
    
    print('更新时序类因子')
    for i in lst_ts:
        print('正在更新{}因子'.format(i))
        factor_calculate.output_database(i, '2018-01-02', str(date.today()), freq, first_time_to_sql=False)
    
    
    print('因子收益计算')
    # 提取基准指数
    df_index = get_index(start, end, method) 
    df_index = df_index / df_index.iloc[0,:]
    
    # 提取转债收益率
    df_filtered = pd.read_csv(os.path.join('data', 'data_after_filter.csv'))
    df_filtered['Date'] = pd.to_datetime(df_filtered['Date'])
    if method == 'close':
        df_filtered = df_filtered[['Date','c_bondCode','ret_nextday_close']]
    else:
        df_filtered = df_filtered[['Date','c_bondCode','ret_nextday_open']]
    df_filtered = df_filtered[(df_filtered.Date >= start) & (df_filtered.Date <= end)]
    Date_list = df_filtered[df_filtered.Date >= start].Date.unique()
    df_filtered = df_filtered.set_index(['Date','c_bondCode'])
    
    # 提取数据
    factor_names = [
                    'sec_convprice',
                    'sec_convsize',
                    'sec_conv_stocksize',
                    'sec_ytm',
                    'sec_bond_premium',
                    'sec_IV_BS',
                    'sec_IV_delta',
                    'sec_weightskew_63D',
                    'sec_convturnover',
                    'sec_volatility5d',
                    'sec_conv_amplitude',
                    'sec_volstability_10d',
                    'sec_ret',
                    'sec_priceratio_5d',
                    'sec_convPremium',
                    'sec_modified_premium',
                    'sec_double_low',
                    'sec_amplitude_delta',
                    'sec_ret_delta',
                    'sec_stock_mv',
                    'sec_stock_ret_20D',
                    'sec_EP_net_ttm',
                    'sec_EP_deducted_ttm',
                    'sec_SUE',
                    'sec_SUE_after',
                    
                    'ts_convprice_roll20d',
                    'ts_convturnover_roll20d',
                    'ts_volatility5d_roll20d',
                    'ts_amplitude_roll20d',
                    'ts_conv_premium_roll20d',
                    'ts_modified_premium_roll20d',
                    'ts_double_low_roll20d',
                    'ts_ret_delta_rol20d',
                    'ts_bond_premium_roll20d',
                    'ts_ret_roll20d',
                    'ts_IV_delta_roll20d',
                    'ts_IV_BS_roll20d',
                    'ts_ytm_roll20d',
                    'ts_volumerank_roll10d'
                    ]

    ret_long = [] # 多头
    ret_excess = [] # 超额
    day_change = [] # 换手
    for fn in factor_names:
        print(fn)
        df_long_ret, df_excess_ret, df_change = cal_return(fn, method)
        ret_long.append(df_long_ret)
        ret_excess.append(df_excess_ret)
        day_change.append(df_change)
        ic_avg, ic_nav = cal_ic(fn)
    
    # 表格输出
    ret_long = pd.concat(ret_long,axis=1).T
    ret_excess = pd.concat(ret_excess,axis=1).T
    day_change = pd.concat(day_change,axis=1).T
    wb = pd.ExcelWriter(os.path.join(new_dir, "日频因子收益跟踪_{}_{}.xlsx").format(method, end))
    ret_long.to_excel(wb, sheet_name = '多头')
    ret_excess.to_excel(wb, sheet_name = '超额')
    day_change.to_excel(wb, sheet_name = '换手')
    wb.save()
    
    
    