#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Datetime: 2020/9/1 16:33
# @Author  : CHEN Wang
# @Site    :
# @File    : fac_corr_analysis.py
# @Software: PyCharm

"""
脚本说明: correlation analysis for factors
"""

import pandas as pd
import numpy as np
import os
from pylab import mpl
import seaborn as sns
from matplotlib import pyplot as plt
from matplotlib.collections import LineCollection
from sklearn import manifold
from sklearn.cluster import KMeans, AgglomerativeClustering, Birch, affinity_propagation
from sklearn.covariance import GraphicalLassoCV
from statsmodels.stats.outliers_influence import variance_inflation_factor
from quant_researcher.quant.factors.factor_preprocess.preprocess import standardize
from quant_researcher.quant.factors.factor_analysis.factor_analyser.factor_analyse import FactorAnalyseMachine
from quant_researcher.quant.project_tool.localize import TEST_DIR, DATA_DIR


mpl.rcParams['font.sans-serif'] = ['SimHei']  # use SimHei so Chinese (CJK) labels render correctly
mpl.rcParams['axes.unicode_minus'] = False  # fix minus-sign rendering when a CJK font is active


#%% 横截面因子处理方法
def hist_mean_corr(factor_df):
    """
    Compute the historical mean of cross-sectional factor-exposure correlations.

    Draws a heatmap of the mean correlation matrix; when exactly two factors
    are given, additionally plots their pairwise correlation over time.

    :param pd.DataFrame factor_df: must contain a ``tradedate`` column and an
        asset-code column (``'<asset_type>code'``, e.g. ``stockcode`` for stocks,
        ``fundcode`` for funds) as the first two columns; every remaining
        column is a factor value.
               tradedate   stockcode  cetop   quick_ratio
        1      2019-12-02    000002  0.1927     0.414500
        2      2019-12-02    000004 -0.0363     7.842500
        3      2019-12-02    000005  0.0212     0.948700
        4      2019-12-02    000006  0.2008     0.905100
        5      2019-12-02    000007 -0.0066     2.415800
    :return: pd.DataFrame, mean correlation matrix indexed by factor name, e.g.
                       cetop  quick_ratio
        cetop        1.000000    -0.019941
        quick_ratio -0.019941     1.000000
    """
    factors = factor_df.columns[2:].tolist()
    # Per-date cross-sectional correlation matrices, stacked into a frame with
    # a (tradedate, factor) MultiIndex.
    corr_df = factor_df.groupby('tradedate')[factors].corr()
    # Average each factor's correlation row over all dates. groupby sorts the
    # factor level alphabetically, so explicitly reindex back to the original
    # factor order — the previous implementation relabeled the sorted rows
    # with `index=factors`, mislabeling them whenever the factors were not
    # already in alphabetical order. Note: dates whose correlation is NaN are
    # now skipped instead of propagating NaN into the mean.
    corr_mean = (corr_df.groupby(level=1).mean()
                 .reindex(index=factors, columns=factors)
                 .astype(float))
    sns.heatmap(corr_mean, annot=True, vmax=1, vmin=-1, square=True, annot_kws={'size': 12}, cmap='viridis', fmt='.3f')
    plt.show()

    if len(factors) == 2:
        cor_ts = corr_df.iloc[:, 0]
        # Drop the diagonal entries (self-correlation == 1), leaving the
        # off-diagonal pairwise correlation per date.
        cor_ts2 = cor_ts.where(cor_ts != 1).dropna()
        cor_ts2 = cor_ts2.reset_index().drop(columns=['level_1']).set_index('tradedate')
        plt.plot(cor_ts2)
        plt.xticks(rotation=60)
        plt.show()
    return corr_mean


# IC/RankIC相关系数矩阵
def IC_corr(df, begin, end, asset_type, direction, period='monthly', freq=1, winsor=True,
            standard=True, rank=True):
    """
    Compute the correlation matrix of the factors' IC/RankIC time series and
    plot it as a heatmap.

    :param df: pd.DataFrame, must contain a ``tradedate`` column and an
        asset-code column named ``'<asset_type>code'`` (e.g. ``stockcode`` for
        stocks, ``fundcode`` for funds); every remaining column is a factor value.
    :param begin: str, analysis start date
    :param end: str, analysis end date
    :param asset_type: str, one of 'stock', 'manager', 'fund', 'sw', 'csi';
        'sw' and 'csi' denote SW level-1 and CSI level-1 industries respectively
    :param direction: list, per-factor direction: 1 for positive, -1 for negative
    :param period: str, rebalancing period
    :param freq: int, rebalancing frequency
    :param winsor: bool, whether to winsorize
    :param standard: bool, whether to standardize
    :param rank: bool, whether to use rank (Spearman) correlation
    :return: pd.DataFrame, correlation matrix of the per-factor IC time series
    """
    df[f'{asset_type}code'] = df[f'{asset_type}code'].astype(str)
    FactorAnalyser = FactorAnalyseMachine(factor_name='',  # name of the factor under analysis
                                          begin_date=begin,  # analysis start date
                                          end_date=end,  # analysis end date
                                          external_data=True,
                                          asset_type=asset_type,
                                          factor_data=df,
                                          direction=direction)  # 1 positive, -1 negative

    FactorAnalyser.param_setting(period=period,  # rebalancing period
                                 universe='HS300',  # universe applies to stocks only; funds/managers default to the SSE composite
                                 benchmark=None,  # benchmark code; 'avg' means the asset-pool average
                                 freq=freq,  # rebalancing frequency
                                 bool_exclude_industry=True,  # whether neutralization removes industry factors (stocks only)
                                 list_exclude_style=[])  # style factors removed by neutralization (stocks only)

    FactorAnalyser.initial_data(winsor=winsor, standard=standard, rank=rank, method='rank_product',
                                window=5, orthogonalize=False)  # data initialization

    factor_ic_ts_df = pd.DataFrame(FactorAnalyser.ic_ts)
    # Compute the correlation matrix once and reuse it for both the plot and
    # the return value (the original computed it twice).
    ic_corr_df = factor_ic_ts_df.corr()
    sns.heatmap(ic_corr_df, annot=True, vmax=1, vmin=-1, square=True,
                annot_kws={'size': 12}, cmap='viridis', fmt='.3f')
    plt.show()
    return ic_corr_df


# 方差膨胀系数
def VIF_check(df, begin, end, asset_type, direction, period, freq):
    """
    Check whether any factor has an excessive variance inflation factor (VIF).

    :param df: pd.DataFrame, must contain a ``tradedate`` column and an
        asset-code column named ``'<asset_type>code'`` (e.g. ``stockcode`` for
        stocks, ``fundcode`` for funds); every remaining column is a factor value.
    :param begin: str, analysis start date
    :param end: str, analysis end date
    :param asset_type: str, one of 'stock', 'manager', 'fund', 'sw', 'csi';
        'sw' and 'csi' denote SW level-1 and CSI level-1 industries respectively
    :param direction: list, per-factor direction: 1 for positive, -1 for negative
    :param period: str, rebalancing period
    :param freq: int, rebalancing frequency
    :return: pd.DataFrame with columns 'VIF Factor', 'features' and 'advice'
        ('drop' when VIF > 10, otherwise 'keep')
    """
    factors = df.columns.difference(['tradedate', f'{asset_type}code']).tolist()
    FactorAnalyser = FactorAnalyseMachine(factor_name='',  # name of the factor under analysis
                                          begin_date=begin,  # analysis start date
                                          end_date=end,  # analysis end date
                                          external_data=True,
                                          asset_type=asset_type,
                                          factor_data=df,
                                          direction=direction)  # 1 positive, -1 negative

    FactorAnalyser.param_setting(period=period,  # rebalancing period
                                 universe='HS300',  # universe applies to stocks only; funds/managers default to the SSE composite
                                 benchmark=None,  # benchmark code; 'avg' means the asset-pool average
                                 freq=freq,  # rebalancing frequency
                                 bool_exclude_industry=True,  # whether neutralization removes industry factors (stocks only)
                                 list_exclude_style=[])  # style factors removed by neutralization (stocks only)

    FactorAnalyser.get_asset_pool()
    FactorAnalyser.get_asset_data()

    # Price data must be fetched first (with any factor) so next-period
    # returns can be derived from the close prices.
    stock_returns = FactorAnalyser.all_stock_price.set_index(['tradedate', f'{asset_type}code']) \
        ['fap_close'].unstack().pct_change().shift(-1).fillna(0).stack().rename('ret').reset_index()
    stock_returns[f'{asset_type}code'] = stock_returns[f'{asset_type}code'].astype(str)
    # Merge next-period returns with the factor data for the VIF computation.
    # (Renamed from `all`, which shadowed the builtin of the same name; the
    # unused `y` extraction was dropped — VIF only needs the design matrix X.)
    merged = df.merge(stock_returns, on=['tradedate', f'{asset_type}code'], how='left')
    X = merged.loc[:, factors]
    vif = pd.DataFrame()
    vif['VIF Factor'] = [variance_inflation_factor(X.values, i) for i in range(X.shape[1])]
    vif['features'] = X.columns
    # Common rule of thumb: VIF > 10 signals problematic multicollinearity.
    vif['advice'] = ['drop' if ele > 10 else 'keep' for ele in vif['VIF Factor']]
    return vif


# %% 时间序列因子处理方法
# 检测相关性heatmap


# 提取一堆因子中非共线性部分： 方法有逐步回归；PCA; LASSO; Clustering）
def factor_clustering(factor_df, method='affinity_propagation', **kwargs):
    """
    Cluster the factors in the pool by the similarity of their time series.

    :param pd.DataFrame factor_df: one column per factor, indexed by date.
        The index must contain the dates '2020-01-01' and '2021-01-01', which
        are used to split the factors into three history buckets.
    :param str method: clustering method; supports 'k-means', 'Agglomerative',
        'Birch' and 'affinity_propagation' (default)
    :return: pd.DataFrame indexed by factor name with a 'label' column giving
        each factor's cluster id (ids are offset per bucket so they are
        globally unique)
    :raises NotImplementedError: if ``method`` is not one of the supported names
    """
    # Work on a deduplicated, sorted copy so the caller's DataFrame is not
    # mutated (the original modified the argument in place).
    factor_df = factor_df.drop_duplicates().sort_index()
    # Factors start at different dates; cluster them in three buckets split at
    # 2020-01-01 and 2021-01-01 so dropna() does not discard too much history.
    temp_factor_df = factor_df.T
    factor_df_with_long_history = temp_factor_df[~temp_factor_df['2020-01-01'].isnull()].T
    factor_df_with_short_history = temp_factor_df[temp_factor_df['2020-01-01'].isnull()]
    factor_df_with_short_history_2020 = factor_df_with_short_history[~factor_df_with_short_history['2021-01-01'].isnull()].T
    factor_df_with_short_history_2021 = factor_df_with_short_history[factor_df_with_short_history['2021-01-01'].isnull()].T

    cluster_df_list = []
    for df in [factor_df_with_short_history_2020, factor_df_with_short_history_2021, factor_df_with_long_history]:
        bucket_df = df.copy()
        # Drop dates with any missing factor value inside this bucket.
        bucket_df.dropna(how='any', axis=0, inplace=True)
        # Factor names for this bucket.
        factor_list = list(bucket_df.columns)
        # Standardize each factor series before clustering.
        filter_factor_df = bucket_df.apply(standardize, axis=0)

        if method in ['k-means', 'Agglomerative', 'Birch']:
            dataset = filter_factor_df.T
            if method == 'k-means':
                model = KMeans(n_clusters=100)
            elif method == 'Agglomerative':
                model = AgglomerativeClustering(n_clusters=100)
            else:
                model = Birch(threshold=0.01, branching_factor=100, n_clusters=100)
            model.fit(dataset)
            label_pred = model.labels_

        elif method == 'affinity_propagation':
            # Affinity propagation on the sparse covariance estimated by
            # graphical lasso; suited to relatively small factor pools.
            dataset = np.array(filter_factor_df)
            edge_model = GraphicalLassoCV(n_jobs=-2, cv=5)
            edge_model.fit(dataset)
            _, label_pred = affinity_propagation(edge_model.covariance_)
        else:
            raise NotImplementedError

        cluster_df = pd.DataFrame({'factor_name': factor_list, 'label': label_pred})
        cluster_df = cluster_df.set_index('factor_name')
        cluster_df.sort_values(by='label', inplace=True)
        cluster_df_list.append(cluster_df)

        # if method == 'affinity_propagation':
        #     visual_stock_relationship(dataset, edge_model, label_pred, factor_list)

    # Offset the labels of the later buckets so cluster ids never collide
    # across buckets (the frames only hold the 'label' column, so adding a
    # scalar shifts exactly that column).
    max_cluster_num1 = max(cluster_df_list[0]['label'])
    cluster_df_list[1] = cluster_df_list[1] + max_cluster_num1 + 1
    max_cluster_num2 = max(cluster_df_list[1]['label'])
    cluster_df_list[2] = cluster_df_list[2] + max_cluster_num2 + 1
    return pd.concat(cluster_df_list, axis=0)


def visual_stock_relationship(dataset, edge_model, labels, stock_names):
    """
    Visualize the cluster structure as a 2-D graph: each asset is a node
    placed by locally linear embedding, and edges show strong partial
    correlations from the fitted sparse-covariance model.

    Adapted from the scikit-learn "stock market structure" example; see:
        https://juejin.cn/post/6844903671415701518
        https://github.com/RayDean/MachineLearning/blob/master/FireAI_026_AffinityProp.ipynb
        https://blog.csdn.net/The_Time_Runner/article/details/89949921

    :param dataset: 2-D array-like, observations in rows and assets in columns
        (presumably standardized returns/series — TODO confirm against caller)
    :param edge_model: fitted covariance estimator exposing ``precision_``
        (e.g. GraphicalLassoCV)
    :param labels: array of cluster labels, one per asset
    :param stock_names: list of display names, one per asset
    """

    # Embed the assets into 2-D for plotting; LLE preserves each asset's
    # local neighborhood structure.
    node_position_model = manifold.LocallyLinearEmbedding(
        n_components=2, eigen_solver='dense', n_neighbors=6)

    embedding = node_position_model.fit_transform(dataset.T).T

    plt.figure(1, facecolor='w', figsize=(10, 8))
    plt.clf()
    ax = plt.axes([0., 0., 1., 1.])
    plt.axis('off')

    # Display a graph of the partial correlations: normalize the precision
    # matrix by its diagonal to obtain partial correlation coefficients.
    partial_correlations = edge_model.precision_.copy()
    d = 1 / np.sqrt(np.diag(partial_correlations))
    partial_correlations *= d
    partial_correlations *= d[:, np.newaxis]
    # Keep only sufficiently strong links (upper triangle, |coef| > 0.02).
    non_zero = (np.abs(np.triu(partial_correlations, k=1)) > 0.02)

    # Plot the nodes using the coordinates of our embedding
    plt.scatter(embedding[0], embedding[1], s=100 * d ** 2, c=labels,
                cmap=plt.cm.nipy_spectral)

    # Plot the edges
    start_idx, end_idx = np.where(non_zero)
    # a sequence of (*line0*, *line1*, *line2*), where::
    #            linen = (x0, y0), (x1, y1), ... (xm, ym)
    segments = [[embedding[:, start], embedding[:, stop]]
                for start, stop in zip(start_idx, end_idx)]
    values = np.abs(partial_correlations[non_zero])
    # Stronger partial correlations get hotter colors and thicker lines.
    lc = LineCollection(segments,
                        zorder=0, cmap=plt.cm.hot_r,
                        norm=plt.Normalize(0, .7 * values.max()))
    lc.set_array(values)
    lc.set_linewidths(15 * values)
    ax.add_collection(lc)

    # Add a label to each node. The challenge here is that we want to
    # position the labels to avoid overlap with other labels
    # NOTE(review): if all assets fall into one cluster, max(labels) is 0 and
    # the color normalization below divides by zero — confirm callers always
    # produce at least two clusters.
    n_labels = max(labels)
    for index, (name, label, (x, y)) in enumerate(
            zip(stock_names, labels, embedding.T)):

        # Nudge each text label away from its nearest neighbor in x/y so
        # labels are less likely to overlap.
        dx = x - embedding[0]
        dx[index] = 1
        dy = y - embedding[1]
        dy[index] = 1
        this_dx = dx[np.argmin(np.abs(dy))]
        this_dy = dy[np.argmin(np.abs(dx))]
        if this_dx > 0:
            horizontalalignment = 'left'
            x = x + .001
        else:
            horizontalalignment = 'right'
            x = x - .001
        if this_dy > 0:
            verticalalignment = 'bottom'
            y = y + .001
        else:
            verticalalignment = 'top'
            y = y - .001
        plt.text(x, y, name, size=10, fontproperties='SimHei',
                 horizontalalignment=horizontalalignment,
                 verticalalignment=verticalalignment,
                 bbox=dict(facecolor='w',
                           edgecolor=plt.cm.nipy_spectral(label / float(n_labels)),
                           alpha=.6))

    # Pad the axes slightly beyond the extreme node coordinates.
    plt.xlim(embedding[0].min() - .15 * embedding[0].ptp(),
             embedding[0].max() + .10 * embedding[0].ptp(), )
    plt.ylim(embedding[1].min() - .03 * embedding[1].ptp(),
             embedding[1].max() + .03 * embedding[1].ptp())

    plt.show()


if __name__ == '__main__':
    # --- Test data 1: A-share stock factor exposures (kept for reference) ---
    # factor_list = ['cetop', 'quick_ratio']
    # begin = '2020-05-30'
    # end = '2020-08-31'
    # asset_type = 'stock'
    # period = 'monthly'
    # freq = 1
    # winsor = True
    # standard = True
    # rank = True
    # all_factor_df = pd.DataFrame()
    # for factor in factor_list:
    #     # factor_df = get_stock_factor_exposure(factor, stock_pool=None, start=begin, end=end)
    #     factor_df = factor_exposure_related.get_stock_factor_exposure(factor, None, begin, end)
    #     factor_df = factor_df.rename(columns={'end_date': 'tradedate', 'stock_code': 'stockcode'})
    #     factor_df.set_index(['tradedate', f'{asset_type}code'], inplace=True)
    #     all_factor_df = pd.concat([all_factor_df, factor_df], axis=1)
    # df = all_factor_df.reset_index().dropna()
    # df['tradedate'] = df['tradedate'].astype(str)

    # --- Test data 2: crypto on-chain / trading / social indicators ---
    # from quant_researcher.quant.datasource_fetch.crypto_api.glassnode import get_prices, get_ret, all_http, \
    #     get_indicators
    # from quant_researcher.quant.datasource_fetch.crypto_api import sanbase
    # from quant_researcher.quant.datasource_fetch.crypto_api.self_defined import onchain_metrics_list, \
    #     trading_metrics_list

    # asset = 'BTC'
    # start_date = '2015-01-01'
    # end_date = '2021-11-13'
    # factor_list = ['Addresses in Loss', 'Velocity', 'Coin Years Destroyed (CYD)', 'Supply-Adjusted CYD',
    #                'Entity-Adjusted CYD', 'Entity- and Supply-Adjusted CYD', 'Stock-to-Flow Deflection',
    #                'Stablecoin Supply Ratio (SSR) Oscillator', 'Spent Outputs more 10y', 'Bitcoin Volatility Index (BVIN)',
    #                'Options ATM Implied Volatility (1 Week)', 'Options ATM Implied Volatility (1 Month)',
    #                'Cash-Margined Futures Open Interest', 'Crypto-Margined Futures Open Interest', 'Futures Volume Perpetual']
    #
    #
    # self_defined_metrics = onchain_metrics_list + trading_metrics_list
    # social_metrics = sanbase.get_social_metrics_list(asset='bitcoin')
    #
    # factor_df_list = []
    # for factor_name in factor_list:
    #     if factor_name in self_defined_metrics:
    #         if factor_name in onchain_metrics_list:
    #             file_path1 = os.path.join(DATA_DIR, f'onchain_data')
    #         if factor_name in trading_metrics_list:
    #             file_path1 = os.path.join(DATA_DIR, f'trading_data')
    #         file_name1 = os.path.join(file_path1, f'{factor_name}')
    #         origin_factor_df = pd.read_csv(f'{file_name1}.csv', index_col='end_date')
    #         origin_factor_df = origin_factor_df.loc[start_date:end_date, :]
    #     elif factor_name in social_metrics:
    #         # origin_factor_df = sanbase.get_indicators(indic_name=factor_name, asset=asset, start_date=start_date, end_date=end_date)
    #         file_path1 = os.path.join(DATA_DIR, f'social_data')
    #         file_name1 = os.path.join(file_path1, f'{factor_name}')
    #         origin_factor_df = pd.read_csv(f'{file_name1}.csv', index_col='end_date')
    #         origin_factor_df = origin_factor_df.loc[start_date:end_date, :]
    #     else:
    #         origin_factor_df = get_indicators(indic_name=factor_name, asset=asset, start_date=start_date, end_date=end_date)
    #     factor_df_list.append(origin_factor_df)
    # factor_df = pd.concat(factor_df_list, axis=1)
    # file_name = os.path.join(DATA_DIR, f'精选指标数据')
    # factor_df.to_csv(f'{file_name}.csv')

    # Load the previously exported indicator file ('精选指标数据' = "selected
    # indicator data") and run factor clustering on it.
    file_name = os.path.join(DATA_DIR, f'精选指标数据')
    factor_df = pd.read_csv(f'{file_name}.csv', index_col=0)
    factor_clustering(factor_df)


    # test hist_mean_corr
    # corr = hist_mean_corr(df)
    # test IC_corr
    # factor_ic_ts_corr = IC_corr(df, begin, end, asset_type, [1, 1], period, freq, winsor, standard, rank)
    # test VIF_check
    # vif = VIF_check(df, begin, end, asset_type, [1, 1], period, freq)
    # test orthogonalization
    # fac_o = Schimidt_orth(df.set_index(['tradedate', f'{asset_type}code']), asset_type)
    # fac_o2 = PCA_orth(df.set_index(['tradedate', f'{asset_type}code']), asset_type)
    # fac_o3 = symmetric_orth(df.set_index(['tradedate', f'{asset_type}code']), asset_type)