#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Datetime: 2020/9/1 19:07
# @Author  : CHEN Wang
# @Site    :
# @File    : factor_analyse_timing.py
# @Software: PyCharm

"""
脚本说明: test of efficiency of timing factors

参考： https://www.lagou.com/lgeduarticle/59526.html
"""
import os
import copy
import math
import sys
import scipy.stats as st
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import statsmodels.api as sm
import scipy.stats as stats
from scipy.signal import hilbert, butter, filtfilt
from quant_researcher.quant.project_tool.localize import DATA_DIR
from scipy.fftpack import fft, fftfreq, rfft, irfft, ifft
from quant_researcher.quant.factors.factor_preprocess.preprocess import de_noise, de_trend, TP_analysis
from dtw import accelerated_dtw
from statsmodels.tsa.stattools import grangercausalitytests, adfuller

plt.rcParams['font.sans-serif'] = ['SimHei']  # 用来正常显示中文
plt.rcParams['axes.unicode_minus'] = False  # 用来正常显示负号


def corr(data_df, method='pearson', if_plot=False):
    """
    Correlation between the first two columns of ``data_df``.

    Many packages implement Pearson correlation, including numpy, scipy and
    pandas. Pandas drops rows containing NaN before computing, while the
    numpy/scipy implementations require the caller to clean missing values
    manually — hence the explicit ``dropna()`` before the scipy call.

    :param pd.DataFrame data_df: data whose first two columns are correlated
    :param str method: only 'pearson' is supported
    :param bool if_plot: plot a rolling-median view of the data when True
    :return: float, the Pearson correlation coefficient
    :raises NotImplementedError: for any method other than 'pearson'
    """

    if method == 'pearson':
        corr = data_df.corr().iloc[0, 1]
        print(f"Pandas computed Pearson r: {corr}")

        # BUG FIX: the original referenced an undefined name ``df`` and used
        # invalid positional slicing on a DataFrame; use data_df and .iloc.
        clean = data_df.dropna()
        r, p = stats.pearsonr(clean.iloc[:, 0], clean.iloc[:, 1])
        print(f"Scipy computed Pearson r: {r} and p-value: {p}")
    else:
        raise NotImplementedError

    if if_plot:
        # rolling-window synchrony view
        f, ax = plt.subplots(figsize=(7, 3))
        data_df.rolling(window=30, center=True).median().plot(ax=ax)
        ax.set(xlabel='Time', ylabel='Pearson r')
        ax.set(title=f"Overall Pearson r = {np.round(corr, 2)}")

    return corr


def rolling_corr(asset_data, window=8, if_plot=True):
    """
    Rolling time-series correlation between next-period returns and the signal.

    :param pd.DataFrame asset_data: must contain a 'ret' column (asset returns)
                                    and a 'signal' column (1 = buy, 0 = sell)
    :param int window: rolling window length for the correlation
    :param bool if_plot: plot the data and the rolling correlation when True
    :return: pd.DataFrame with columns ['tradedate', 'cor']
    """
    # interpolate missing values first
    df_interpolated = asset_data.interpolate()

    data = df_interpolated.copy()
    # correlate the signal with the NEXT period's return
    data['ret'] = data['ret'].shift(-1).fillna(value=0)
    cor_ts = data.rolling(window=window, min_periods=1).corr()['ret'].reset_index()
    cor_ts = cor_ts.where(cor_ts['level_1'] == 'signal').dropna().iloc[:, [0, -1]]
    cor_ts.columns = ['tradedate', 'cor']

    if if_plot:
        f, ax = plt.subplots(2, 1, figsize=(14, 6), sharex=True)
        # BUG FIX: the original referenced an undefined name ``df`` here
        df_interpolated.rolling(window=30, center=True).median().plot(ax=ax[0])
        ax[0].set(xlabel='Frame', ylabel='Smiling Evidence')
        cor_ts.plot(ax=ax[1])
        ax[1].set(xlabel='Frame', ylabel='Pearson r')
        plt.suptitle("Smiling data and rolling window correlation")

    return cor_ts


def lag_corr(datax, datay, lag=0, wrap=False):
    """
    Lag-N cross correlation.

    :param pd.Series datax: test series
    :param pd.Series datay: benchmark series
    :param int lag: number of periods to shift datay
    :param bool wrap: if True treat datay as circular (values shifted off one
                      end re-enter at the other); if False (default) the shift
                      pads with NaN
    :return: float, Pearson correlation of datax with the shifted datay
    """
    if wrap:
        shiftedy = datay.shift(lag)
        if lag > 0:
            # wrap the tail values back onto the leading NaN edge
            shiftedy.iloc[:lag] = datay.iloc[-lag:].values
        elif lag < 0:
            # wrap the head values onto the trailing NaN edge
            shiftedy.iloc[lag:] = datay.iloc[:-lag].values
        # BUG FIX: lag == 0 needs no wrapping — the original attempted an
        # empty, length-mismatched assignment and raised.
        return datax.corr(shiftedy)

    else:
        corr = datax.corr(datay.shift(lag).dropna())
        return corr


def period_lag_corr(datax, datay, lag_period=12, if_plot=False, **kwargs):
    """
    Cross-correlation of datax against datay at every lead/lag offset in
    [-lag_period, lag_period].

    :param pd.Series datax: test series
    :param pd.Series datay: benchmark series
    :param int lag_period: maximum lead/lag; correlations are computed for
                           every offset in [-lag_period, lag_period]
    :param bool if_plot: draw the correlation-vs-offset curve when True
    :param kwargs:
        - file_name: basename used to save the figure
    :return: (corr_series, offset) where offset is the lead (<0) / lag (>0)
             of datax relative to datay with the highest correlation
    """

    lags = list(range(-lag_period, lag_period + 1))
    corr_series = pd.Series([lag_corr(datax, datay, k) for k in lags], index=lags)
    offset = corr_series.idxmax()  # negative: datax leads; positive: datax lags

    if if_plot:
        fig, axis = plt.subplots(figsize=(15, 3))
        axis.plot(corr_series)
        axis.axvline(0, color='k', linestyle='--', label='Center')
        axis.axvline(offset, color='r', linestyle='--', label='Peak_R')
        axis.set(title=f'Offset = {offset} frames\n  datax leads <> datay leads', xlabel='Offset', ylabel='Pearson r')
        plt.legend()

        file_name = kwargs.pop('file_name', None)
        if file_name is not None:
            plt.savefig(f'{file_name}.png', dpi=400, bbox_inches='tight')

        plt.show()

    return corr_series, offset


def window_period_lag_corr(datax, datay, lag_period=12, window=300, rolling=True, rolling_step=60, if_plot=False,
                           **kwargs):
    """
    period_lag_corr can depend on the chosen sample, so resample the data
    (fixed or rolling windows) and check whether the estimated lead/lag is
    stable across windows.

    :param pd.Series datax: test series
    :param pd.Series datay: benchmark series
    :param int lag_period: maximum lead/lag offset examined per window
    :param window: sample window size
    :param rolling: True for rolling windows, False for disjoint fixed windows
    :param rolling_step: step between rolling-window starts (default 60)
    :param if_plot: draw a heatmap of per-window correlations when True
    :param kwargs:
        - file_name: basename used to save the figure
    :return: (corr_df, offset_list) — per-window lag correlations and the
             per-window best offsets
    """

    corr_series_list = []
    offset_list = []

    # fixed (disjoint) windows
    if not rolling:
        no_splits = int(np.ceil(datax.shape[0] / window))
        for t in range(no_splits):
            # BUG FIX: slice positionally with .iloc — the original used
            # label-based .loc, which breaks on non-integer (e.g. datetime)
            # indexes and includes one extra row on integer indexes.
            d1 = datax.iloc[t * window:(t + 1) * window]
            d2 = datay.iloc[t * window:(t + 1) * window]
            corr_series, offset = period_lag_corr(d1, d2, lag_period)
            corr_series_list.append(corr_series)
            offset_list.append(offset)

    # rolling windows
    else:
        for t in range(0, len(datax) - window, rolling_step):
            d1 = datax.iloc[t:(t + window)]
            d2 = datay.iloc[t:(t + window)]
            corr_series, offset = period_lag_corr(d1, d2, lag_period)
            corr_series_list.append(corr_series)
            offset_list.append(offset)

    corr_df = pd.concat(corr_series_list, axis=1).T

    if if_plot:
        f, ax = plt.subplots(figsize=(10, 5))
        sns.heatmap(corr_df, cmap='RdBu_r', ax=ax)
        ax.set(title=f'Windowed Time Lagged Cross Correlation', xlabel='Offset', ylabel='Window epochs')

        file_name = kwargs.pop('file_name', None)
        if file_name is not None:
            plt.savefig(f'{file_name}.png', dpi=400, bbox_inches='tight')

        plt.show()

    return corr_df, offset_list


def regression_test(datax, datay, if_plot=False):
    """
    OLS regression diagnostics: coefficient, t-value, p-value and adjusted R².
    Reference: https://zhuanlan.zhihu.com/p/388954249

    :param pd.Series datax: x series (the factor)
    :param pd.Series datay: y series (the response)
    :param if_plot: scatter the data with the fitted line when True
    :return: pd.Series — the coefficient row for the factor (params, std err,
             t, p-values, rsquared, confidence bounds)
    """
    # fall back to 0 when the factor series carries no name
    factor_name = datax.name if datax.name else 0

    exog = sm.add_constant(datax)
    fitted = sm.OLS(datay, exog, missing='drop').fit()

    # full summary available via fitted.summary() if needed

    # collect the coefficient table
    coef_df = pd.DataFrame({"params": fitted.params,  # regression coefficients
                            "std err": fitted.bse,  # coefficient standard errors
                            "t": round(fitted.tvalues, 3),  # coefficient t-values
                            "p-values": round(fitted.pvalues, 3),  # coefficient p-values
                            "rsquared": round(fitted.rsquared_adj, 2)})

    # 95% confidence interval by default (conf_int accepts e.g. 0.05, 0.1)
    coef_df[['coef_0.025', 'coef_0.975']] = fitted.conf_int()

    fit_df = pd.concat([datax, fitted.fittedvalues], axis=1)
    fit_df.sort_values(by=factor_name, inplace=True)

    if if_plot:
        fig, axis = plt.subplots(figsize=(10, 5))
        axis.plot(datax, datay, 'o', color='blue')
        axis.plot(fit_df.iloc[:, 0], fit_df.iloc[:, 1], color='red')
        axis.set(title=f'regression test', xlabel='X', ylabel='Y')
        plt.legend()
        plt.show()

    return coef_df.loc[factor_name, :]


def lag_regression_test(datax, datay, lag=0, wrap=False, if_plot=False):
    """
    Regression test of datax against a lagged copy of datay.

    :param pd.Series datax: test series
    :param pd.Series datay: benchmark series
    :param int lag: number of periods to shift datay
    :param bool wrap: when True treat datay as circular instead of padding NaN
    :param if_plot: forwarded to regression_test
    :return: pd.Series — coefficient row (params / t / p-values / R²),
             renamed to the lag
    """
    lagged = datay.shift(lag)

    if wrap:
        # wrap the values pushed off one end back onto the NaN edge
        if lag > 0:
            lagged.iloc[:lag] = datay.iloc[-lag:].values
        else:
            lagged.iloc[lag:] = datay.iloc[:-lag].values

    outcome = regression_test(datax, lagged, if_plot=if_plot)
    outcome.name = lag
    return outcome


def period_lag_regression_test(datax, datay, lag_period=12, if_plot=False, **kwargs):
    """
    Run the lagged regression test at every offset in [-lag_period, lag_period].

    :param pd.Series datax: x series
    :param pd.Series datay: y series
    :param int lag_period: maximum lead/lag examined
    :param if_plot: plot the p-value curve when True
    :param kwargs:
        - file_name: basename used to save the figure and the result table
    :return: (regression_df, offset) — offset is the lag with the smallest
             p-value (negative: datax leads; positive: datax lags)
    """

    lags = range(-lag_period, lag_period + 1)
    regression_df = pd.concat([lag_regression_test(datax, datay, k) for k in lags], axis=1)

    p_value_series = regression_df.loc['p-values', :]
    offset = p_value_series.idxmin()  # negative: datax leads; positive: datax lags

    file_name = kwargs.pop('file_name', None)

    if if_plot:
        fig, axis = plt.subplots(figsize=(15, 3))
        axis.plot(p_value_series)
        axis.axvline(0, color='k', linestyle='--', label='Center')
        axis.axvline(offset, color='b', linestyle='--', label='min_p_value')
        axis.axhline(0.05, color='r', linestyle='--', label='P_value_threshold')
        axis.set(title=f'period_lag_regression_test\n Offset = {offset} frames\n datax leads <> datay leads',
                 xlabel='lag_period', ylabel='P_value')
        plt.legend()

        if file_name is not None:
            plt.savefig(f'{file_name}.png', dpi=400, bbox_inches='tight')
            regression_df.to_excel(f'{file_name}.xlsx')

        plt.show()

    return regression_df, offset


def window_period_lag_regression_test(datax, datay, lag_period=12, window=300, rolling=True, rolling_step=60,
                                      if_plot=False, **kwargs):
    """
    Stability check for period_lag_regression_test: repeat the lagged
    regression over sampled windows (fixed or rolling) and compare the
    per-window results.

    :param pd.Series datax: test series
    :param pd.Series datay: benchmark series
    :param int lag_period: maximum lead/lag examined per window
    :param window: sample window size
    :param rolling: True for rolling windows, False for disjoint fixed windows
    :param rolling_step: step between rolling-window starts (default 60)
    :param if_plot: draw a heatmap of per-window p-values when True
    :param kwargs:
        - file_name: basename used to save the figure
    :return: (p_values_df, offset_list) — per-window p-value curves and the
             per-window best offsets
    """

    p_values_series_list = []
    offset_list = []

    # fixed (disjoint) windows
    if not rolling:
        no_splits = int(np.ceil(datax.shape[0] / window))
        for t in range(no_splits):
            # BUG FIX: slice positionally with .iloc — the original used
            # label-based .loc, which breaks on non-integer (e.g. datetime)
            # indexes and includes one extra row on integer indexes.
            d1 = datax.iloc[t * window:(t + 1) * window]
            d2 = datay.iloc[t * window:(t + 1) * window]
            regression_df, offset = period_lag_regression_test(d1, d2, lag_period)
            p_values_series = (regression_df.loc['p-values', :]).rename(f'Window epochs {t}')
            p_values_series_list.append(p_values_series)
            offset_list.append(offset)

    # rolling windows
    else:
        for t in range(0, len(datax) - window, rolling_step):
            d1 = datax.iloc[t:(t + window)]
            d2 = datay.iloc[t:(t + window)]
            regression_df, offset = period_lag_regression_test(d1, d2, lag_period)
            p_values_series = (regression_df.loc['p-values', :]).rename(f'Window epochs {t}')
            p_values_series_list.append(p_values_series)
            offset_list.append(offset)

    p_values_df = pd.concat(p_values_series_list, axis=1).T

    if if_plot:
        f, ax = plt.subplots(figsize=(10, 5))
        sns.heatmap(p_values_df, cmap='RdBu_r', ax=ax)
        ax.set(title=f'Windowed Time Lagged p_values', xlabel='Offset', ylabel='Window epochs')

        file_name = kwargs.pop('file_name', None)
        if file_name is not None:
            plt.savefig(f'{file_name}.png', dpi=400, bbox_inches='tight')

        plt.show()

    return p_values_df, offset_list


def t_test(asset_data):
    """
    Mean t-test of timing-factor effectiveness: do the returns observed under
    buy signals differ significantly from those under sell signals?

    :param asset_data: DataFrame with a 'ret' column (asset returns) and a
                       'signal' column (1 = buy, 0 = sell)
    :return: (flag, p_value) — flag is True when the difference is significant
             at the 90% confidence level
    """
    buy_ret = asset_data.loc[asset_data['signal'] == 1, 'ret'].dropna()
    sell_ret = asset_data.loc[asset_data['signal'] == 0, 'ret'].dropna()

    # Levene test for equal variances (H0: both samples share one variance)
    _, levene_p = st.levene(buy_ret, sell_ret)

    # two-sample t-test; Welch's variant when the variances differ
    equal_var = levene_p >= 0.1
    _, p_value = st.ttest_ind(buy_ret, sell_ret, equal_var=equal_var)

    # significant at the 90% confidence level?
    flag = bool(p_value <= 0.1)

    return flag, p_value


def lag_t_test(asset_data, lag=0, wrap=False):
    """
    t-test of signal effectiveness with the return series lagged.

    :param asset_data: DataFrame with a 'ret' column (asset returns) and a
                       'signal' column (1 = buy, 0 = sell)
    :param int lag: number of periods to shift 'ret'
    :param bool wrap: if True treat 'ret' as circular (values shifted off one
                      end re-enter at the other); if False (default) the shift
                      pads with NaN
    :return: (flag, p_value) from t_test on the lagged data
    """

    data = asset_data.copy()

    # BUG FIX: in wrap mode the original never shifted the series — it only
    # overwrote the edge values, leaving the bulk of 'ret' unlagged. Shift
    # first, then wrap the values that fell off the end into the NaN edge.
    shifted_ret = data['ret'].shift(lag)
    if wrap:
        if lag > 0:
            shifted_ret.iloc[:lag] = data['ret'].iloc[-lag:].values
        elif lag < 0:
            shifted_ret.iloc[lag:] = data['ret'].iloc[:-lag].values
    data['ret'] = shifted_ret

    flag, p_value = t_test(data)

    return flag, p_value


def period_lag_t_test(asset_data, lag_period=12, if_plot=False, **kwargs):
    """
    Run the lagged t-test at every offset in [-lag_period, lag_period].

    :param asset_data: DataFrame with 'ret' (returns) and 'signal' (1 = buy,
                       0 = sell) columns
    :param int lag_period: maximum lead/lag examined
    :param if_plot: plot the p-value curve when True
    :param kwargs:
        - file_name: basename used to save the figure
    :return: (p_value_series, offset) — offset is the lag of signal relative
             to ret with the smallest p-value (negative: leads; positive: lags)
    """

    lags = list(range(-lag_period, lag_period + 1))
    p_value_series = pd.Series([lag_t_test(asset_data, k)[1] for k in lags], index=lags)
    offset = p_value_series.idxmin()  # negative: signal leads; positive: signal lags

    if if_plot:
        fig, axis = plt.subplots(figsize=(15, 3))
        axis.plot(p_value_series)
        axis.axvline(0, color='k', linestyle='--', label='Center')
        axis.axvline(offset, color='b', linestyle='--', label='min_p_value')
        axis.axhline(0.05, color='r', linestyle='--', label='P_value_threshold')
        axis.set(title=f'period_lag_t_test\n Offset = {offset} frames\n signal leads <> ret leads',
                 xlabel='lag_period', ylabel='P_value')
        plt.legend()

        file_name = kwargs.pop('file_name', None)
        if file_name is not None:
            plt.savefig(f'{file_name}.png', dpi=400, bbox_inches='tight')

        plt.show()

    return p_value_series, offset


def window_period_lag_t_test(asset_data, lag_period=12, window=300, rolling=True, rolling_step=60, if_plot=False,
                             **kwargs):
    """
    Stability check for period_lag_t_test: repeat the lagged t-test over
    sampled windows (fixed or rolling) and compare the per-window results.

    :param asset_data: DataFrame with 'ret' (returns) and 'signal' (1 = buy,
                       0 = sell) columns
    :param int lag_period: maximum lead/lag examined per window
    :param window: sample window size
    :param rolling: True for rolling windows, False for disjoint fixed windows
    :param rolling_step: step between rolling-window starts (default 60)
    :param if_plot: draw a heatmap of per-window p-values when True
    :param kwargs:
        - file_name: basename used to save the figure
    :return: (p_values_df, offset_list) — per-window p-value curves and the
             per-window best offsets
    """

    data = asset_data.dropna()

    # (label, start-position) pairs; the label mirrors the loop counter the
    # original code used for each mode (split index vs. start position)
    if not rolling:
        no_splits = int(np.ceil(data.shape[0] / window))
        windows = [(t, t * window) for t in range(no_splits)]
    else:
        windows = [(t, t) for t in range(0, data.shape[0] - window, rolling_step)]

    p_values_series_list = []
    offset_list = []
    for label, start in windows:
        chunk = data.iloc[start:start + window, :]
        p_value_series, offset = period_lag_t_test(chunk, lag_period)
        p_values_series_list.append(p_value_series.rename(f'Window epochs {label}'))
        offset_list.append(offset)

    p_values_df = pd.concat(p_values_series_list, axis=1).T

    if if_plot:
        fig, axis = plt.subplots(figsize=(10, 5))
        sns.heatmap(p_values_df, cmap='RdBu_r', ax=axis)
        axis.set(title=f'Windowed Time Lagged t_test p_values', xlabel='Offset', ylabel='Window epochs')

        file_name = kwargs.pop('file_name', None)
        if file_name is not None:
            plt.savefig(f'{file_name}.png', dpi=400, bbox_inches='tight')

        plt.show()

    return p_values_df, offset_list


def DTW(datax, datay, if_plot=False):
    """
    Dynamic time warping distance between two series.

    DTW computes, frame by frame, the euclidean distances between the two
    signals and finds the path that minimises the total matching cost, so it
    can compare signals of different lengths (it was originally developed for
    speech analysis). It cannot handle missing values, hence the interpolation
    below.

    https://www.youtube.com/watch?v=_K1OsqCicBY

    :param datax: first series
    :param datay: second series
    :param if_plot: show the accumulated-cost matrix with the optimal path
    :return: (d, path) — the minimum distance and the warping path
    """

    series_a = datax.interpolate().values
    series_b = datay.interpolate().values
    d, cost_matrix, acc_cost_matrix, path = accelerated_dtw(series_a, series_b, dist='euclidean')

    if if_plot:
        plt.imshow(acc_cost_matrix.T, origin='lower', cmap='gray', interpolation='nearest')
        plt.plot(path[0], path[1], 'w')
        plt.xlabel('datax')
        plt.ylabel('datay')
        plt.title(f'DTW Minimum Path with minimum distance: {np.round(d, 2)}')
        plt.show()

    return d, path


def phase_synchrony(datax, datay, if_plot=False):
    """
    Instantaneous phase synchrony between two oscillatory series.

    For series with oscillatory behaviour (e.g. EEG / fMRI) the instantaneous
    phase can be extracted with the Hilbert transform, which separates a
    signal's phase from its power (see
    https://www.youtube.com/watch?v=VyLU8hlhI-I). Comparing the phases moment
    by moment shows whether the two signals rise and fall together, without
    having to pick an arbitrary rolling-window width. For a comparison with
    windowed correlation see
    http://jinhyuncheong.com/jekyll/update/2017/12/10/Timeseries_synchrony_tutorial_and_simulations.html

    The series are band-pass filtered first; the band (0.01-0.5 with fs=30) is
    hard-coded below and may need tuning for other data.

    :param datax: first series (pandas; NaN values are interpolated away)
    :param datay: second series
    :param if_plot: draw filtered data, phase angles and synchrony when True
    :return: np.ndarray of per-sample synchrony values in [0, 1]
    """

    def butter_bandpass(lowcut, highcut, fs, order=5):
        # Butterworth band-pass design with edges normalised by Nyquist
        nyq = 0.5 * fs
        low = lowcut / nyq
        high = highcut / nyq
        b, a = butter(order, [low, high], btype='band')
        return b, a

    def butter_bandpass_filter(data, lowcut, highcut, fs, order=5):
        # zero-phase filtering so the phase estimate is not distorted
        b, a = butter_bandpass(lowcut, highcut, fs, order=order)
        y = filtfilt(b, a, data)
        return y

    lowcut = .01
    highcut = .5
    fs = 30.
    order = 1
    d1 = datax.interpolate().values
    d2 = datay.interpolate().values
    y1 = butter_bandpass_filter(d1, lowcut=lowcut, highcut=highcut, fs=fs, order=order)
    y2 = butter_bandpass_filter(d2, lowcut=lowcut, highcut=highcut, fs=fs, order=order)

    # instantaneous phase via the analytic (Hilbert) signal
    al1 = np.angle(hilbert(y1), deg=False)
    al2 = np.angle(hilbert(y2), deg=False)
    N = len(al1)

    # 1 when the phases agree, approaching 0 as they oppose
    phase_synchrony = 1 - np.sin(np.abs(al1 - al2) / 2)

    # BUG FIX: the original plotted unconditionally, ignoring if_plot
    if if_plot:
        f, ax = plt.subplots(3, 1, figsize=(14, 7), sharex=True)
        ax[0].plot(y1, color='r', label='y1')
        ax[0].plot(y2, color='b', label='y2')
        ax[0].legend(bbox_to_anchor=(0., 1.02, 1., .102), ncol=2)
        ax[0].set(xlim=[0, N], title='Filtered Timeseries Data')
        ax[1].plot(al1, color='r')
        ax[1].plot(al2, color='b')
        ax[1].set(ylabel='Angle', title='Angle at each Timepoint', xlim=[0, N])
        ax[2].plot(phase_synchrony)
        ax[2].set(ylim=[0, 1.1], xlim=[0, N], title='Instantaneous Phase Synchrony', xlabel='Time',
                  ylabel='Phase Synchrony')
        plt.tight_layout()
        plt.show()

    return phase_synchrony


def KL_divergence(datax, datay, lag=0, if_plot=False):
    """
    Kullback-Leibler divergence D_KL(P||Q) between two (normalised) series.

    From an information-theory view this is the information gain / relative
    entropy of one distribution over another. It is NOT a distance metric:
    it is asymmetric, so D_KL(P||Q) generally differs from D_KL(Q||P).

    :param datax: series treated as distribution P (after normalisation)
    :param datay: series treated as distribution Q (after normalisation)
    :param lag: shift applied to datay before comparison
    :param if_plot: unused; kept for interface parity with the other tests
    :return: float, the KL divergence
    :raises ValueError: if the aligned series differ in shape
    """

    # keep only index values present (and non-NaN) in both series
    joint = pd.concat([datax.to_frame(), datay.to_frame()], axis=1).dropna()
    datax = datax[joint.index]
    datay = datay[joint.index]

    if datax.shape != datay.shape:
        raise ValueError("datax and datay must have same shape.")

    # Note: scipy.stats.entropy would normalise the inputs itself
    # ("This routine will normalize pk and qk if they don't sum to 1."),
    # e.g. stats.entropy(list(datax), list(datay.shift(lag))).

    # epsilon avoids special-casing exact zeros in the log/division below
    epsilon = 0.00001
    p = datax + epsilon
    q = datay + epsilon

    # lag Q, drop the NaN edge, and realign P to the surviving index
    q = q.shift(lag).dropna()
    p = p[q.index]

    # normalise both to probability vectors
    p = p / p.sum()
    q = q / q.sum()

    return np.sum(p * np.log(p / q))


def period_KL_divergence(datax, datay, lag_period=12, if_plot=False):
    """
    KL divergence of datax against datay at every lead/lag offset in
    [-lag_period, lag_period]; returns the offset minimising the divergence.

    :param datax: series treated as P
    :param datay: series treated as Q (shifted per offset)
    :param int lag_period: maximum lead/lag examined
    :param if_plot: plot the KL-vs-offset curve when True
    :return: (KL_series, offset) — negative offset: datax leads; positive: lags
    """
    lags = list(range(-lag_period, lag_period + 1))
    KL_series = pd.Series([KL_divergence(datax, datay, k) for k in lags], index=lags)
    offset = KL_series.idxmin()  # negative: datax leads; positive: datax lags

    if if_plot:
        fig, axis = plt.subplots(figsize=(15, 3))
        axis.plot(KL_series)
        axis.axvline(0, color='k', linestyle='--', label='Center')
        axis.axvline(offset, color='r', linestyle='--', label='Peak_R')
        axis.set(title=f'Offset = {offset} \n  datax leads <> datay leads', xlabel='Offset', ylabel='min KL')
        plt.legend()
        plt.show()

    return KL_series, offset


def noise_volatility_ratio(datax, lamda=None, if_plot=False):
    """
    Ratio of noise volatility to de-noised-signal volatility.

    :param datax: raw series passed to de_noise
    :param lamda: smoothing parameter forwarded to de_noise
    :param if_plot: forwarded to de_noise
    :return: std(noise) / std(de-noised signal)
    """
    smooth_part, noise_part = de_noise(datax, lamda=lamda, if_plot=if_plot)
    return noise_part.std() / smooth_part.std()


def granger_test(asset_data, factor, p_value=0.05):
    """
    Granger-causality test of a raw factor against returns.

    :param asset_data: DataFrame with a 'ret' (returns) column and a column
                       named ``factor`` holding the factor data
    :param factor: str, factor name; a column with this name must exist in
                   asset_data
    :param p_value: significance threshold (currently unused; kept for
                    interface compatibility)
    :return: the grangercausalitytests result dict for lags 1..5
    """
    # NOTE(review): Granger causality assumes stationary inputs. An optional
    # preprocessing block (detrend/demean + ADF stationarity checks on both
    # the factor and return series) used to be sketched here in commented-out
    # form — confirm stationarity upstream before trusting these results.

    def get_Granger_optimal_lag(granger_test_result):
        """
        Pick the lag with the largest params_ftest F statistic, together with
        the mean p-value of all tests at that lag.

        :param granger_test_result: dict returned by grangercausalitytests
        :return: (optimal_lag, optimal_test_avg_p)
        """
        optimal_lag = -1
        F_test = -1.0
        for key in granger_test_result.keys():
            _F_test_ = granger_test_result[key][0]['params_ftest'][0]
            if _F_test_ > F_test:
                F_test = _F_test_
                optimal_lag = key
        optimal_test_avg_p = \
            pd.DataFrame.from_dict(granger_test_result[optimal_lag][0], orient='index')[1].mean()
        return optimal_lag, optimal_test_avg_p

    print(f'Testing whether factor {factor} is Granger Cause of returns')
    print('------------------------------------------------------------')
    granger_result1 = grangercausalitytests(asset_data[['ret', factor]].dropna(), maxlag=5)
    opt_lag, optimal_test_avg_p = get_Granger_optimal_lag(granger_result1)
    print(f'Factor {factor} optimal lag: {opt_lag} and optimal p_value:{optimal_test_avg_p}')
    print('\n')
    print(f'Testing whether return series is the Granger cause of factor {factor}')
    print('------------------------------------------------------------')
    # reverse-direction test (disabled):
    # grangercausalitytests(asset_data[[factor,'ret']].dropna(), maxlag=5)
    return granger_result1


class match_tp:
    def __init__(self, cleaned_factor_data, cleaned_target_data, unsmooth_factor_data, unsmooth_target_data,
                 data_frequency='month', if_plot=False):
        """
        Turning-point (TP) matching between a factor series and a target series.

        :param cleaned_factor_data: cleaned (filtered/smoothed) factor series
        :param cleaned_target_data: cleaned (filtered/smoothed) target series
        :param unsmooth_factor_data: raw factor series, used to pin down exact
                                     turning-point dates (filtering can shift them)
        :param unsmooth_target_data: raw target series, used to pin down exact
                                     turning-point dates (filtering can shift them)
        :param data_frequency: data frequency, one of 'day', 'month', 'season';
                               decides how much data to trim at both ends, the
                               minimum spacing between peaks and troughs, and
                               the search range when locating exact high/low
                               dates on the raw series
        :param bool if_plot: whether to plot, defaults to False

        """
        self.cleaned_factor_data = cleaned_factor_data
        self.cleaned_target_data = cleaned_target_data
        self.unsmooth_factor_data = unsmooth_factor_data
        self.unsmooth_target_data = unsmooth_target_data
        self.data_frequency = data_frequency

        # turning-point analysis of the factor and the target (TP_analysis is
        # a project helper from factor_preprocess.preprocess; presumably its
        # ana_tp returns a frame indexed by TP date with 'max'/'min' flag
        # columns — TODO confirm against the helper)
        Factor_TP = TP_analysis(self.cleaned_factor_data, self.unsmooth_factor_data, self.data_frequency)
        self.factor_tp_df = Factor_TP.ana_tp(if_plot)
        Target_TP = TP_analysis(self.cleaned_target_data, self.unsmooth_target_data, self.data_frequency)
        self.target_tp_df = Target_TP.ana_tp(if_plot)

    def search_matching_tp(self, n_p, n_l, if_plot, file_name):
        """
        For each factor turning point, look n_p periods back and n_l periods
        forward for a matching target turning point.

        :param int n_p: backward search range, decided by data_frequency
        :param int n_l: forward search range, decided by data_frequency
        :return:
            matched_max_tp_df
                                 factor_max           target_max       lag_period
                2003-08-31  2003-08-31 00:00:00  2004-03-31 00:00:00         -7
                2008-01-31  2008-01-31 00:00:00  2007-10-31 00:00:00          3
                2009-11-30  2009-11-30 00:00:00  2009-11-30 00:00:00          0
                2013-04-30  2013-04-30 00:00:00  2013-01-31 00:00:00          3
                2016-01-31  2016-01-31 00:00:00  2015-05-31 00:00:00          8
            lag_period : number of periods the factor TP lags the matched
            target TP; negative = leading, positive = lagging

        """

        max_tp_df = pd.DataFrame(columns=['factor_max', 'target_max', 'lag_period'])
        min_tp_df = pd.DataFrame(columns=['factor_min', 'target_min', 'lag_period'])

        for d in self.factor_tp_df.index:  # for each candidate factor TP, search the range for a matching target TP
            tp_pos = self.unsmooth_factor_data.index.tolist().index(d)  # positional index of the TP date
            begin_index = tp_pos - n_p
            end_index = tp_pos + n_l
            if begin_index < 0:
                begin_index = 0
            # date range in which to match the turning point
            target_range = self.unsmooth_factor_data.index.tolist()[begin_index:end_index]  # search date range
            # all target turning points inside the matching range
            # matched_target = self.target_tp_df[np.array(target_range[0] < self.target_tp_df.index) &
            #                                    np.array(self.target_tp_df.index < target_range[-1])]
            matched_target = self.target_tp_df.loc[target_range[0]:target_range[-1]]
            if self.factor_tp_df.loc[d, 'max'] == 1:  # the candidate factor TP is a local maximum
                matched_target_max = matched_target[matched_target['max'] == 1]
                if matched_target_max.shape[0] == 0:  # no matching local maximum found
                    max_tp_df.loc[d, 'factor_max'] = d
                    max_tp_df.loc[d, 'target_max'] = np.nan
                    max_tp_df.loc[d, 'lag_period'] = np.nan
                    continue
                else:
                    max_tp_df.loc[d, 'factor_max'] = d
                    max_tp_df.loc[d, 'target_max'] = matched_target_max.index[-1]
                    # lag in periods between the factor TP and the latest matched target TP
                    max_tp_df.loc[d, 'lag_period'] = self.unsmooth_factor_data.index.tolist().index(d) - \
                                                     self.unsmooth_factor_data.index.tolist().index(
                                                         matched_target_max.index[-1])

            elif self.factor_tp_df.loc[d, 'min'] == 1:  # the candidate factor TP is a local minimum
                matched_target_min = matched_target[matched_target['min'] == 1]
                if matched_target_min.shape[0] == 0:  # no matching local minimum found
                    min_tp_df.loc[d, 'factor_min'] = d
                    min_tp_df.loc[d, 'target_min'] = np.nan
                    min_tp_df.loc[d, 'lag_period'] = np.nan
                    continue
                else:
                    min_tp_df.loc[d, 'factor_min'] = d
                    min_tp_df.loc[d, 'target_min'] = matched_target_min.index[-1]
                    min_tp_df.loc[d, 'lag_period'] = self.unsmooth_factor_data.index.tolist().index(d) - \
                                                     self.unsmooth_factor_data.index.tolist().index(
                                                         matched_target_min.index[-1])

        self.max_tp_df = max_tp_df
        self.min_tp_df = min_tp_df

        if if_plot:
            self.plot_matched_tp(file_name)

        temp_tp1 = max_tp_df.rename(columns={'factor_max': 'factor_tp', 'target_max': 'target_tp'})
        temp_tp1['tp_type'] = 'max'
        temp_tp2 = min_tp_df.rename(columns={'factor_min': 'factor_tp', 'target_min': 'target_tp'})
        temp_tp2['tp_type'] = 'min'
        all_tp_df = pd.concat([temp_tp1, temp_tp2]).sort_values(by='factor_tp')  # matching status of every factor TP

        max_matched_tp_df = temp_tp1[~temp_tp1['target_tp'].isnull()]  # all matched factor peak TPs
        min_matched_tp_df = temp_tp2[~temp_tp2['target_tp'].isnull()]  # all matched factor trough TPs
        all_matched_tp_df = all_tp_df[~all_tp_df['target_tp'].isnull()]  # all matched factor TPs

        # matching statistics for the candidate factor turning points
        tp_statistics = {}

        tp_statistics['total_tp_num'] = all_tp_df.shape[0]  # total number of factor TPs
        tp_statistics['max_tp_num'] = max_tp_df.shape[0]  # number of factor peaks
        tp_statistics['min_tp_num'] = min_tp_df.shape[0]  # number of factor troughs

        tp_statistics['total_matched_tp_num'] = all_matched_tp_df.shape[0]  # number of TPs with a match
        tp_statistics['max_matched_tp_num'] = max_matched_tp_df.shape[0]  # number of matched peak TPs
        tp_statistics['min_matched_tp_num'] = min_matched_tp_df.shape[0]  # number of matched trough TPs

        tp_statistics['total_matched_pct'] = tp_statistics['total_matched_tp_num'] / tp_statistics['total_tp_num'] if \
            tp_statistics['total_tp_num'] else 0
        tp_statistics['max_matched_pct'] = tp_statistics['max_matched_tp_num'] / tp_statistics['max_tp_num'] if \
            tp_statistics['max_tp_num'] else 0
        tp_statistics['min_matched_pct'] = tp_statistics['min_matched_tp_num'] / tp_statistics['min_tp_num'] if \
            tp_statistics['min_tp_num'] else 0

        tp_statistics['total_mean_lag_period'] = all_matched_tp_df['lag_period'].mean()
        tp_statistics['total_std_lag_period'] = all_matched_tp_df['lag_period'].std()
        tp_statistics['max_mean_lag_period'] = max_matched_tp_df['lag_period'].mean()
        tp_statistics['max_std_lag_period'] = max_matched_tp_df['lag_period'].std()
        tp_statistics['min_mean_lag_period'] = min_matched_tp_df['lag_period'].mean()
        tp_statistics['min_std_lag_period'] = min_matched_tp_df['lag_period'].std()

        # share of matched TPs where the factor leads (lag_period < 0)
        if tp_statistics['total_matched_tp_num'] == 0:
            tp_statistics['total_leading_pct'] = 0
        else:
            tp_statistics['total_leading_pct'] = (all_matched_tp_df['lag_period'] < 0).astype(int).sum() / \
                                                 tp_statistics['total_matched_tp_num']

        if tp_statistics['max_matched_tp_num'] == 0:
            tp_statistics['max_leading_pct'] = 0
        else:
            tp_statistics['max_leading_pct'] = (max_matched_tp_df['lag_period'] < 0).astype(int).sum() / tp_statistics[
                'max_matched_tp_num']

        if tp_statistics['min_matched_tp_num'] == 0:
            tp_statistics['min_leading_pct'] = 0
        else:
            tp_statistics['min_leading_pct'] = (min_matched_tp_df['lag_period'] < 0).astype(int).sum() / tp_statistics[
                'min_matched_tp_num']

        self.all_tp_df = all_tp_df
        self.all_matched_tp_df = all_matched_tp_df
        self.matched_tp_statistics = pd.Series(tp_statistics)

        if file_name is not None:
            self.matched_tp_statistics.to_excel(f'{file_name}.xlsx')
            self.all_tp_df.to_excel(f'{file_name}-tp_data.xlsx')

    def plot_matched_tp(self, file_name=None):
        """
        Plot the raw factor and target series on twin axes, mark matched peaks
        (red) and troughs (green), and shade the span between each factor TP
        and its matched target TP.

        :param file_name: basename used to save the figure; None to skip saving
        """
        max_tp_df = copy.deepcopy(self.max_tp_df).dropna()
        min_tp_df = copy.deepcopy(self.min_tp_df).dropna()

        fig, ax1 = plt.subplots()
        line1 = ax1.plot(self.unsmooth_factor_data, color='orange', label=self.unsmooth_factor_data.name)
        ax1.plot(max_tp_df['factor_max'], self.unsmooth_factor_data.loc[max_tp_df['factor_max']], 'o', color='tab:red')
        ax1.plot(min_tp_df['factor_min'], self.unsmooth_factor_data.loc[min_tp_df['factor_min']], 'o',
                 color='tab:green')

        ax2 = ax1.twinx()
        line2 = ax2.plot(self.unsmooth_target_data, color='blue', label=self.unsmooth_target_data.name)
        ax2.plot(max_tp_df['target_max'], self.unsmooth_target_data.loc[max_tp_df['target_max']], 'o', color='tab:red')
        ax2.plot(min_tp_df['target_min'], self.unsmooth_target_data.loc[min_tp_df['target_min']], 'o',
                 color='tab:green')

        # shade the interval between each factor TP and its matched target TP
        for i in range(max_tp_df.shape[0]):
            ax1.axvspan(max_tp_df['factor_max'][i], max_tp_df['target_max'][i], alpha=0.3, color='red')
        for i in range(min_tp_df.shape[0]):
            ax1.axvspan(min_tp_df['factor_min'][i], min_tp_df['target_min'][i], alpha=0.3, color='green')

        lines = line1 + line2
        labs = [l.get_label() for l in lines]
        ax1.legend(lines, labs, loc='best')

        if file_name is not None:
            plt.savefig(f'{file_name}.png', dpi=400, bbox_inches='tight')

        plt.show()

    def match(self, if_plot=False, file_name=None):
        """
        Run turning-point matching with search ranges chosen by data frequency.

        NOTE(review): an unsupported data_frequency leaves n_p/n_l unset and
        the call below raises NameError — confirm inputs are validated upstream.

        :param if_plot: forwarded to search_matching_tp
        :param file_name: basename for saved figure/tables; None to skip saving
        :return: (all_tp_df, all_matched_tp_df, matched_tp_statistics)
        """
        if self.data_frequency == 'day':
            n_p = n_l = 30
        elif self.data_frequency == 'month':
            n_p = n_l = 15
        elif self.data_frequency == 'season':
            n_p = n_l = 8
        self.search_matching_tp(n_p, n_l, if_plot=if_plot, file_name=file_name)

        return self.all_tp_df, self.all_matched_tp_df, self.matched_tp_statistics


def hurst(ts):
    """
    Estimate the Hurst exponent of a time series via rescaled-range (R/S)
    analysis.

    https://github.com/RyanWangZf/Hurst-exponent-R-S-analysis-/blob/master/Hurst.py
    https://www.jianshu.com/p/77b957d5b554?from=singlemessage

    :param ts: 1-D sequence of numeric observations (at least 20 samples).
    :return: estimated Hurst exponent H:
             0.5 < H < 1 -> persistent (trending) series;
             0 < H < 0.5 -> anti-persistent (mean-reverting) series;
             H = 0.5     -> consistent with a random walk.
    :raises ValueError: if the series has fewer than 20 samples.
    """
    ts = list(ts)
    N = len(ts)
    if N < 20:
        raise ValueError(
            "Time series is too short! input series ought to have at least 20 samples!")

    max_k = int(np.floor(N / 2))
    R_S_dict = []
    for k in range(10, max_k + 1):
        R, S = 0, 0
        # split ts into subsets of length k
        subset_list = [ts[i:i + k] for i in range(0, N, k)]
        if np.mod(N, k) > 0:
            # drop the trailing partial subset so every subset has exactly k samples
            subset_list.pop()
        mean_list = [np.mean(x) for x in subset_list]
        for subset, subset_mean in zip(subset_list, mean_list):
            # R: range of the cumulative deviations from the subset mean
            cumsum_list = pd.Series(subset - subset_mean).cumsum()
            R += max(cumsum_list) - min(cumsum_list)
            # S: (population) standard deviation of the subset
            S += np.std(subset)
        R_S_dict.append({"R": R / len(subset_list), "S": S / len(subset_list), "n": k})

    # NOTE: removed a leftover debug print(R_S_dict) that polluted stdout.
    log_R_S = []
    log_n = []
    for item in R_S_dict:
        # np.spacing(1) guards against division by zero / log(0) on flat subsets
        R_S = (item["R"] + np.spacing(1)) / (item["S"] + np.spacing(1))
        log_R_S.append(np.log(R_S))
        log_n.append(np.log(item["n"]))

    # H is the slope of log(R/S) versus log(n)
    Hurst_exponent = np.polyfit(log_n, log_R_S, 1)[0]
    return Hurst_exponent


class hurst_analysis:
    """
    Rescaled-range (R/S) analysis of a numeric series stored in a list.

    The series is repeatedly halved into 2**i equal parts; for each scale the
    average rescaled range is computed, and the Hurst exponent is the slope of
    log10(R/S) against log10(part size).
    """

    def __init__(self):
        pass

    def run(self, series, exponent=None):
        '''
        Entry point: compute the Hurst exponent, printing any error
        instead of raising it.

        :type series: List
        :type exponent: int
        :rtype: float
        '''
        try:
            return self.calculateHurst(series, exponent)
        except Exception as e:
            print("   Error: %s" % e)

    def bestExponent(self, seriesLenght):
        '''
        Largest exponent e such that splitting the series into 2**e parts
        still leaves more than one sample per part.

        :type seriesLenght: int
        :rtype: int
        '''
        exp = 0
        while int(seriesLenght / int(math.pow(2, exp))) > 1:
            exp += 1
        return int(exp - 1)

    def mean(self, series, start, limit):
        '''
        Arithmetic mean of series[start:limit].

        :type start: int
        :type limit: int
        :rtype: float
        '''
        window = series[start:limit]
        return float(np.mean(window))

    def sumDeviation(self, deviation):
        '''
        Cumulative sum of the deviations.

        :type deviation: list()
        :rtype: list()
        '''
        return np.cumsum(deviation)

    def deviation(self, series, start, limit, mean):
        '''
        Deviations of series[start:limit] from the given mean.

        :type start: int
        :type limit: int
        :type mean: int
        :rtype: list()
        '''
        return [float(series[x] - mean) for x in range(start, limit)]

    def standartDeviation(self, series, start, limit):
        '''
        (Population) standard deviation of series[start:limit].

        :type start: int
        :type limit: int
        :rtype: float
        '''
        return float(np.std(series[start:limit]))

    def calculateHurst(self, series, exponent=None):
        '''
        Perform the R/S analysis, plot the log-log fit and return the slope
        (the estimated Hurst exponent).

        :type series: List
        :type exponent: int
        :rtype: float
        '''
        if exponent is None:
            exponent = self.bestExponent(len(series))

        sizeRange = []
        rescaledRange = []

        # Accumulate the rescaled range R/S over 2**i equal parts per scale.
        for i in range(exponent):
            partsNumber = int(math.pow(2, i))
            size = int(len(series) / partsNumber)
            sizeRange.append(size)

            total_rs = 0
            for part in range(partsNumber):
                start = int(size * part)
                limit = int(size * (part + 1))

                partMean = self.mean(series, start, limit)
                accumulated = self.sumDeviation(
                    self.deviation(series, start, limit, partMean))
                spread = float(max(accumulated) - min(accumulated))
                sigma = self.standartDeviation(series, start, limit)

                # skip degenerate (constant) parts to avoid dividing by zero
                if spread != 0 and sigma != 0:
                    total_rs += spread / sigma
            rescaledRange.append(total_rs)

        # average R/S per scale
        rescaledRangeMean = [rescaledRange[i] / int(math.pow(2, i))
                             for i in range(exponent)]

        # log-log regression: slope = Hurst exponent
        rescaledRangeLog = [math.log(rescaledRangeMean[i], 10) for i in range(exponent)]
        sizeRangeLog = [math.log(sizeRange[i], 10) for i in range(exponent)]

        slope, intercept = np.polyfit(sizeRangeLog, rescaledRangeLog, 1)
        ablineValues = [slope * x + intercept for x in sizeRangeLog]

        plt.plot(sizeRangeLog, rescaledRangeLog, '--')
        plt.plot(sizeRangeLog, ablineValues, 'b')
        plt.title(slope)
        # make both axes share the same limits
        limitUp = max(max(sizeRangeLog), max(rescaledRangeLog))
        limitDown = min(min(sizeRangeLog), min(rescaledRangeLog))
        plt.gca().set_xlim(limitDown, limitUp)
        plt.gca().set_ylim(limitDown, limitUp)
        print("Hurst exponent: " + str(slope))
        plt.show()

        return slope

    def quit(self):
        raise SystemExit()


def check_stationary(data_df):
    '''
    Run an ADF test on every column and return a Series of the names of the
    columns that are stationary (ADF p-value below 0.1); non-stationary
    columns are dropped.
    '''

    def _name_if_stationary(column):
        # adfuller returns (stat, p-value, ...); keep the column name only
        # when the unit-root null is rejected at the 10% level
        p_value = adfuller(column.dropna())[1]
        return column.name if p_value < 0.1 else np.nan

    return data_df.apply(_name_if_stationary, axis=0).dropna()


def backtest_select(ret, factor: pd.DataFrame, method, plot=True, layout=None, **method_args):
    """
    Select timing factors by backtest results. # todo this function is to be
    retired in favour of backtest_test below.

    :param ret: benchmark return series.
    :param factor: must be a DataFrame; to test a single factor pass double
                   brackets, i.e. factor[[column]].
    :param method: signal-generation function, supports rank_to_signal,
                   bband_to_signal and ma_crossing.
    :param plot: whether to plot the equity curves.
    :param layout: product of layout must equal the factor count, e.g. for 6
                   factors use (3, 2), (2, 3), (6, 1), ...
    :param method_args: extra keyword arguments for the signal method.
    :return: (p_factor, n_factor) — column indexes of the factors whose
             positive / negative strategy beat the benchmark (falling back to
             the two best performers when none do).
    """
    is_single = factor.shape[1] == 1

    # long ("positive") and short ("negative") signals per factor column
    long_signal = factor.apply(method, **method_args)
    short_signal = factor.apply(method, positive=False, **method_args)

    # broadcast benchmark returns into one column per factor
    bench_ret = pd.DataFrame([ret.values] * factor.shape[1],
                             columns=ret.index, index=factor.columns).T
    long_ret = bench_ret.where(long_signal > 0, 0)
    short_ret = bench_ret.where(short_signal > 0, 0)

    bench_equity = (bench_ret + 1).cumprod()
    long_equity = (long_ret + 1).cumprod()
    short_equity = (short_ret + 1).cumprod()

    def _winners(equity):
        # factors whose final strategy equity exceeds the benchmark;
        # if none do, keep the two best performers instead
        beating = equity.columns[(equity - bench_equity).iloc[-1] > 0]
        if len(beating) == 0:
            beating = equity.iloc[-1].sort_values().index[-2:]
        return beating

    p_factor = _winners(long_equity)
    n_factor = _winners(short_equity)

    if plot:
        bench_equity.columns = bench_equity.shape[1] * ['Benchmark']
        long_equity = long_equity.add_suffix('_positive')
        short_equity = short_equity.add_suffix('_negative')
        figsize = (16, 9) if is_single else (4 * layout[1], 2 * layout[0])
        sub_layout = None if is_single else layout
        use_subplots = not is_single
        title = (f"short:{method_args['short_p']}, long:{method_args['long_p']}"
                 if 'short_p' in method_args
                 else f"{method.__name__}: {method_args['period']}")
        ax1 = long_equity.plot(figsize=figsize, subplots=use_subplots,
                               layout=sub_layout, color='tomato', title=title)
        short_equity.plot(ax=ax1, figsize=figsize, subplots=use_subplots,
                          layout=sub_layout, color='yellowgreen')
        bench_equity.plot(ax=ax1, figsize=figsize, subplots=use_subplots,
                          layout=sub_layout, color='slateblue')
        plt.tight_layout()
        plt.show()

    return p_factor, n_factor


if __name__ == "__main__":
    # Smoke-test driver for the timing-factor diagnostics defined above.
    # NOTE(review): all input files ('ts_sample.csv' and the CSVs under
    # DATA_DIR) are expected on disk — confirm they exist before running.
    df = pd.read_csv('ts_sample.csv')
    df = df.interpolate()

    # quick tests of the pairwise diagnostics (kept below as commented examples)
    d1 = df['S1_Joy']
    d2 = df['S2_Joy']

    # rs = lag_corr(d1, d2, lag=-12)
    # rs = period_lag_corr(d1, d2, 120, if_plot=True)
    # rs = window_period_lag_corr(d1, d2, 120, if_plot=True)
    # rs = regression_test(d1, d2, if_plot=True)
    # rs = lag_regression_test(d1, d2, lag=120, if_plot=True)
    # rs = period_lag_regression_test(d1, d2, lag_period=120, if_plot=True)
    # rs = window_period_lag_regression_test(d1, d2, lag_period=10, window=2000, rolling_step=500, if_plot=True)

    # d2[d2 > 0] = 1
    # d2[d2 <= 0] = 0
    # df.columns = ['ret', 'signal']
    # rs = t_test(df)
    # rs = lag_t_test(df, lag=120)
    # rs = period_lag_t_test(df, lag_period=120, if_plot=True)
    # rs = window_period_lag_t_test(df, lag_period=10, window=2000, rolling_step=500, if_plot=True)

    # rs = DTW(d1, d2)
    # rs = phase_synchrony(d1, d2)
    # rs = period_KL_divergence(d1, d2, 120, if_plot=True)
    # rs = noise_volatility_ratio(d1)

    # %% test match_tp
    # load the data
    asset = 'BTC'
    start_date = '2015-01-01'
    end_date = '2021-10-13'
    file_name = os.path.join(DATA_DIR, f'close_ma140_ratio')
    origin_factor_series = pd.read_csv(f'{file_name}.csv', index_col=0)['close_ma140_ratio']
    file_name = os.path.join(DATA_DIR, f'{asset}_price_log_price')
    prices_df = pd.read_csv(f'{file_name}.csv', index_col=0)
    asset_prices_series = prices_df['log_prices']  # price series of the asset, used below for the turning-point matching test
    file_name = os.path.join(DATA_DIR, f'all_asset_ret')
    all_asset_ret = pd.read_csv(f'{file_name}.csv', index_col=0)
    asset_ret_series = all_asset_ret[asset]  # return series of the asset, used for the subsequent correlation effectiveness test

    # align factor and price on a common datetime index, dropping unmatched rows
    tp_df = pd.concat([origin_factor_series, asset_prices_series], axis=1)
    tp_df.index = pd.to_datetime(tp_df.index)
    tp_df.dropna(inplace=True)
    tp_factor_series = tp_df[origin_factor_series.name]
    tp_price_series = tp_df['log_prices']

    # de-trend (disabled)
    # de_trended_factor_data, factor_trend = de_trend(tp_factor_series, ts_freq='D', if_plot=True)
    # de_trended_prices_data, price_trend = de_trend(tp_price_series, ts_freq='D', if_plot=True)

    # de-noise before matching turning points
    cleaned_factor_data, factor_noise = de_noise(tp_factor_series, ts_freq='D', lamda=500, if_plot=True)
    cleaned_prices_data, price_noise = de_noise(tp_price_series, ts_freq='D', lamda=500, if_plot=True)

    match_tp1 = match_tp(cleaned_factor_data, cleaned_prices_data, tp_factor_series, tp_price_series,
                         data_frequency='day', if_plot=True)
    match_tp1.match()

    # test hurst_analysis (disabled)
    # hurst_analysis().run(sys.argv[1:])