#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Datetime: 2020/4/7 10:58
# @Author  : CHEN Wang
# @Site    :
# @File    : preprocess.py
# @Software: PyCharm

"""
脚本说明：因子指标常用的预处理函数： 包括横截面因子指标，以及时间序列因子指标
常用的函数例如：滤波，去噪，正交化，横截面中性化等等
"""

import copy
import numpy
import scipy.stats as st
from scipy.stats import mstats
import pandas as pd
import numpy as np
from sklearn import decomposition
from sklearn.linear_model import LinearRegression
from statsmodels import api as sm
from statsmodels.tsa.stattools import adfuller
from statsmodels.tsa.filters.hp_filter import hpfilter
from matplotlib import pyplot as plt
from quant_researcher.quant.project_tool.logger.my_logger import LOG
from quant_researcher.quant.datasource_fetch.factor_api import factor_exposure_related
from quant_researcher.quant.project_tool.rebalance_date import get_rebalance_date
from quant_researcher.quant.project_tool.wrapper_tools.common_wrappers import assert_series, assert_date_index
from quant_researcher.quant.datasource_fetch.macro_api.macro_value_related import get_indicator_value_and_info
from quant_researcher.quant.datasource_fetch.crypto_api.glassnode import get_prices, get_ret, all_http, get_indicators

plt.rcParams['font.sans-serif'] = ['SimHei']  # 用来正常显示中文
plt.rcParams['axes.unicode_minus'] = False  # 用来正常显示负号

date_name_list = ['date', 'enddate', 'end_date', 'tradedate', 'trade_date']


# %% 基础函数
def my_qcut2(factor_data, bins, cut_type='quantile'):
    """
    Custom grouping function: assign assets to groups by factor value.

    :param factor_data: cross-sectional or time-series factor values
    :param bins: if an int n, split into n groups (quantile groups for
                 cut_type='quantile', equal-width groups otherwise);
                 if a sequence of scalars, the values are the bin edges
                 (quantile edges such as [0, 0.25, 0.5, 0.75, 1] for
                 cut_type='quantile', absolute thresholds otherwise)
    :param cut_type: 'quantile' groups by quantiles of the factor (pd.qcut);
                     'non_quantile' groups by absolute values (pd.cut)
    :return: object shaped like factor_data with each position labelled by
             its group number (1..n, as float); if all values are NaN the
             input is returned unchanged
    """
    if cut_type == 'quantile':
        func = pd.qcut
    elif cut_type == 'non_quantile':
        func = pd.cut
    else:
        raise NotImplementedError
    try:
        if len(set(factor_data)) == 1 and len(factor_data) != 1:  # identical values: fall back to an arbitrary ordering
            factor_data = pd.Series(range(len(factor_data)))
        if isinstance(bins, int):
            group_num = bins
        else:
            # Previously only int/list were handled, leaving group_num unbound
            # for tuples/arrays; any sized sequence of edges is valid here.
            group_num = len(bins) - 1
        group_quantile = func(factor_data, bins, labels=[group + 1 for group in range(group_num)]).astype(float)
    except ValueError:
        print('因子值全部为nan，分组失败，返回nan值序列')
        group_quantile = factor_data

    return group_quantile


def winsorize(factor_data, methods='three_sigma', **kwargs):
    """
    Winsorize (clip the extremes of) a cross-sectional or time-series factor.

    The series is modified in place for 'three_sigma' / 'MAD' / 'quantile'
    and also returned; 'quantile_diy' returns a new series.

    :param pd.Series factor_data: cross-sectional or time-series factor values
    :param methods: clipping rule, one of
                    'three_sigma'  - clip at mean +/- 3 std (robust estimate)
                    'MAD'          - clip at median +/- 3 * 1.483 * MAD
                    'quantile'     - clip at Q75 + 3*IQR and Q25 - 3*IQR
                    'quantile_diy' - scipy mstats.winsorize with custom limits
    :param kwargs:
        - list limits: tail fractions for 'quantile_diy', default [0.025, 0.025]
        - tuple inclusive: passed to mstats.winsorize, default (False, False)
    :return: clipped pd.Series
    """

    if methods == 'three_sigma':
        # Estimate mean/std with one max and one min removed so obvious
        # outliers do not distort the thresholds. Work on a *copy*: the
        # original aliased factor_data here and permanently replaced its
        # extremes with NaN instead of clipping them.
        temp = factor_data.copy()
        temp[temp.idxmax()] = np.nan
        temp[temp.idxmin()] = np.nan
        temp = temp.dropna()
        mean = temp.mean()
        std = temp.std()

        # clip to mean +/- 3 sigma
        factor_data[factor_data > (mean + 3 * std)] = mean + 3 * std
        factor_data[factor_data < (mean - 3 * std)] = mean - 3 * std

    elif methods == 'MAD':
        md = factor_data.median()
        # Median absolute deviation; 1.483 is the consistency constant that
        # makes MADe comparable to a normal standard deviation.
        # (pd.Series.mad computed the *mean* absolute deviation -- the wrong
        # statistic for this formula -- and was removed in pandas 2.0.)
        MAD = (factor_data - md).abs().median()
        MADe = 1.483 * MAD

        factor_data[factor_data > (md + 3 * MADe)] = md + 3 * MADe
        factor_data[factor_data < (md - 3 * MADe)] = md - 3 * MADe

    elif methods == 'quantile':
        quantile_25 = factor_data.quantile(0.25)
        quantile_75 = factor_data.quantile(0.75)
        idr = quantile_75 - quantile_25  # inter-quartile range

        factor_data[factor_data > (quantile_75 + 3 * idr)] = quantile_75 + 3 * idr
        factor_data[factor_data < (quantile_25 - 3 * idr)] = quantile_25 - 3 * idr

    elif methods == 'quantile_diy':
        limits = kwargs.pop('limits', [0.025, 0.025])
        inclusive = kwargs.pop('inclusive', (False, False))
        data = mstats.winsorize(factor_data, limits, inclusive)
        return pd.Series(index=factor_data.index, data=data)

    return factor_data


def standardize(factor_data, methods='normal'):
    """
    Z-score a factor series (cross-sectional or time-series).

    :param pd.Series factor_data: factor values
    :param methods: 'normal' -> z-score the raw values;
                    'rank_normal' -> z-score the ranks of the values
    :return: standardized pd.Series (an all-zero series is returned untouched)
    """

    # an all-zero series cannot be standardized (std would be 0)
    if (factor_data == 0).sum() == len(factor_data):
        return factor_data

    if methods == 'normal':
        base = factor_data
    elif methods == 'rank_normal':
        # rank first, then z-score the ranks
        base = factor_data.rank()
    else:
        raise NotImplementedError

    return (base - base.mean()) / base.std()


def deal_na(data, start_month):
    """
    Forward-fill missing values and drop indicator columns that still have no
    data as of `start_month`; the returned frame starts at that month.

    `data` is indexed by date strings (e.g. '20200131'); the first six
    characters of each index value form the month key, so `start_month` must
    be a 'YYYYMM' string present in the index (otherwise ValueError).

    :param pd.DataFrame data: indicator table (forward-filled in place)
    :param str start_month: 'YYYYMM' month at which the sample starts
    :return: pd.DataFrame trimmed to rows from start_month on, with the
             never-populated columns removed
    """
    # position of the first row belonging to start_month
    months = [str(d)[:6] for d in data.index]
    start_month_pos = months.index(start_month)
    data.ffill(inplace=True)  # fillna(method='ffill') is deprecated
    # columns that are still NaN at the start month never had data before it
    drop_column = data.columns[data.iloc[start_month_pos].isna()].tolist()
    print("以下指标在指定日期没有数据：", drop_column)
    return data.drop(drop_column, axis=1).iloc[start_month_pos:]


# %% 横截面因子指标 - 单因子

"""
横截面因子指标入参格式规范如下，数据类型为pd.DataFrame
    资产代码 |日期        | 因子指标名称
    000001  |2010-01-03 | 0.2
    000002  |2010-01-03 | 0.15
日期字段名可以是date, enddate, end_date, tradedate, trade_date
"""


def cs_percentile_rank(indi_data):
    """
    Cross-sectional percentile rank per date.

    :param indi_data: DataFrame with at least three columns: asset code,
                      a date column (any name in date_name_list) and the
                      indicator column(s)
    :return: percentile ranks per date (NaNs rank first via na_option='top')
    """
    # work out which column holds the date
    matched = list(set(indi_data.columns).intersection(set(date_name_list)))
    if not matched:
        # The original `if date_name is None` test could never fire (the
        # intersection always yields a list), so this warning was never
        # logged; fall back to 'tradedate' as before.
        LOG.info(f'该指标中没有符合的日期字段名称，日期字段名称必须为{date_name_list}之一')
        date_name = 'tradedate'
    else:
        date_name = matched[0]

    ranks = indi_data.groupby(date_name).apply(
        lambda x: x.rank(numeric_only=True, pct=True, na_option='top'))
    return ranks


# %% 横截面因子指标 - 多因子


# %% 时间序列因子指标 - 单因子
"""
时序因子指标入参格式规范如下，数据类型为pd.Series
                | 因子指标名称
    |2010-01-03 | 0.2
    |2010-01-03 | 0.15
日期字段名可以是date, enddate, end_date, tradedate, trade_date
"""


def ts_percentile_rank_score(ts, way='expanding', rank_method='rank', periods=None, min_periods=1, positive=True,
                             scale=10, preprocess=False, **kwargs):
    """
    Convert an indicator series into a percentile-rank score on a
    `scale`-point scale; a higher score is better.

    :param pd.Series ts: indicator series
    :param str way: how the ranking sample is formed:
                    'expanding' - rank within all data up to each point
                    'rolling'   - rank within a moving window of `periods`
                    'whole'     - rank once over the full history (uses
                                  future data at every point)
    :param str rank_method: 'rank' -> percentile rank of the last value;
                            'quantile' -> min-max position of the last value
    :param int periods: rolling window length (required for way='rolling')
    :param int min_periods: minimum window length
    :param bool positive: rank ascending (True: larger value -> higher score)
    :param int scale: score scale, e.g. 10 -> scores in [0, 10]
    :param bool preprocess: de-trend / de-noise each window before ranking
    :param kwargs:
        - bool if_detrend: default False
        - bool if_denoise: default True (at least one of the two should be
          True when preprocess is used)
        - str detrend_method: default 'hp' ('hp' or 'ma')
        - str denoise_method: default 'hp'
        - dict detrend_kwargs: default {}
        - dict denoise_kwargs: default {}

    :return: score series on the given scale; higher is better
    """

    data = ts.copy()

    if_detrend = kwargs.pop('if_detrend', False)
    if_denoise = kwargs.pop('if_denoise', True)
    detrend_method = kwargs.pop('detrend_method', 'hp')
    detrend_kwargs = kwargs.pop('detrend_kwargs', {})
    denoise_method = kwargs.pop('denoise_method', 'hp')
    denoise_kwargs = kwargs.pop('denoise_kwargs', {})

    if len(data.shape) == 1:  # data is a 1-D (column) series
        if data.unique().shape[0] == 1:  # all values identical: ranking is undefined
            return pd.Series([np.nan for i in range(len(data))], index=data.index)
    else:  # data is 2-D (row layout): squeeze a single row into a series
        if data.shape[1] == 1:
            data = data.T.squeeze()

    if if_detrend and detrend_method == 'ma':
        # the moving-average trend is precomputed once on the full series
        trend_periods = detrend_kwargs['periods'] if 'periods' in detrend_kwargs.keys() else 120
        trend_min_periods = detrend_kwargs['min_periods'] if 'min_periods' in detrend_kwargs.keys() else 120
        ma_trend = data.rolling(trend_periods, trend_min_periods).mean()

    def last_rank_score(x):
        # Percentile score of the window's most recent value.
        # NOTE(review): assumes x arrives as a pd.Series (rolling/expanding
        # .apply may pass a raw ndarray depending on the pandas version /
        # raw flag) -- confirm against the pandas version in use.
        if rank_method == 'rank':
            rank = x.rank(pct=True, ascending=positive).iloc[-1]
        elif rank_method == 'quantile':
            rank_quantile = (x - x.min()) / (x.max() - x.min())
            if positive:
                rank = rank_quantile.iloc[-1]
            else:
                rank = 1 - rank_quantile.iloc[-1]
        return rank

    def last_rank_score_with_preprocess(x):
        # Same as last_rank_score, but de-trend / de-noise the window first.
        if if_detrend:
            if detrend_method == 'hp':
                x, x_trend = de_trend(x, method=detrend_method, ts_freq='D', **detrend_kwargs)
            elif detrend_method == 'ma':
                x = x - ma_trend  # index-aligned subtraction of the precomputed trend
            x.dropna(inplace=True)
        if if_denoise:
            x, x_noise = de_noise(x, method=denoise_method, ts_freq='D', **denoise_kwargs)
            x.dropna(inplace=True)

        if rank_method == 'rank':
            rank = x.rank(pct=True, ascending=positive).iloc[-1]
        elif rank_method == 'quantile':
            rank_quantile = (x - x.min()) / (x.max() - x.min())
            if positive:
                rank = rank_quantile.iloc[-1]
            else:
                rank = 1 - rank_quantile.iloc[-1]

        return rank

    if preprocess:
        func = last_rank_score_with_preprocess
    else:
        func = last_rank_score

    if way == 'expanding' or way == 'rolling':
        if if_detrend and detrend_method == 'ma':
            # drop the warm-up span where the MA trend is not yet defined
            data = data[trend_min_periods - 1:]

    if way == 'expanding':
        rank = data.expanding(min_periods).apply(func)
    elif way == 'rolling':
        rank = data.rolling(periods, min_periods).apply(func)
    elif way == 'whole':
        # whole-sample scoring in one pass
        if preprocess:
            if if_detrend:
                data, x_trend = de_trend(data, method=detrend_method, ts_freq='D', **detrend_kwargs)
                data.dropna(inplace=True)
            if if_denoise:
                data, x_noise = de_noise(data, method=denoise_method, ts_freq='D', **denoise_kwargs)
            if rank_method == 'rank':
                rank = data.rank(pct=True, ascending=positive)
            elif rank_method == 'quantile':
                rank_quantile = (data - data.min()) / (data.max() - data.min())
                if positive:
                    rank = rank_quantile
                else:
                    rank = 1 - rank_quantile

        else:
            if rank_method == 'rank':
                rank = data.rank(pct=True, ascending=positive)
            elif rank_method == 'quantile':
                rank_quantile = (data - data.min()) / (data.max() - data.min())
                if positive:
                    rank = rank_quantile
                else:
                    rank = 1 - rank_quantile

    # rank = np.ceil(rank.astype(float) * scale)
    rank = rank.astype(float) * scale  # scale up without rounding

    return rank


def ts_percentile_rank_signal(ts, is_score=False, way='expanding', period=None, min_period=1, preprocess=False,
                              positive=True, bins=[0, 0.5, 1], **kwargs):
    """
    Convert an indicator's historical percentile into a trading signal:
    1 = buy, 0 = sell.

    :param pd.Series ts: indicator (or score) series
    :param bool is_score: True if `ts` is already a 0-100 score, False if it
                          must first go through ts_percentile_rank_score
    :param str way: ranking method passed to ts_percentile_rank_score
    :param int period: rolling window length for the ranking
    :param int min_period: minimum window length for the ranking
    :param bool preprocess: de-trend/de-noise before ranking
    :param bool positive: True -> a high percentile means buy; False inverts
    :param list or int bins: e.g. [0, 0.5, 1] or [0, 0.2, 0.8, 1]; an int n
                             is equivalent to n equal-width bins
    :param kwargs:
        - str left_right: 'left' (default, level-based) or 'right'
          (crossing-based; only defined for a 3-bin split, len(bins) == 4)
    :return: series of 1 (buy) / 0 (sell); NOT shifted -- lag it one period
             before use
    """

    if is_score:
        testx = ts.copy()
    else:
        # BUGFIX: the keyword must be `periods` (the score function's actual
        # parameter name); `period=` used to vanish into **kwargs, leaving
        # periods=None so way='rolling' crashed.
        testx = ts_percentile_rank_score(ts, way=way, rank_method='rank', periods=period, min_periods=min_period,
                                         positive=True, scale=100, preprocess=preprocess)

    signal = testx.copy()

    if isinstance(bins, int):  # n equal-width bins -> explicit edges
        bins = [i / bins for i in range(bins + 1)]

    # the score is on a 0-100 scale, the bins are on 0-1
    threshold1 = 100 * bins[0]
    threshold2 = 100 * bins[1]
    threshold3 = 100 * bins[-2]
    threshold4 = 100 * bins[-1]
    left_right = kwargs.pop('left_right', 'left')
    if left_right == 'left':
        if positive:
            signal[(threshold2 > testx) & (testx >= threshold1)] = 'sell'  # bottom bin -> sell
            signal[(threshold4 >= testx) & (testx > threshold3)] = 'buy'  # top bin -> buy
        else:
            signal[(threshold2 > testx) & (testx >= threshold1)] = 'buy'  # bottom bin -> buy
            signal[(threshold4 >= testx) & (testx > threshold3)] = 'sell'  # top bin -> sell
        signal[(signal != 'sell') & (signal != 'buy')] = np.nan
    elif (len(bins) == 4) and (left_right == 'right'):  # crossing logic is only defined for a 3-way split
        for index_num in range(len(signal) - 1):
            # positional access (.iloc) so a DatetimeIndex works too; the
            # original label-based testx[i] / signal[0] broke on such indexes
            prev, curr = testx.iloc[index_num], testx.iloc[index_num + 1]
            if positive:
                if (prev < threshold2) and (curr >= threshold2):
                    signal.iloc[index_num + 1] = 'sell'
                elif (prev > threshold3) and (curr <= threshold3):
                    signal.iloc[index_num + 1] = 'buy'
                else:
                    signal.iloc[index_num + 1] = np.nan
            else:
                if (prev < threshold2) and (curr >= threshold2):
                    signal.iloc[index_num + 1] = 'buy'
                elif (prev > threshold3) and (curr <= threshold3):
                    signal.iloc[index_num + 1] = 'sell'
                else:
                    signal.iloc[index_num + 1] = np.nan
        signal.iloc[0] = np.nan

    signal = signal.ffill()  # trailing NaNs inherit the previous signal (fillna(method=) is deprecated)
    signal.dropna(inplace=True)  # leading NaNs are discarded
    signal[signal == 'buy'] = 1
    signal[signal == 'sell'] = 0

    # NOTE: the signal is not shifted; lag it one period downstream
    return signal


def ts_threshold_signal(ts, positive=True, bins=[150, 200, 800], **kwargs):
    """
    Convert an indicator into a signal by absolute thresholds:
    1 = buy, 0 = sell.

    :param ts: indicator series
    :param bool positive: True -> high values are bullish; False inverts
    :param list bins: e.g. [0.887, 1, 1.1, 1.35]; the outer extremes may be
                      omitted, e.g. [1] or [1, 1.1] (padded with +/- inf)
    :param kwargs:
        - str left_right: 'left' (default, level-based) or 'right'
          (crossing-based; only for a 3-way split)
    :return: series of 1 (buy) / 0 (sell); NOT shifted -- lag it one period
             before use
    """

    signal = ts.copy()
    if len(bins) in [1, 2]:  # extremes unknown: pad with +/- infinity
        threshold1 = float('-inf')
        threshold2 = bins[0]
        threshold3 = bins[-1]
        threshold4 = float('inf')
    else:
        threshold1 = bins[0]
        threshold2 = bins[1]
        threshold3 = bins[-2]
        threshold4 = bins[-1]

    left_right = kwargs.pop('left_right', 'left')
    if left_right == 'left':
        if positive:
            signal[(threshold2 > ts) & (ts >= threshold1)] = 'sell'  # bottom bin -> sell
            signal[(threshold4 >= ts) & (ts > threshold3)] = 'buy'  # top bin -> buy
        else:
            signal[(threshold2 > ts) & (ts >= threshold1)] = 'buy'  # bottom bin -> buy
            signal[(threshold4 >= ts) & (ts > threshold3)] = 'sell'  # top bin -> sell
        signal[(signal != 'sell') & (signal != 'buy')] = np.nan
    elif (len(bins) in [2, 4]) and (left_right == 'right'):  # crossing logic needs a 3-way split
        for index_num in range(len(signal) - 1):
            # positional access (.iloc) so non-integer indexes work too; the
            # original label-based ts[i] / signal[0] broke on such indexes
            prev, curr = ts.iloc[index_num], ts.iloc[index_num + 1]
            if positive:
                if (prev < threshold2) and (curr >= threshold2):
                    signal.iloc[index_num + 1] = 'sell'
                elif (prev > threshold3) and (curr <= threshold3):
                    signal.iloc[index_num + 1] = 'buy'
                else:
                    signal.iloc[index_num + 1] = np.nan
            else:
                if (prev < threshold2) and (curr >= threshold2):
                    signal.iloc[index_num + 1] = 'buy'
                elif (prev > threshold3) and (curr <= threshold3):
                    signal.iloc[index_num + 1] = 'sell'
                else:
                    signal.iloc[index_num + 1] = np.nan
        signal.iloc[0] = np.nan

    signal = signal.ffill()  # trailing NaNs inherit the previous signal (fillna(method=) is deprecated)
    signal.dropna(inplace=True)  # leading NaNs are discarded
    signal[signal == 'buy'] = 1
    signal[signal == 'sell'] = 0

    # NOTE: the signal is not shifted; lag it one period downstream
    return signal


def ts_mtm_reversal_signal(ts_list, positive=True, bins=[0.887, 0.9, 1, 1.1, 1.35], **kwargs):
    """
    Combine a momentum indicator and a reversal indicator into a signal:
    1 = buy, 0 = sell.

    :param list ts_list: [momentum series, reversal series]
    :param bool positive: True -> high momentum is bullish; False inverts
    :param list bins: thresholds, e.g. [0.887, 0.9, 1, 1.1, 1.35] or
                      [0.887, 0.9, 0.95, 1.05, 1.1, 1.35]; the outer extremes
                      may be omitted, e.g. [0.9, 1, 1.1] or
                      [0.9, 0.95, 1.05, 1.1] (padded with +/- infinity)
    :param kwargs:
        - str left_right: default 'left' ('right' is not implemented)
    :return: series of 1 (buy) / 0 (sell); NOT shifted -- lag it one period
             before use
    """
    mtm_ts = ts_list[0]
    reversal_ts = ts_list[-1]

    signal = mtm_ts.copy()
    if len(bins) in [3, 4]:  # extremes unknown: pad with +/- infinity
        threshold1 = float('-inf')
        threshold2 = bins[0]
        threshold3 = bins[1]
        threshold4 = bins[-2]
        threshold5 = bins[-1]
        threshold6 = float('inf')
    else:
        threshold1 = bins[0]
        threshold2 = bins[1]
        threshold3 = bins[2]
        threshold4 = bins[-3]
        threshold5 = bins[-2]
        threshold6 = bins[-1]

    left_right = kwargs.pop('left_right', 'left')
    if left_right == 'left':
        if positive:
            # buy: momentum above its threshold while the long-term indicator
            # has not broken its extreme high; or the long-term indicator
            # broke its extreme low (likely to revert upwards)
            mask1 = (threshold5 >= reversal_ts) & (mtm_ts > threshold4)
            mask2 = (threshold2 >= reversal_ts) & (reversal_ts > threshold1)
            signal[mask1 | mask2] = 'buy'
            # sell: momentum below its threshold while the long-term indicator
            # has not broken its extreme low; or it broke its extreme high
            # (likely to revert downwards)
            mask1 = (threshold2 <= reversal_ts) & (mtm_ts < threshold3)
            mask2 = (threshold6 >= reversal_ts) & (reversal_ts > threshold5)
            signal[mask1 | mask2] = 'sell'

        else:
            # inverted indicator: swap the buy/sell conditions above
            mask1 = (threshold2 <= reversal_ts) & (mtm_ts < threshold3)
            mask2 = (threshold6 >= reversal_ts) & (reversal_ts > threshold5)
            signal[mask1 | mask2] = 'buy'
            mask1 = (threshold5 >= reversal_ts) & (mtm_ts > threshold4)
            mask2 = (threshold2 >= reversal_ts) & (reversal_ts > threshold1)
            signal[mask1 | mask2] = 'sell'

        signal[(signal != 'sell') & (signal != 'buy')] = np.nan
    elif left_right == 'right':
        # crossing-based ("right-side") logic is intentionally not implemented
        pass

    signal = signal.ffill()  # trailing NaNs inherit the previous signal (fillna(method=) is deprecated)
    signal.dropna(inplace=True)  # leading NaNs are discarded
    signal[signal == 'buy'] = 1
    signal[signal == 'sell'] = 0

    # NOTE: the signal is not shifted; lag it one period downstream
    return signal


def ts_preprocess_detrend_denoise(ts, way='expanding', periods=None, min_periods=1, if_plot=False, **kwargs):
    """
    Clean an indicator series (de-trend and/or de-noise) using rolling,
    expanding, or whole-sample processing.

    :param pd.Series ts: indicator series
    :param str way: 'rolling', 'expanding' or 'whole'
    :param int periods: rolling window size
    :param int min_periods: minimum observation count (rolling / expanding)
    :param bool if_plot: plot the original / cleaned / residual series
    :param kwargs:
        - bool if_detrend: default False
        - bool if_denoise: default True (at least one of the two should be True)
        - str detrend_method: default 'hp' ('hp' or 'ma')
        - str denoise_method: default 'hp'
        - dict detrend_kwargs: default {}
        - dict denoise_kwargs: default {}
    :return: cleaned series; for rolling/expanding, each point is the last
             value of its own cleaned window
    """

    data = ts.copy()

    if_detrend = kwargs.pop('if_detrend', False)
    if_denoise = kwargs.pop('if_denoise', True)
    detrend_method = kwargs.pop('detrend_method', 'hp')
    detrend_kwargs = kwargs.pop('detrend_kwargs', {})
    denoise_method = kwargs.pop('denoise_method', 'hp')
    denoise_kwargs = kwargs.pop('denoise_kwargs', {})

    if if_detrend and detrend_method == 'ma':
        # the moving-average trend is precomputed once on the full series
        trend_periods = detrend_kwargs['periods'] if 'periods' in detrend_kwargs.keys() else 120
        trend_min_periods = detrend_kwargs['min_periods'] if 'min_periods' in detrend_kwargs.keys() else 120
        ma_trend = data.rolling(trend_periods, trend_min_periods).mean()

    def latest_value_with_preprocess(x):
        # Clean the window and return its most recent cleaned value.
        # NOTE(review): assumes x arrives as a pd.Series (rolling/expanding
        # .apply may pass a raw ndarray depending on the pandas version /
        # raw flag) -- confirm against the pandas version in use.
        if if_detrend:
            if detrend_method == 'hp':
                x, x_trend = de_trend(x, method=detrend_method, ts_freq='D', **detrend_kwargs)
            elif detrend_method == 'ma':
                x = x - ma_trend  # index-aligned subtraction of the precomputed trend
            x.dropna(inplace=True)
        if if_denoise:
            x, x_noise = de_noise(x, method=denoise_method, ts_freq='D', **denoise_kwargs)
            x.dropna(inplace=True)
        if x.empty:
            return np.NAN
        else:
            latest_value = x.iloc[-1]
            return latest_value

    if way == 'expanding' or way == 'rolling':
        if if_detrend and detrend_method == 'ma':
            # drop the warm-up span where the MA trend is not yet defined
            data = data[trend_min_periods - 1:]

    if way == 'expanding':
        cleaned_ts = data.expanding(min_periods).apply(latest_value_with_preprocess)
    elif way == 'rolling':
        cleaned_ts = data.rolling(periods, min_periods).apply(latest_value_with_preprocess)
    elif way == 'whole':
        # whole-sample cleaning in one pass (uses future data at every point)
        if if_detrend:
            data, x_trend = de_trend(data, method=detrend_method, ts_freq='D', **detrend_kwargs)
            data.dropna(inplace=True)
        if if_denoise:
            data, x_noise = de_noise(data, method=denoise_method, ts_freq='D', **denoise_kwargs)
        cleaned_ts = data

    if if_plot:
        f, ax = plt.subplots(figsize=(15, 3))
        ax.plot(ts, label='signal')
        ax.plot(cleaned_ts, label='cleaned_signal')
        ax.plot(ts - cleaned_ts, label='noise')
        ax.set(title=f'ts_preprocess_detrend_denoise')
        plt.legend()
        plt.show()

    return cleaned_ts


def de_trend(ts, method='hp', ts_freq='M', if_plot=False, **kwargs):
    """
    Remove the trend component from a series.

    :param pd.Series ts: time-series indicator
    :param str method: 'hp' (Hodrick-Prescott filter) or 'ma' (moving average
                       as the trend)
    :param str ts_freq: series frequency: 'Y', 'Q', 'M' or 'D' (default 'M');
                        selects the default HP lambda
    :param bool if_plot: plot signal / trend / cycle
    :param kwargs:
        - str file_name: save the figure under this name
        - float lamda: HP smoothing parameter (overrides the frequency default)
        - int periods: moving-average window (default 120)
        - int min_periods: moving-average minimum window (default = periods)
    :return: (cycle, trend) -- the de-trended series and the trend itself
    """

    if method == 'hp':
        # default lambda per frequency: 1600 for quarterly, scaled by the
        # fourth power of the observation-frequency ratio
        lamda = kwargs.pop('lamda', None)
        if lamda is None:
            if ts_freq == 'Q':
                lamda = 1600
            elif ts_freq == 'Y':
                lamda = 1600 * (1 / 4) ** 4
            elif ts_freq == 'M':
                lamda = 1600 * 3 ** 4
            elif ts_freq == 'D':
                lamda = 1600 * 9 ** 4
            else:
                # previously fell through with lamda=None and crashed inside
                # hpfilter with an opaque error
                raise ValueError(f"unsupported ts_freq: {ts_freq}")
        cycle, trend = sm.tsa.filters.hpfilter(ts, lamda)
        cycle.name = ts.name
        trend.name = ts.name
    elif method == 'ma':
        period = kwargs.pop('periods', 120)
        min_periods = kwargs.pop('min_periods', period)
        trend = ts.rolling(period, min_periods).mean()
        cycle = ts - trend
    else:
        raise NotImplementedError

    if if_plot:
        f, ax = plt.subplots(figsize=(15, 3))
        ax.plot(ts, label='signal')
        ax.plot(trend, label='trend')
        ax.plot(cycle, label='cycle')
        ax.set(title=f'de_trend')
        plt.legend()

        file_name = kwargs.pop('file_name', None)
        if file_name is not None:
            plt.savefig(f'{file_name}.png', dpi=400, bbox_inches='tight')

        plt.show()

    return cycle, trend


def de_noise_all_method(ts, ts_freq='M', if_plot=False, **kwargs):
    """
    Remove high-frequency noise from an indicator series with every supported
    filter (hp / ma / ema / mm / llt) and, optionally, plot the hp and ma
    versions for comparison. The HP-filtered signal and its noise are returned.

    :param pd.Series ts: indicator series
    :param str ts_freq: data frequency: 'Y', 'Q', 'M' or 'D' (selects the
                        default HP-filter lambda)
    :param bool if_plot: plot the hp and ma results
    :param kwargs:
        - str file_name: save the figure under this name (default None)
        - int lamda: HP-filter smoothing parameter
        - int periods: window for the ma/ema/mm filters (default 7)
        - int min_periods: minimum window for the ma/ema/mm filters
        - int d: llt filter parameter (default 12)
    :return: (hp-denoised signal, hp noise)
    """

    # HP filter: base lambda of 1, scaled by the frequency ratio ** 4
    lamda = kwargs.get('lamda', None)
    if lamda is None:
        if ts_freq == 'Q':
            lamda = 1
        elif ts_freq == 'Y':
            lamda = 1 * (1 / 4) ** 4
        elif ts_freq == 'M':
            lamda = 1 * 3 ** 4
        elif ts_freq == 'D':
            lamda = 1 * 9 ** 4
    noise_hp, denoised_signal_hp = sm.tsa.filters.hpfilter(ts, lamda)
    denoised_signal_hp.name = ts.name

    # Shared window parameters: use .get() so every filter sees the caller's
    # value. The original popped 'periods'/'min_periods' repeatedly, so only
    # the first filter saw them and the rest silently fell back to defaults
    # (the mm filter even popped a different key, 'period').
    periods = kwargs.get('periods', 7)
    min_periods = kwargs.get('min_periods', periods)

    denoised_signal_ma = ts.rolling(periods, min_periods).mean()
    denoised_signal_ema = ts.ewm(span=periods, min_periods=min_periods, adjust=True).mean()
    denoised_signal_mm = ts.rolling(periods, min_periods).median()

    # LLT low-lag filter (second-order recursive filter); positional access
    # via .iloc (integer __setitem__ on a Series is deprecated)
    d = kwargs.get('d', 12)
    cp = copy.deepcopy(ts)
    tmp = copy.deepcopy(ts)
    a = 2 / (d + 1)
    for i in range(2, len(tmp)):
        tmp.iloc[i] = (a - (a * a) / 4) * cp.iloc[i] + ((a * a) / 2) * cp.iloc[i - 1] \
                      - (a - 3 * (a * a) / 4) * cp.iloc[i - 2] \
                      + 2 * (1 - a) * tmp.iloc[i - 1] - (1 - a) * (1 - a) * tmp.iloc[i - 2]
    # assigned outside the loop: used to be unbound for series shorter than 3
    denoised_signal_llt = tmp
    noise_llt = ts - denoised_signal_llt  # kept for inspection; not returned

    if if_plot:
        f, ax = plt.subplots(figsize=(15, 10))
        ax.plot(denoised_signal_hp, label='denoised_signal_hp')
        ax.plot(denoised_signal_ma, label='denoised_signal_ma')
        ax.set(title=f'de_noise')
        plt.legend()

        file_name = kwargs.pop('file_name', None)
        if file_name is not None:
            plt.savefig(f'{file_name}.png', dpi=400, bbox_inches='tight')

        plt.show()

    # Return the HP result with its *own* residual; the original clobbered
    # `noise` with the llt residual while returning the hp signal.
    return denoised_signal_hp, noise_hp


def de_noise(ts, method='hp', ts_freq='M', if_plot=False, **kwargs):
    """
    Remove high-frequency noise from an indicator series.

    :param pd.Series ts: indicator series
    :param str method: 'hp', 'ma', 'ema', 'mm' or 'llt'
    :param str ts_freq: data frequency ('Y', 'Q', 'M', 'D'); selects the
                        default HP lambda
    :param bool if_plot: plot signal / denoised / noise
    :param kwargs:
        - str file_name: save the figure (default None: do not save)
        - int lamda: HP-filter parameter
        - int periods: window for ma/ema/mm (default 7)
        - int min_periods: minimum window for ma/ema/mm (default = periods)
        - int d: llt parameter, default 12
    :return: (denoised signal, noise) as two series
    """

    if method == 'hp':
        # base lambda of 1, scaled by the frequency ratio ** 4
        lamda = kwargs.pop('lamda', None)
        if lamda is None:
            if ts_freq == 'Q':
                lamda = 1
            elif ts_freq == 'Y':
                lamda = 1 * (1 / 4) ** 4
            elif ts_freq == 'M':
                lamda = 1 * 3 ** 4
            elif ts_freq == 'D':
                lamda = 1 * 9 ** 4
            else:
                # previously fell through with lamda=None and crashed inside
                # hpfilter with an opaque error
                raise ValueError(f"unsupported ts_freq: {ts_freq}")
        noise, denoised_signal = sm.tsa.filters.hpfilter(ts, lamda)
        denoised_signal.name = ts.name
    elif method in ('ma', 'ema', 'mm'):
        periods = kwargs.pop('periods', 7)
        min_periods = kwargs.pop('min_periods', periods)
        if method == 'ma':
            denoised_signal = ts.rolling(periods, min_periods).mean()
        elif method == 'ema':
            denoised_signal = ts.ewm(span=periods, min_periods=min_periods, adjust=True).mean()
        else:  # 'mm' - moving median
            denoised_signal = ts.rolling(periods, min_periods).median()
        noise = ts - denoised_signal
    elif method == 'llt':
        # LLT low-lag filter (second-order recursive filter); positional
        # access via .iloc (integer __setitem__ on a Series is deprecated)
        d = kwargs.pop('d', 12)
        cp = copy.deepcopy(ts)
        tmp = copy.deepcopy(ts)
        a = 2 / (d + 1)
        for i in range(2, len(tmp)):
            tmp.iloc[i] = (a - (a * a) / 4) * cp.iloc[i] + ((a * a) / 2) * cp.iloc[i - 1] \
                          - (a - 3 * (a * a) / 4) * cp.iloc[i - 2] \
                          + 2 * (1 - a) * tmp.iloc[i - 1] - (1 - a) * (1 - a) * tmp.iloc[i - 2]
        # assigned outside the loop: series shorter than 3 points used to
        # leave `denoised_signal`/`noise` unbound (UnboundLocalError)
        denoised_signal = tmp
        noise = ts - denoised_signal
    else:
        raise NotImplementedError

    if if_plot:
        f, ax = plt.subplots(figsize=(15, 3))
        ax.plot(ts, label='signal')
        ax.plot(denoised_signal, label='denoised_signal')
        ax.plot(noise, label='noise')
        ax.set(title=f'de_noise')
        plt.legend()

        file_name = kwargs.pop('file_name', None)
        if file_name is not None:
            plt.savefig(f'{file_name}.png', dpi=400, bbox_inches='tight')

        plt.show()

    return denoised_signal, noise


def de_seasonal(ts, model='add'):
    """
    Seasonal decomposition of a time series (thin wrapper around
    statsmodels' seasonal_decompose).

    :param ts: series to decompose
    :param str model: 'add' (additive) or 'mul' (multiplicative)
    :return: statsmodels DecomposeResult (trend / seasonal / resid components)
    """
    # NOTE(review): the `freq` argument was renamed `period` in newer
    # statsmodels, and freq=1 implies no real seasonal cycle -- verify intent
    # against the installed statsmodels version.
    result = sm.tsa.seasonal_decompose(ts, model, freq=1)
    return result


def ts_diff(ts, period=1):
    """Return the `period`-lag difference of the series (ts - ts.shift(period))."""
    return ts.diff(period)


def stationary(ts, method='diff'):
    """
    Make a series stationary (ADF test p-value < 0.1).

    method='diff' applies a first difference; otherwise a 4-period moving
    average is taken and the HP-filter cycle extracted. Possible improvements:
    other MA windows, monthly sums/averages, different HP parameters.

    :param ts: time series (pd.Series / single-column DataFrame)
    :param str method: 'diff' or anything else for the MA + HP-cycle route
    :return: the (possibly transformed) series; a warning is printed if it is
             still non-stationary
    """

    if adfuller(ts)[1] < 0.1:
        static = True
    else:
        static = False

    # only transform if the raw series fails the stationarity test
    if not static:
        print("进行平稳化处理-----------------")
        if method == 'diff':
            # First difference. BUGFIX: the original computed the difference
            # into a throwaway variable and returned the *untouched* copy of
            # ts, so the differencing never took effect.
            ts = (ts - ts.shift(1)).dropna(how='any', axis=0)
            print(ts)
        else:
            ts = ts.rolling(4).mean().dropna(how='any', axis=0)  # moving average
            cycle, trend = hpfilter(ts, 1)  # HP filter extracts the cyclical part
            ts = cycle

        # re-test the transformed series and warn if still non-stationary
        if adfuller(ts)[1] < 0.1:
            static = True
        else:
            print("The data still not statical")
    return ts


def ts_monthlize(ts, lamb=100, plot=False):
    """
    Convert a series to monthly frequency.

    Input at monthly frequency or lower (quarterly etc.) is resampled to
    month-end and the gaps linearly interpolated; input at higher frequency
    is HP-smoothed first and the trend sampled at month-end.

    :param pd.Series ts: series whose index is convertible to datetimes
                         (the index is converted in place)
    :param lamb: HP-filter smoothing parameter for high-frequency input
    :param bool plot: plot the series before/after conversion
    :return: monthly series
    """
    ts.index = pd.to_datetime(ts.index)
    ts.dropna(inplace=True)
    monthly = ts.resample('M').last()

    # monthlizing did not shrink the data -> original frequency was monthly
    # or lower; interpolate the intermediate months and return
    if monthly.shape[0] >= ts.shape[0]:
        monthly.interpolate(inplace=True)
        return monthly

    # higher than monthly: smooth with the HP filter, then take the
    # month-end values of the trend
    cycle, trend = sm.tsa.filters.hpfilter(ts, lamb=lamb)
    resampled = trend.resample('M').last()
    if plot:
        plt.plot(ts, label='before_monthlize')
        plt.plot(resampled, label='after_monthlize')
        plt.legend()
        plt.show()
    return resampled


def ts_fill_na(ts, plot=False):
    """
    Fill missing values of a monthly time series.

    Calendar months that are missing in more than half of the years are
    treated as habitually missing and forward-filled from the previous
    observation; the remaining sporadic gaps are filled by linear
    interpolation.

    :param pd.Series ts: monthly series with a DatetimeIndex
    :param bool plot: plot the filled series
    :return: filled copy of ts (the input is not modified)
    """
    filled = copy.deepcopy(ts)

    # fraction of NaNs per calendar month across all years
    na_ratio_by_month = ts.groupby(ts.index.month).apply(
        lambda g: g.isnull().sum() / len(g))
    # months with more than half of their observations missing are habitual
    habitual_months = na_ratio_by_month[na_ratio_by_month > 0.5].index

    # habitually missing months: carry the previous observation forward
    habitual_mask = ts.index.month.isin(habitual_months)
    filled.loc[habitual_mask] = ts.ffill()

    # sporadic gaps: linear interpolation
    filled.interpolate(inplace=True)

    if plot:
        plt.figure()
        plt.plot(filled)
        plt.show()

    return filled


class factor_cleaning:
    """
    Cleaning pipeline for a single macro/factor time series:
    monthlize -> de-trend -> fill NA -> smooth.
    """

    def __init__(self, factor_data, yoy):
        """
        :param factor_data: the single factor series to clean
        :param bool yoy: True if the data is already year-over-year; False
            for level/MoM data, which :meth:`de_trend` converts to YoY
        """
        self.factor_data = factor_data
        self.yoy = yoy

    def monthlize(self, lamb=100, plot=False):
        """
        Convert the data to monthly frequency. Non-monthly input is either
        HP-filter smoothed (higher frequency) or interpolated (lower
        frequency) by :func:`ts_monthlize`.

        :param float lamb: HP-filter smoothing parameter
        :param bool plot: plot a before/after comparison
        """
        self.factor_data = ts_monthlize(self.factor_data, lamb=lamb, plot=plot)

    def fill_na(self, plot=False):
        """
        Fill missing values: forward-fill regularly missing months and
        interpolate incidental gaps. The result is stored separately in
        ``self.unsmooth_factor_data``; ``self.factor_data`` is unchanged.

        :param bool plot: plot the filled series
        """
        self.unsmooth_factor_data = ts_fill_na(self.factor_data, plot=plot)

    def smooth(self, plot=False):
        """Smooth the series with an LLT filter (d=12)."""
        before = self.factor_data
        after, noise = de_noise(self.factor_data, method='llt', d=12)
        self.factor_data = after
        if plot:
            # Bug fix: plot the pre-smoothing series against the smoothed
            # one (previously both curves were the already-smoothed data,
            # because self.factor_data had been overwritten before plotting).
            plt.plot(before, label='before_detrend')
            plt.plot(self.factor_data, label='after_detrend')
            plt.legend()
            plt.show()

    def de_trend(self):
        """If the data is not YoY yet, de-trend by converting it to
        12-period percentage change."""
        if not self.yoy:
            self.factor_data = self.factor_data.pct_change(12).dropna()

    def cleaning_data(self):
        """
        Run the full cleaning pipeline.

        :return: tuple ``(factor_data, unsmooth_factor_data)`` — the smoothed
            series, and the resampled/NA-filled but unsmoothed series
        """
        self.monthlize()
        self.de_trend()
        self.fill_na()
        self.smooth()
        return self.factor_data, self.unsmooth_factor_data


class TP_analysis:
    """
    Turning-point (peak/trough) analysis of a smoothed indicator series.

    Three stages: initial local-extreme detection on the smoothed series,
    distance/alternation filtering of the candidates, and finally matching
    each surviving turning point back to the raw (unsmoothed) series.
    """

    def __init__(self, smooth_factor_data, unsmooth_factor_data, data_frequency='month'):
        '''
        :param smooth_factor_data: smoothed indicator series (pd.Series)
        :param unsmooth_factor_data: raw, unsmoothed indicator series
        :param str data_frequency: data frequency: 'day', "month" or "season"
        '''
        self.unsmooth_factor_data = unsmooth_factor_data
        self.factor_data = smooth_factor_data
        self.data_frequency = data_frequency

    def initial_turnning_point(self, plot=False):
        """
        Initial turning-point detection: a point is a candidate peak (trough)
        when it is the maximum (minimum) within a window of N observations
        around it, where N depends on ``data_frequency``.

        :param bool plot: plot the candidates against the smoothed series
        :return: None; stores a 0/1 DataFrame (columns 'max'/'min') in
            ``self.initial_tp``
        """
        if self.data_frequency == 'day':
            N = 30
        elif self.data_frequency == 'month':
            N = 6
        elif self.data_frequency == 'season':
            N = 2

        self.N = N
        tp_df = pd.DataFrame(columns=['max', 'min'])
        for i in range(0, self.factor_data.shape[0]):
            before_index = i - N
            end_index = i + N
            if before_index < 0:
                before_index = 0
            # NOTE(review): positional slicing/indexing of a Series with ints
            # is deprecated in recent pandas; behavior is positional here.
            if self.factor_data[i] == max(self.factor_data[before_index:end_index]):
                tp_df.loc[self.factor_data.index[i], 'max'] = 1
            else:
                tp_df.loc[self.factor_data.index[i], 'max'] = 0
            if self.factor_data[i] == min(self.factor_data[before_index:end_index]):
                tp_df.loc[self.factor_data.index[i], 'min'] = 1
            else:
                tp_df.loc[self.factor_data.index[i], 'min'] = 0
        if plot:
            self.plot_turnning_point(tp_df, self.factor_data, title='第一次拐点识别与滤波指标')
            plt.legend()
            plt.show()

        self.initial_tp = tp_df

    def tp_select(self, plot=False):
        """
        Filter and adjust the candidate turning points.

        Applies, each step iterated until stable: (1) drop candidates near
        the head/tail of the sample, (2) enforce a minimum peak-to-peak and
        trough-to-trough distance, (3) enforce a minimum peak-to-trough
        distance, (4) require peaks and troughs to alternate.

        :param bool plot: plot the surviving turning points
        :return: None; stores the filtered 0/1 DataFrame in
            ``self.selected_tp``
        """
        tp_df = copy.deepcopy(self.initial_tp)

        # Drop turning points within head_tail_clear periods of the start and
        # end of the sample (their detection windows are truncated).
        if self.data_frequency == 'day':
            head_tail_clear = 30
        elif self.data_frequency == 'month':
            head_tail_clear = 6
        elif self.data_frequency == 'season':
            head_tail_clear = 2

        tp_df.iloc[-head_tail_clear:, ] = 0
        tp_df.iloc[:head_tail_clear, ] = 0

        # Positional indices of the candidate peaks and troughs.
        max_point = tp_df[tp_df['max'] == 1]
        min_point = tp_df[tp_df['min'] == 1]
        max_position = [self.factor_data.index.tolist().index(m) for m in max_point.index]
        min_position = [self.factor_data.index.tolist().index(m) for m in min_point.index]

        def adj_distance(peak_position=[], distance=20, value_data=None, peak_type='peak'):
            """
            Enforce a minimum spacing between extremes: peak-to-peak,
            trough-to-trough, or between any two extremes.

            Returns ``(selected, remaining)``: ``selected`` is the final list
            when a full pass finds no conflict; otherwise it is None, a
            conflicting extreme has been removed from ``remaining``, and the
            caller must run another pass.

            NOTE(review): ``peak_position`` has a mutable default and is
            mutated via ``remove``; all call sites pass an explicit list.

            :param peak_position: positional indices of the extremes
            :param distance: minimum spacing in observations
            :param value_data: the underlying series, used to break conflicts
            :param peak_type: 'peak' keeps the larger value, 'through' keeps
                the smaller, 'all' keeps the first occurrence
            :return: (selected_peak_position or None, peak_position)
            """

            selected_peak_position = []

            for peak_point in peak_position:
                # Are there other extremes within `distance` after this one?
                select_peak_range = [i for i in peak_position if
                                     peak_point <= i < peak_point + distance]
                if len(select_peak_range) == 1:  # only this extreme: keep it
                    selected_peak_position.append(peak_point)
                else:  # conflict: keep the largest (peaks) / smallest (troughs)
                    peak_value = value_data.iloc[select_peak_range[0]]
                    selected = select_peak_range[0]
                    if peak_type == 'peak':  # peak vs peak
                        for i in select_peak_range:
                            if peak_value < value_data.iloc[i]:
                                peak_value = value_data.iloc[i]
                                selected = i
                    elif peak_type == 'through':  # trough vs trough
                        for i in select_peak_range:
                            if peak_value > value_data.iloc[i]:
                                peak_value = value_data.iloc[i]
                                selected = i
                    elif peak_type == 'all':  # mixed extremes: keep the first
                        pass
                    peak_2_delete = list(set(select_peak_range).difference(set([selected])))
                    for idx in peak_2_delete:  # removal invalidates peak_position, so restart the scan
                        peak_position.remove(idx)
                    return None, peak_position

            return selected_peak_position, peak_position

        def check_adjacency_relation(all_position, max_position, min_position, value_data):
            """
            Enforce that peaks and troughs alternate. On the first violation,
            drop the weaker of the two same-kind neighbors and signal the
            caller to rescan.

            :param all_position: sorted positions of all selected extremes
            :param max_position: positions of the selected peaks
            :param min_position: positions of the selected troughs
            :param value_data: the underlying series, used to break conflicts
            :return: (done_flag, all_position, max_position, min_position)
            """
            for i in range(len(all_position) - 1):
                position_list = all_position[i:i + 2]
                peak_value = value_data.iloc[position_list[0]]
                selected = position_list[0]
                if set(position_list) <= set(max_position):  # two adjacent peaks
                    if peak_value < value_data.iloc[position_list[1]]:
                        selected = position_list[1]
                    peak_2_delete = list(set(position_list).difference(set([selected])))[0]
                    # Removal invalidates all_position/max_position: rescan.
                    all_position.remove(peak_2_delete)
                    max_position.remove(peak_2_delete)
                    return False, all_position, max_position, min_position

                elif set(position_list) <= set(min_position):  # two adjacent troughs
                    if peak_value > value_data.iloc[position_list[1]]:
                        selected = position_list[1]
                    peak_2_delete = list(set(position_list).difference(set([selected])))[0]
                    # Removal invalidates all_position/min_position: rescan.
                    all_position.remove(peak_2_delete)
                    min_position.remove(peak_2_delete)
                    return False, all_position, max_position, min_position
                else:
                    pass

            return True, all_position, max_position, min_position

        # p_p_distance: minimum peak-to-peak (and trough-to-trough) spacing;
        # p_t_distance: minimum peak-to-trough spacing.
        if self.data_frequency == 'day':
            p_p_distance = 60
            p_t_distance = 30
        elif self.data_frequency == 'month':
            p_p_distance = 16
            p_t_distance = 6
        elif self.data_frequency == 'season':
            p_p_distance = 4
            p_t_distance = 2

        # Peaks must be at least p_p_distance periods apart.
        adj_flag = True
        while adj_flag:
            selected_max_position, max_position = adj_distance(max_position, distance=p_p_distance,
                                                               value_data=self.factor_data,
                                                               peak_type='peak')
            if selected_max_position is not None:
                adj_flag = False

        # Troughs must be at least p_p_distance periods apart.
        adj_flag = True
        while adj_flag:
            selected_min_position, min_position = adj_distance(min_position, distance=p_p_distance,
                                                               value_data=self.factor_data,
                                                               peak_type='through')
            if selected_min_position is not None:
                adj_flag = False

        all_position = list(set(selected_max_position).union(set(selected_min_position)))
        all_position.sort()
        # Peaks and troughs must be at least p_t_distance periods apart.
        adj_flag = True
        while adj_flag:
            selected_all_position, all_position = adj_distance(all_position, distance=p_t_distance,
                                                               value_data=self.factor_data,
                                                               peak_type='all')
            if selected_all_position is not None:
                adj_flag = False

        selected_max_position = list(
            set(selected_max_position).intersection(set(selected_all_position)))
        selected_max_position.sort()
        selected_min_position = list(
            set(selected_min_position).intersection(set(selected_all_position)))
        selected_min_position.sort()

        # Peaks and troughs must alternate.
        adj_flag = True
        while adj_flag:
            flag, selected_all_position, selected_max_position, selected_min_position = check_adjacency_relation(
                selected_all_position, selected_max_position, selected_min_position,
                value_data=self.factor_data)
            if flag:
                adj_flag = False

        selected_max_position = np.array(tuple(selected_max_position))
        selected_min_position = np.array(tuple(selected_min_position))

        tp_df['max'] = 0
        if selected_max_position.size > 0:  # at least one peak survived
            tp_df.iloc[selected_max_position, 0] = 1
        tp_df['min'] = 0
        if selected_min_position.size > 0:  # at least one trough survived
            tp_df.iloc[selected_min_position, 1] = 1
        if plot:
            self.plot_turnning_point(tp_df, self.factor_data, title='拐点调整与滤波指标')
            plt.legend()
            plt.show()

        self.selected_tp = tp_df

    def adjust_turnning_point(self, plot=False):
        """
        Match each selected turning point of the smoothed series to a local
        extreme of the raw series, searching within +/-N periods of the
        smoothed turning point (N depends on ``data_frequency``).

        :param bool plot: plot the matched points on the raw series
        :return: None; stores the 0/1 result in ``self.origin_tp_df``
        """
        # Search the raw series within +/-N periods of each smoothed turning point.
        # print(self.unsmooth_factor_data)
        if self.data_frequency == 'day':
            N = 10
        elif self.data_frequency == 'month':
            N = 4
        elif self.data_frequency == 'season':
            N = 8  # quarterly data may be smoother than monthly; widen the search window

        max_point = self.selected_tp[self.selected_tp['max'] == 1]
        min_point = self.selected_tp[self.selected_tp['min'] == 1]
        max_position = [self.factor_data.index.tolist().index(m) for m in max_point.index]
        min_position = [self.factor_data.index.tolist().index(m) for m in min_point.index]

        origin_tp_df = pd.DataFrame(columns=['max', 'min'])
        # NOTE(review): the loop variables below shadow the builtins max/min,
        # and for turning points near the sample edge `i` may exceed the
        # series length (IndexError) or wrap around negatively — confirm the
        # head/tail clearing in tp_select always prevents this.
        for max in max_position:
            for i in range(max - N, max + N + 1):
                length = len(self.unsmooth_factor_data)
                # A raw point is a local maximum if it tops its +/-2 window.
                if self.unsmooth_factor_data[i] == np.max(
                        self.unsmooth_factor_data.iloc[np.max([0, i - 2]): np.min([i + 3, length])]):
                    origin_tp_df.loc[self.unsmooth_factor_data.index[i], 'max'] = 1
                    break

        for min in min_position:
            for i in range(min - N, min + N + 1):
                length = len(self.unsmooth_factor_data)
                # A raw point is a local minimum if it bottoms its +/-2 window.
                if self.unsmooth_factor_data.iloc[i] == np.min(
                        self.unsmooth_factor_data.iloc[np.max([0, i - 2]): np.min([i + 3, length])]):
                    origin_tp_df.loc[self.unsmooth_factor_data.index[i], 'min'] = 1
                    break
        # Optional check: verify the selected TPs are local extremes of the raw data.
        # for date in origin_tp_df[origin_tp_df['max'] == 1].index:
        #     i = self.unsmooth_factor_data.index.tolist().index(date)
        #     if self.unsmooth_factor_data.loc[date, ] != np.max(self.unsmooth_factor_data.iloc[i-1:i+2, ]):
        #         origin_tp_df.loc[date, 'max'] = np.nan
        # for date in origin_tp_df[origin_tp_df['min'] == 1].index:
        #     i = self.unsmooth_factor_data.index.tolist().index(date)
        #     if self.unsmooth_factor_data.loc[date, ] != np.min(self.unsmooth_factor_data.iloc[i-1:i+2, ]):
        #         origin_tp_df.loc[date, 'min'] = np.nan
        origin_tp_df.sort_index(inplace=True)
        if plot:
            self.plot_turnning_point(origin_tp_df, self.unsmooth_factor_data, title='拐点日期与原始指标匹配')
            plt.plot(self.factor_data, color='orange')
            plt.legend()
            plt.show()

        self.origin_tp_df = origin_tp_df

    def plot_turnning_point(self, tp_df, data, title=''):
        """Plot ``data`` with the peaks (red) and troughs (green) of ``tp_df``."""
        fig = plt.figure()
        local_max = tp_df[tp_df['max'] == 1]
        local_min = tp_df[tp_df['min'] == 1]
        plt.plot(data)
        plt.plot(local_max.index, data.loc[local_max.index], 'o', color='tab:red')
        plt.plot(local_min.index, data.loc[local_min.index], 'o', color='tab:green')
        plt.title(title)

    def ana_tp(self, plot=False):
        """Run the full pipeline (detect -> select -> match) and return the
        turning points of the raw series as a 0/1 DataFrame."""
        self.initial_turnning_point(plot)
        self.tp_select(plot)
        self.adjust_turnning_point(plot)
        return self.origin_tp_df


# %% 时间序列因子指标 - 多因子

def neutralize_series(se, tradedate, exclude_industry=True, exclude_style_list=None):
    """
    Neutralize a cross-sectional factor series against Barra risk exposures.

    Regresses the factor values on the selected Barra style (and optionally
    industry) exposures and returns the OLS residuals as the neutralized
    factor.

    :param se: factor values indexed by 'stockcode'
    :param tradedate: date used to fetch the exposure snapshot
    :param bool exclude_industry: if True, keep only the style columns
        ('beta' .. 'sizenl'), dropping the industry dummies
    :param exclude_style_list: optional list of style columns to drop, e.g. a
        subset of ['BETA','MOMENTUM','SIZE','EARNYILD','RESVOL','GROWTH',
        'BTOP','LEVERAGE','LIQUIDTY','SIZENL']; None/[] keeps all
    :return: DataFrame of residuals indexed by stockcode, or None when no
        exposure columns remain
    """
    Exposure = factor_exposure_related.get_barra_factor_exposure(start_date=tradedate,
                                                                 end_date=tradedate,
                                                                 only_latest=True,
                                                                 factor_type='all')
    Exposure = Exposure.rename(columns={'stock_code': 'stockcode'})
    Exposure = Exposure.drop(columns=['end_date', 'country'])
    if exclude_industry:
        # Keep the merge key plus the style COLUMNS 'beta'..'sizenl'.
        # (The original ``Exposure.loc['beta':'sizenl']`` was a row slice —
        # it selected no columns and also dropped 'stockcode'.)
        style_cols = Exposure.loc[:, 'beta':'sizenl'].columns.tolist()
        Exposure = Exposure.loc[:, ['stockcode'] + style_cols]
    if exclude_style_list:  # also covers None and [] (the old ``!= []`` crashed on None)
        for style in exclude_style_list:
            del Exposure[style]
    if Exposure.empty:  # was ``Exposure.shape == 0``, which is never True (shape is a tuple)
        print('无风格因子或行业哑变量')
        return
    se = se.reset_index()
    neuthalizedata = pd.merge(se, Exposure, on='stockcode', how='left')
    neuthalizedata.replace(np.nan, 0, inplace=True)
    # OLS of the factor (column 1) on the exposures (columns 2+); residuals
    # are the neutralized factor values.
    results = sm.OLS(neuthalizedata.iloc[:, 1], neuthalizedata.iloc[:, 2:]).fit()
    neuthalize_se = pd.DataFrame(results.resid)
    neuthalize_se.index = se['stockcode']
    return neuthalize_se


def preprocess(data, factor, asset_type, date, winsor=True, neutral=False, standard=True,
               exclude_industry=None, exclude_style_list=None):
    """
    Apply the standard cross-sectional preprocessing chain to one factor
    column: winsorize -> (optional) Barra neutralization -> standardize.

    :param data: DataFrame holding the factor column
    :param factor: name of the column to process
    :param asset_type: asset class; neutralization only applies to 'stock'
    :param date: trade date used when neutralizing
    :param bool winsor: clip the 2.5% tails on both sides
    :param bool neutral: neutralize against Barra exposures (stocks only)
    :param bool standard: z-score standardize
    :param exclude_industry: forwarded to :func:`neutralize_series`
    :param exclude_style_list: forwarded to :func:`neutralize_series`
    :return: ``data`` with the processed column written back in place
    """
    if winsor:
        clipped = winsorize(data[factor], methods='quantile_diy',
                            limits=[0.025, 0.025], inclusive=(False, False))
        data[factor] = pd.Series(clipped)
    if neutral and asset_type == 'stock':
        neutralized = neutralize_series(data[factor], date,
                                        exclude_industry=exclude_industry,
                                        exclude_style_list=exclude_style_list)
        data[factor] = neutralized.iloc[:, 0].tolist()
    if standard:
        data[factor] = pd.Series(standardize(data[factor]))
    return data


# %% 脚本说明: 如果因子直接存在较强相关性，因子相关性处理方法
def symmetric_orth(fac_data, asset_type):
    """
    Symmetric (Löwdin) orthogonalization of a factor panel.

    For every trade date, the cross-sectional factor matrix X is replaced by
    X @ S with S = U diag(a)^(-1/2) U', where (a, U) is the
    eigen-decomposition of M = X'X. The resulting columns are mutually
    orthogonal while staying as close as possible to the originals.

    :param fac_data: DataFrame with a two-level index (tradedate,
        '<asset_type>code', e.g. stockcode for stocks), one column per factor
    :param asset_type: 'stock', 'manager', 'fund', 'sw' or 'csi' ('sw'/'csi'
        are the SW and CSI level-1 industry classifications)
    :return: DataFrame with the same index/columns, holding the
        orthogonalized factors
    """
    # Fill gaps per asset (forward fill), then fall back to column means.
    fac_data_filled = fac_data.groupby(level=1).ffill()
    fac_data_filled = fac_data_filled.fillna(value=fac_data_filled.mean())
    ortho_dates = fac_data.reset_index().tradedate.unique()
    new_fac_data = pd.DataFrame()
    # Symmetric orthogonalization, one cross-section per date.
    for date in ortho_dates:
        X = fac_data_filled.loc[(date,),]
        codes = X.index.tolist()
        M = np.dot(X.T, X)
        # M is symmetric, so use eigh: guaranteed real eigenvalues/vectors
        # (np.linalg.eig may return spurious complex parts here).
        a, U = np.linalg.eigh(M)
        # S = U diag(a)^(-1/2) U' — the symmetric inverse square root of M.
        S = U.dot(np.diag(1.0 / np.sqrt(a))).dot(U.T)
        X_orthogonal = np.dot(X, S)
        X = pd.DataFrame(X_orthogonal)
        X = X.fillna(X.mean()).fillna(X.mean(axis=1))
        X['tradedate'] = date
        X[f'{asset_type}code'] = codes
        X = X.set_index(['tradedate', f'{asset_type}code'])
        X.columns = fac_data.columns
        new_fac_data = pd.concat([new_fac_data, X])

    return new_fac_data


def Schimidt1(X1):
    """
    Classical Gram–Schmidt orthogonalization of the columns of ``X1``.

    Each column has its projection onto the (normalized) preceding columns
    subtracted; the returned columns are mutually orthogonal but NOT
    normalized, and column 0 is unchanged.

    :param X1: DataFrame (or 2-D array-like) of shape (n_obs, n_factors)
        with linearly independent, non-zero columns
    :return: np.ndarray of orthogonalized columns. The input is left
        unmodified — the original wrote through ``X1.values``, which can be
        a view and silently mutated the caller's DataFrame in place.
    """
    X1 = np.array(X1, dtype=float)  # explicit copy: never write back into the caller's data
    R = np.zeros((X1.shape[1], X1.shape[1]))
    Q = np.zeros(X1.shape)
    for k in range(0, X1.shape[1]):
        R[k, k] = np.sqrt(np.dot(X1[:, k], X1[:, k]))
        Q[:, k] = X1[:, k] / R[k, k]
        for j in range(k + 1, X1.shape[1]):
            R[k, j] = np.dot(Q[:, k], X1[:, j])
            X1[:, j] = X1[:, j] - R[k, j] * Q[:, k]
    return X1


# 逐步正交化（施密特正交化）
def Schimidt_orth(fac_data, asset_type):
    """
    Sequential (Gram–Schmidt) orthogonalization of a factor panel.

    For each trade date the cross-sectional factor matrix is orthogonalized
    column by column via :func:`Schimidt1`, so each factor is purged of the
    factors that precede it.

    :param fac_data: DataFrame with a two-level index (tradedate,
        '<asset_type>code', e.g. stockcode for stocks), one column per factor
    :param asset_type: 'stock', 'manager', 'fund', 'sw' or 'csi' ('sw'/'csi'
        are the SW and CSI level-1 industry classifications)
    :return: DataFrame with the same index/columns, holding the
        orthogonalized factors
    """
    # Fill gaps per asset (forward fill), then fall back to column means.
    filled = fac_data.groupby(level=1).ffill()
    filled = filled.fillna(value=filled.mean())
    dates = fac_data.reset_index().tradedate.unique()
    pieces = []
    for trade_date in dates:
        cross_section = filled.loc[(trade_date,),]
        code_list = cross_section.index.tolist()
        ortho = pd.DataFrame(Schimidt1(cross_section))
        ortho = ortho.fillna(ortho.mean()).fillna(ortho.mean(axis=1))
        ortho['tradedate'] = trade_date
        ortho[f'{asset_type}code'] = code_list
        ortho = ortho.set_index(['tradedate', f'{asset_type}code'])
        ortho.columns = fac_data.columns
        pieces.append(ortho)
    return pd.concat(pieces) if pieces else pd.DataFrame()


def PCA_orth(fac_data, asset_type):
    """
    Orthogonalize a factor panel via per-date principal component analysis.

    For each trade date the column-centered cross-sectional factor matrix is
    replaced by its principal-component scores. Note the output columns are
    PCA components relabeled with the original factor names.

    :param fac_data: DataFrame with a two-level index (tradedate,
        '<asset_type>code', e.g. stockcode for stocks), one column per factor
    :param asset_type: 'stock', 'manager', 'fund', 'sw' or 'csi' ('sw'/'csi'
        are the SW and CSI level-1 industry classifications)
    :return: DataFrame with the same index/columns, holding the PCA scores
    """
    # Fill gaps per asset (forward fill), then fall back to column means.
    filled = fac_data.groupby(level=1).ffill()
    filled = filled.fillna(value=filled.mean())
    dates = fac_data.reset_index().tradedate.unique()
    pca = decomposition.PCA(n_components=len(fac_data.columns))
    pieces = []
    for trade_date in dates:
        cross_section = filled.loc[(trade_date,),]
        code_list = cross_section.index.tolist()
        # Center each column before fitting the PCA on this cross-section.
        centered = cross_section - cross_section.mean()
        scores = pd.DataFrame(pca.fit_transform(centered))
        scores = scores.fillna(scores.mean()).fillna(scores.mean(axis=1))
        scores['tradedate'] = trade_date
        scores[f'{asset_type}code'] = code_list
        scores = scores.set_index(['tradedate', f'{asset_type}code'])
        scores.columns = fac_data.columns
        pieces.append(scores)
    return pd.concat(pieces) if pieces else pd.DataFrame()


def regress_orth(X1, y):
    """
    Orthogonalize ``y`` against ``X1`` by linear regression.

    Fits an OLS model of y on X1 (with intercept) and returns the residuals,
    i.e. the part of ``y`` not explained by ``X1``.

    :param X1: regressor matrix, shape (n_obs, n_features)
    :param y: target values, shape (n_obs,) or (n_obs, n_targets)
    :return: regression residuals with the same shape as ``y``
    """
    fitted = LinearRegression().fit(X1, y)
    # residual = y - (X1 @ coef' + intercept) = y minus the fitted values
    return y - (np.dot(X1, fitted.coef_.T) + fitted.intercept_)


def preprocess_freq_start_date(datax, datay, start_period=0, freq='MS', period=1):
    """
    Align an indicator series with a return series at a given rebalance
    frequency and start offset.

    :param datax: indicator series (shifted by one period internally so only
        past information is used)
    :param datay: per-period return series
    :param int start_period: extra shift applied to test the sensitivity of
        periodic rebalancing to the start date
    :param str freq: rebalance frequency; 'D' keeps the daily series,
        anything else compounds returns between rebalance dates
    :param int period: rebalance period passed to ``get_rebalance_date``
    :return: tuple ``(indic_data, ret_series)`` aligned on rebalance dates
        (or the daily index when freq == 'D')
    """

    datax = datax.shift(1)  # lag the indicator: only use information known before the period
    df = pd.concat([datax, datay], axis=1).dropna()
    df = df.shift(start_period)  # periodic rebalancing can depend on the start date, so allow shifting it
    df.columns = ['factor', 'ret']

    if freq != 'D':  # non-daily: compound returns between rebalance dates
        start = df.index.min()
        end = df.index.max()

        rebalance_date = get_rebalance_date(start, end, freq, period, initial_start=False, initial_end=False)
        indic_data_dict = {}
        cum_ret_dict = {}
        for i in range(len(rebalance_date) - 1):
            date = rebalance_date[i]
            next_date = rebalance_date[i + 1]
            # Indicator value on the rebalance date (already lagged one
            # period by the shift above). Use .loc instead of the old
            # chained df['factor'][date] indexing.
            indic_data_dict[date] = df.loc[date, 'factor']
            temp_ret_series = df.loc[date:next_date, 'ret']
            # Compound the returns within the holding period, excluding the
            # next rebalance date itself.
            cum_ret_series = (temp_ret_series.iloc[:-1] + 1).cumprod() - 1
            # .iloc[-1] replaces the deprecated positional ``[-1]`` lookup
            # on a datetime-indexed Series.
            cum_ret_dict[date] = cum_ret_series.iloc[-1]  # cumulative return over the holding period
        indic_data = pd.Series(indic_data_dict)
        ret_series = pd.Series(cum_ret_dict)
    else:
        indic_data = df['factor']
        ret_series = df['ret']

    return indic_data, ret_series


if __name__ == '__main__':
    # Fetch test data (macro-indicator variant, kept for reference)
    # df = pd.read_csv('factor.csv')
    # df.set_index(['end_date'], inplace=True)
    # ts = df['M2:同比']

    # Fetch test data (crypto indicator from Glassnode)
    origin_factor_df = get_indicators(indic_name='aSOPR', asset='BTC', start_date='2015-01-01', end_date='2021-10-29')
    ts = origin_factor_df['aSOPR']
    ts.index = pd.to_datetime(ts.index)

    # Test ts_diff
    # aaa = ts_diff(ts, period=2)

    # Test ts_threshold_signal
    # bbb = ts_threshold_signal(ts, positive=False, bins=[ts.min(), 1, 1.2, ts.max()], left_right='right')

    # Test ts_preprocess_detrend_denoise
    # bbb = ts_preprocess_detrend_denoise(ts, way='rolling', periods=100, min_periods=100, if_plot=True,
    #                                     if_detrend=True,
    #                                     detrend_method='ma', detrend_kwargs={'periods': 120, 'min_periods': 120},
    #                                     if_denoise=True,
    #                                     denoise_method='ma', denoise_kwargs={'periods': 30, 'min_periods': 30})

    # bbb = ts_preprocess_detrend_denoise(ts, way='whole', periods=100, min_periods=100, if_plot=True,
    #                                     if_detrend=True, if_denoise=True,
    #                                     detrend_method='hp', detrend_kwargs={},
    #                                     denoise_method='ma', denoise_kwargs={'periods': 30, 'min_periods': 30})

    # # Test de_trend
    # bbb = de_trend(ts, method='hp', ts_freq='D', if_plot=True)
    # bbb = de_trend(ts, method='ma', ts_freq='D', if_plot=True, periods=120)

    # # Test de_noise
    # bbb = de_noise(ts, method='hp', ts_freq='D', if_plot=True)
    # bbb = de_noise(ts, method='ma', ts_freq='M', if_plot=True, period=7, min_periods=7)
    # bbb = de_noise(ts, method='ema', ts_freq='M', if_plot=True, span=7, min_periods=7)
    # bbb = de_noise(ts, method='mm', ts_freq='M', if_plot=True, period=7, min_periods=7)
    # bbb = de_noise_all_method(ts, ts_freq='D', if_plot=True, period=15, span=15, min_periods=15)

    # Test ts_percentile_rank_score
    bbb = ts_percentile_rank_score(ts, way='whole', rank_method='rank', period=100, min_period=100, positive=False,
                                   scale=100, preprocess=True,
                                   if_detrend=True, detrend_method='ma',
                                   detrend_kwargs={'periods': 120, 'min_periods': 120},
                                   if_denoise=True)

    # Test ts_percentile_rank_signal
    bbb = ts_percentile_rank_signal(ts, way='whole', period=100, min_period=100, positive=False, bins=[0, 0.2, 0.8, 1],
                                    left_right='right')

    # Test factor_cleaning (macro-indicator variant, kept for reference)
    # target_indicator = 'GDP:不变价:当季同比'  # real GDP
    # # target_indicator = 'GDP:现价:当季同比'    # nominal GDP
    # # target_indicator = 'CPI:食品:当月同比'    # CPI
    # target_data = get_indicator_value_and_info(indic_name=target_indicator, start_date='2000-01-01')
    # target_data.drop_duplicates(subset=['end_date', 'indic_name'], keep='first', inplace=True)
    # target_data = target_data.set_index(['end_date', 'indic_name'])['indic_value'].unstack()
    # print(target_data)
    # # target_data.to_csv('target.csv')
    #
    # factor = ['M2:同比', '制造业PMI']
    # factor_data = get_indicator_value_and_info(indic_name=factor, start_date='2000-01-01')
    # factor_data.drop_duplicates(subset=['end_date', 'indic_name'], keep='first', inplace=True)
    # factor_data = factor_data.set_index(['end_date', 'indic_name'])['indic_value'].unstack()
    # print(factor_data)
    # # factor_data.to_csv('factor.csv')

    # Load cached test data from CSV.
    target_data = pd.read_csv('target.csv', index_col=0)
    target_data.index = pd.to_datetime(target_data.index, format="%Y-%m-%d")
    factor_data = pd.read_csv('factor.csv', index_col=0)
    factor_data.index = pd.to_datetime(factor_data.index, format="%Y-%m-%d")

    # Test factor_cleaning on the first factor/target column.
    factor1 = factor_cleaning(factor_data.iloc[:, 0], yoy=True)
    cleaned_factor_data, unsmooth_factor_data = factor1.cleaning_data()
    target1 = factor_cleaning(target_data.iloc[:, 0], yoy=True)
    cleaned_target_data, unsmooth_target_data = target1.cleaning_data()

    # Test TP_analysis
    tp1 = TP_analysis(cleaned_factor_data, unsmooth_factor_data)
    tp1.ana_tp()
