#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Datetime: 2021/10/05 14:17
# @Author  : CHEN Wang
# @Site    :
# @File    : crypto_timing_factor_analyse.py
# @Software: PyCharm

"""
脚本说明： 数字货币择时指标有效性测试脚本
"""

import os
import pandas as pd
import matplotlib.pyplot as plt
from pylab import mpl
from quant_researcher.quant.project_tool.localize import TEST_DIR, DATA_DIR, TA_TEST_DIR
from quant_researcher.quant.factors.factor_analysis.factor_analyser.factor_analyse_timing import period_lag_corr, \
    window_period_lag_corr, period_lag_regression_test, window_period_lag_regression_test, period_lag_t_test, \
    window_period_lag_t_test, match_tp
from quant_researcher.quant.factors.factor_preprocess.preprocess import ts_percentile_rank_score, ts_diff, \
    ts_percentile_rank_signal, de_trend
from quant_researcher.quant.performance_attribution.core_functions.performance_analysis import performance
from quant_researcher.quant.backtest.holding_backtest import simple_backtest
from quant_researcher.quant.factors.factor_preprocess.preprocess import preprocess_freq_start_date, ts_preprocess_detrend_denoise, ts_threshold_signal, ts_mtm_reversal_signal, de_noise
from quant_researcher.quant.factors.indicator_tools.indicators_util import *

mpl.rcParams['font.sans-serif'] = ['SimHei']  # 设置字体为黑体
mpl.rcParams['axes.unicode_minus'] = False  # 解决中文字体负号显示不正常问题


def periodly_timing_test(origin_factor_series, asset_ret_series):
    """Test the effectiveness of one timing factor under periodic rebalancing.

    Every combination of

      * factor transformation -- raw level (``origin``), daily/weekly/monthly
        difference (``diff_d``/``diff_w``/``diff_m``), each optionally
        converted to a rolling (``*_roll_pct``) or expanding
        (``*_expand_pct``) percentile-rank score;
      * rebalance frequency -- daily (``D``), weekly (``W-MON``),
        month start (``MS``);
      * rebalance start offset within the period

    is run through lagged-correlation, rolling lagged-correlation,
    lagged-regression and rolling lagged-regression tests.  For the four
    non-percentile factor types, a lagged T-test on percentile-rank signals
    (binary and ternary bins) is run as well.

    All plots/files are written under ``TEST_DIR/<factor_name>/``.

    :param pd.Series origin_factor_series: daily factor values; must have a
        ``name`` attribute (it is used to build the output directory name).
    :param pd.Series asset_ret_series: daily asset return series aligned
        (by date index) with the factor.
    :return: None -- results are saved to disk by the test helpers.
    """
    # (start offsets to test, minimum rolling-test window, rolling step)
    # per rebalance frequency.  Using a lookup table instead of a chain of
    # independent `if` statements guarantees an unknown frequency fails fast
    # (KeyError) instead of silently reusing parameters from the previous
    # iteration.
    freq_params = {
        'D': ([0], 400, 20),                      # min window: 400 days
        'W-MON': ([0, 1, 2, 3, 4, 5, 6], 60, 2),  # start Mon..Sun; 60 weeks
        'MS': ([0, 4, 9, 14, 19, 24, 29], 24, 1),  # 1st/5th/10th/...; 24 months
    }

    for factor_type in ['origin', 'diff_d', 'diff_w', 'diff_m',
                        'origin_roll_pct', 'diff_d_roll_pct', 'diff_w_roll_pct', 'diff_m_roll_pct',
                        'origin_expand_pct', 'diff_d_expand_pct', 'diff_w_expand_pct', 'diff_m_expand_pct']:
        # Derive the base series from the transformation prefix.
        prefix = factor_type[:6]
        if prefix == 'origin':
            factor_series = origin_factor_series
        elif prefix == 'diff_d':
            factor_series = ts_diff(origin_factor_series, period=1)
        elif prefix == 'diff_w':
            factor_series = ts_diff(origin_factor_series, period=7)
        elif prefix == 'diff_m':
            factor_series = ts_diff(origin_factor_series, period=30)
        else:
            raise NotImplementedError(f'unknown factor type: {factor_type}')

        # Apply the optional percentile-rank transformation from the suffix.
        if factor_type.endswith('roll_pct'):
            factor_series = ts_percentile_rank_score(factor_series, way='rolling', periods=365, min_periods=365)
        elif factor_type.endswith('expand_pct'):
            factor_series = ts_percentile_rank_score(factor_series, way='expanding', min_periods=365)
        # else: keep the raw (non-ranked) series

        factor_name = factor_series.name + f'_{factor_type}'
        file_path = os.path.join(TEST_DIR, f'{factor_name}')
        os.makedirs(file_path, exist_ok=True)

        # %% Periodic-rebalance effectiveness tests.
        for freq in ['D', 'W-MON', 'MS']:  # rebalance frequencies under test
            start_period, sample_window, rolling_step = freq_params[freq]

            for period in start_period:  # rebalance start offsets under test
                factor, ret = preprocess_freq_start_date(factor_series, asset_ret_series, start_period=period,
                                                         freq=freq)

                file_name = os.path.join(file_path, f'{freq}-{period}')

                # Lagged correlation between the factor and forward returns.
                file_name_1 = file_name + '-lag_corr'
                corr_series, offset = period_lag_corr(factor, ret, lag_period=10, if_plot=True, file_name=file_name_1)

                # Rolling lagged correlation (checks sensitivity to the
                # chosen sample window).
                file_name_2 = file_name + '-lag_corr_rolling'
                corr_df, offset_list = window_period_lag_corr(factor, ret, lag_period=10, window=sample_window,
                                                              rolling_step=rolling_step, if_plot=True,
                                                              file_name=file_name_2)

                # Single-factor lagged regression.
                file_name_3 = file_name + '-lag_regression'
                regression_test = period_lag_regression_test(factor, ret, lag_period=10, if_plot=True,
                                                             file_name=file_name_3)

                # Rolling single-factor lagged regression.
                file_name_4 = file_name + '-lag_regression_rolling'
                p_value_df, offset_list = window_period_lag_regression_test(factor, ret, lag_period=10,
                                                                            window=sample_window,
                                                                            rolling_step=rolling_step, if_plot=True,
                                                                            file_name=file_name_4)

                # T-tests run only on the non-percentile factor types: the
                # *_pct variants are already ranks, so re-ranking them for a
                # signal would be redundant.
                if factor_type in ['origin', 'diff_d', 'diff_w', 'diff_m']:
                    # Signal construction: rolling / expanding / full-history
                    # percentile ranks, each with binary and ternary bins.
                    for signal_method in ['full_history', 'roll_pct', 'expand_pct']:
                        if signal_method == 'roll_pct':
                            way = 'rolling'
                        elif signal_method == 'expand_pct':
                            way = 'expanding'
                        elif signal_method == 'full_history':
                            way = 'whole'
                        else:
                            raise NotImplementedError

                        for signal_type in ['2', '3']:  # binary / ternary bins
                            if signal_type == '2':
                                bins_list = [[0, 0.1, 1], [0, 0.3, 1], [0, 0.5, 1], [0, 0.7, 1], [0, 0.9, 1]]
                            elif signal_type == '3':
                                bins_list = [[0, 0.1, 0.9, 1], [0, 0.2, 0.8, 1], [0, 0.3, 0.7, 1], [0, 0.4, 0.6, 1]]
                            else:
                                # Guard: without this an unexpected value would
                                # silently reuse the previous bins_list.
                                raise NotImplementedError

                            for bins in bins_list:
                                # Order matters: rank the daily series into a
                                # signal FIRST, then resample.  Resampling
                                # first changes the sample size and biases the
                                # percentile ranks.
                                signal = ts_percentile_rank_signal(factor_series, way=way, periods=365, min_periods=365,
                                                                   bins=bins)  # daily factor -> signal

                                new_signal, new_ret = preprocess_freq_start_date(signal, asset_ret_series,
                                                                                 start_period=period,
                                                                                 freq=freq)  # to target freq/offset

                                all_df = pd.concat([new_signal, new_ret], axis=1)
                                all_df.columns = ['signal', 'ret']
                                all_df.dropna(inplace=True)

                                # Lagged T-test.
                                file_name_5 = file_name + f'-lag_t-{signal_method}-{signal_type}-{bins}'
                                p_value_series, offset = period_lag_t_test(all_df, lag_period=10, if_plot=True,
                                                                           file_name=file_name_5)

                                # Rolling lagged T-test.
                                file_name_6 = file_name + f'-lag_t-{signal_method}-{signal_type}-{bins}-rolling'
                                p_value_df, offset_list = window_period_lag_t_test(all_df, lag_period=10,
                                                                                   window=sample_window,
                                                                                   rolling_step=rolling_step,
                                                                                   if_plot=True,
                                                                                   file_name=file_name_6)


def backtest_test(asset, start_date, end_date, origin_factor_series, asset_prices_series, asset_ret_series,
                  all_asset_ret_df, benchmark_ret_df, **kwargs):
    """

    :param str asset: 资产代码
    :param str start_date: '2018-10-01'
    :param str end_date: '2021-10-10'
    :param pd.Series origin_factor_series:
    :param pd.Series asset_ret_series:
    :param pd.DataFrame all_asset_ret_df:
    :param pd.DataFrame benchmark_ret_df:
    :param kwargs:
        - commission: 默认为0，无手续费
    :return:
    """

    commission = kwargs.pop('commission', 0)
    for factor_type in ['origin', 'diff_d', 'diff_w', 'diff_m']:
        # 判断指标的类型
        if factor_type[:6] == 'origin':
            factor_series = origin_factor_series.copy()
            # 原始指标需要检查不做预处理，和做预处理两种情况（去趋势和去噪音）
            preprocess_list = ['False', 'denoise_ma7', 'denoise_ma30', 'denoise_hp', 'detrend_denoise']
        elif factor_type[:4] == 'diff':  # 计算指标差分
            if factor_type[:6] == 'diff_d':
                # 差分类指标，因为已经几乎无趋势，因此只需检测不做预处理和去噪音情况
                factor_series = ts_diff(origin_factor_series, period=1)
                preprocess_list = ['False', 'denoise_hp']  # ma7, ma30与diff_w, diff_m只差7倍，30倍不用重复测试
            elif factor_type[:6] == 'diff_w':
                factor_series = ts_diff(origin_factor_series, period=7)
                preprocess_list = ['False', 'denoise_ma7', 'denoise_ma30', 'denoise_hp']
            elif factor_type[:6] == 'diff_m':
                factor_series = ts_diff(origin_factor_series, period=30)
                preprocess_list = ['False', 'denoise_ma7', 'denoise_ma30', 'denoise_hp']
            factor_series = factor_series[factor_series[~factor_series.isnull()].index[0]:]  # 剔除因为差分导致的NAN
        else:
            raise NotImplementedError

        factor_name = factor_series.name + f'_{factor_type}'
        file_path = os.path.join(TEST_DIR, f'{factor_name}')
        os.makedirs(file_path, exist_ok=True)

        # 指标或者指标的差分保存
        file_name = os.path.join(file_path, f'{factor_name}')
        factor_series.to_csv(f'{file_name}.csv')  # 数据保存

        # %% 不定期调仓回测测试 (不定期调仓运用的是每日收盘调仓)
        perf_list_0 = []  # rolling, expanding, full_history测试区间不同，对应的绩效分开存储
        perf_list_1 = []  # rolling, expanding, full_history测试区间不同，对应的绩效分开存储
        if 'denoise_hp' in preprocess_list:  # 如果预处理方法包含其他， 则把所有预处理方式都提前处理好
            # hp去噪音
            rolling180_denoise_hp_data = ts_preprocess_detrend_denoise(factor_series, 'rolling', periods=180,
                                                                       min_periods=180, if_detrend=False,
                                                                       if_denoise=True)
            file_name = os.path.join(file_path, f'rolling180_denoise_hp_data')
            rolling180_denoise_hp_data.to_excel(f'{file_name}.xlsx')
            rolling365_denoise_hp_data = ts_preprocess_detrend_denoise(factor_series, 'rolling', periods=365,
                                                                       min_periods=365, if_detrend=False,
                                                                       if_denoise=True)
            file_name = os.path.join(file_path, f'rolling365_denoise_hp_data')
            rolling365_denoise_hp_data.to_excel(f'{file_name}.xlsx')
            rolling730_denoise_hp_data = ts_preprocess_detrend_denoise(factor_series, 'rolling', periods=730,
                                                                       min_periods=730, if_detrend=False,
                                                                       if_denoise=True)
            file_name = os.path.join(file_path, f'rolling730_denoise_hp_data')
            rolling730_denoise_hp_data.to_excel(f'{file_name}.xlsx')
            rolling1460_denoise_hp_data = ts_preprocess_detrend_denoise(factor_series, 'rolling', periods=1460,
                                                                        min_periods=1460, if_detrend=False,
                                                                        if_denoise=True)
            file_name = os.path.join(file_path, f'rolling1460_denoise_hp_data')
            rolling1460_denoise_hp_data.to_excel(f'{file_name}.xlsx')
            expanding_denoise_hp_data = ts_preprocess_detrend_denoise(factor_series, 'expanding', periods=None,
                                                                      min_periods=365, if_detrend=False,
                                                                      if_denoise=True)
            file_name = os.path.join(file_path, f'expanding_denoise_hp_data')
            expanding_denoise_hp_data.to_excel(f'{file_name}.xlsx')
            # todo 注意，其实上面的结果是一样的，不用重复计算，不过因为为了查找数据方便，都分别计算保存了
            whole_denoise_hp_data = ts_preprocess_detrend_denoise(factor_series, 'whole', if_detrend=False,
                                                                  if_denoise=True)
            file_name = os.path.join(file_path, f'whole_denoise_hp_data')
            whole_denoise_hp_data.to_excel(f'{file_name}.xlsx')

            rolling180_denoise_hp_pct = ts_percentile_rank_score(factor_series, way='rolling', rank_method='quantile',
                                                                 periods=180, min_periods=180, positive=True,
                                                                 scale=100, preprocess=True, if_detrend=False,
                                                                 if_denoise=True)
            rolling365_denoise_hp_pct = ts_percentile_rank_score(factor_series, way='rolling', rank_method='quantile',
                                                                 periods=365, min_periods=365, positive=True,
                                                                 scale=100, preprocess=True, if_detrend=False,
                                                                 if_denoise=True)
            rolling730_denoise_hp_pct = ts_percentile_rank_score(factor_series, way='rolling', rank_method='quantile',
                                                                 periods=730, min_periods=730, positive=True,
                                                                 scale=100, preprocess=True, if_detrend=False,
                                                                 if_denoise=True)
            rolling1460_denoise_hp_pct = ts_percentile_rank_score(factor_series, way='rolling', rank_method='quantile',
                                                                  periods=1460, min_periods=1460, positive=True,
                                                                  scale=100, preprocess=True, if_detrend=False,
                                                                  if_denoise=True)
            expanding_denoise_hp_pct = ts_percentile_rank_score(factor_series, way='expanding', rank_method='quantile',
                                                                periods=None, min_periods=365, positive=True, scale=100,
                                                                preprocess=True, if_detrend=False, if_denoise=True)
            whole_denoise_hp_pct = ts_percentile_rank_score(factor_series, way='whole', rank_method='quantile',
                                                            positive=True, scale=100, preprocess=True,
                                                            if_detrend=False, if_denoise=True)

        if 'denoise_ma7' in preprocess_list:
            # ma7去噪音
            denoise_ma7_data = ts_preprocess_detrend_denoise(factor_series, 'whole', if_detrend=False, if_denoise=True,
                                                             denoise_method='ma',
                                                             denoise_kwargs={'periods': 7, 'min_periods': 7})
            file_name = os.path.join(file_path, f'denoise_ma7_data')
            denoise_ma7_data.to_excel(f'{file_name}.xlsx')

            rolling180_denoise_ma7_pct = ts_percentile_rank_score(factor_series, way='rolling', rank_method='quantile',
                                                                  periods=180, min_periods=180, positive=True,
                                                                  scale=100, preprocess=True, if_detrend=False,
                                                                  if_denoise=True, denoise_method='ma',
                                                                  denoise_kwargs={'periods': 7, 'min_periods': 7})
            rolling365_denoise_ma7_pct = ts_percentile_rank_score(factor_series, way='rolling', rank_method='quantile',
                                                                  periods=365, min_periods=365, positive=True,
                                                                  scale=100, preprocess=True, if_detrend=False,
                                                                  if_denoise=True, denoise_method='ma',
                                                                  denoise_kwargs={'periods': 7, 'min_periods': 7})
            rolling730_denoise_ma7_pct = ts_percentile_rank_score(factor_series, way='rolling', rank_method='quantile',
                                                                  periods=730, min_periods=730, positive=True,
                                                                  scale=100, preprocess=True, if_detrend=False,
                                                                  if_denoise=True, denoise_method='ma',
                                                                  denoise_kwargs={'periods': 7, 'min_periods': 7})
            rolling1460_denoise_ma7_pct = ts_percentile_rank_score(factor_series, way='rolling', rank_method='quantile',
                                                                   periods=1460, min_periods=1460, positive=True,
                                                                   scale=100, preprocess=True, if_detrend=False,
                                                                   if_denoise=True, denoise_method='ma',
                                                                   denoise_kwargs={'periods': 7, 'min_periods': 7})
            expanding_denoise_ma7_pct = ts_percentile_rank_score(factor_series, way='expanding', rank_method='quantile',
                                                                 periods=None, min_periods=365, positive=True,
                                                                 scale=100, preprocess=True, if_detrend=False,
                                                                 if_denoise=True, denoise_method='ma',
                                                                 denoise_kwargs={'periods': 7, 'min_periods': 7})
            whole_denoise_ma7_pct = ts_percentile_rank_score(factor_series, way='whole', rank_method='quantile',
                                                             positive=True, scale=100, preprocess=True,
                                                             if_detrend=False, if_denoise=True, denoise_method='ma',
                                                             denoise_kwargs={'periods': 7, 'min_periods': 7})

        if 'denoise_ma30' in preprocess_list:
            # ma30去噪音
            denoise_ma30_data = ts_preprocess_detrend_denoise(factor_series, 'whole', if_detrend=False, if_denoise=True,
                                                              denoise_method='ma',
                                                              denoise_kwargs={'periods': 30, 'min_periods': 30})
            file_name = os.path.join(file_path, f'denoise_ma30_data')
            denoise_ma30_data.to_excel(f'{file_name}.xlsx')

            rolling180_denoise_ma30_pct = ts_percentile_rank_score(factor_series, way='rolling',
                                                                   rank_method='quantile', periods=180,
                                                                   min_periods=180, positive=True, scale=100,
                                                                   preprocess=True, if_detrend=False, if_denoise=True,
                                                                   denoise_method='ma',
                                                                   denoise_kwargs={'periods': 30, 'min_periods': 30})
            rolling365_denoise_ma30_pct = ts_percentile_rank_score(factor_series, way='rolling',
                                                                   rank_method='quantile', periods=365,
                                                                   min_periods=365, positive=True, scale=100,
                                                                   preprocess=True, if_detrend=False, if_denoise=True,
                                                                   denoise_method='ma',
                                                                   denoise_kwargs={'periods': 30, 'min_periods': 30})
            rolling730_denoise_ma30_pct = ts_percentile_rank_score(factor_series, way='rolling',
                                                                   rank_method='quantile', periods=730,
                                                                   min_periods=730, positive=True, scale=100,
                                                                   preprocess=True, if_detrend=False, if_denoise=True,
                                                                   denoise_method='ma',
                                                                   denoise_kwargs={'periods': 30, 'min_periods': 30})
            rolling1460_denoise_ma30_pct = ts_percentile_rank_score(factor_series, way='rolling',
                                                                    rank_method='quantile', periods=1460,
                                                                    min_periods=1460, positive=True, scale=100,
                                                                    preprocess=True, if_detrend=False, if_denoise=True,
                                                                    denoise_method='ma',
                                                                    denoise_kwargs={'periods': 30, 'min_periods': 30})
            expanding_denoise_ma30_pct = ts_percentile_rank_score(factor_series, way='expanding',
                                                                  rank_method='quantile', periods=None,
                                                                  min_periods=365, positive=True, scale=100,
                                                                  preprocess=True, if_detrend=False, if_denoise=True,
                                                                  denoise_method='ma',
                                                                  denoise_kwargs={'periods': 30, 'min_periods': 30})
            whole_denoise_ma30_pct = ts_percentile_rank_score(factor_series, way='whole', rank_method='quantile',
                                                              positive=True, scale=100, preprocess=True,
                                                              if_detrend=False, if_denoise=True, denoise_method='ma',
                                                              denoise_kwargs={'periods': 30, 'min_periods': 30})

        if 'detrend_denoise' in preprocess_list:
            # 去趋势（减去120均线），去噪音（hp）
            rolling180_detrend_denoise_data = ts_preprocess_detrend_denoise(factor_series, 'rolling', periods=180,
                                                                            min_periods=180, if_detrend=True,
                                                                            detrend_method='ma',
                                                                            detrend_kwargs={'periods': 120},
                                                                            if_denoise=True)
            file_name = os.path.join(file_path, f'rolling180_detrend_denoise_data')
            rolling180_detrend_denoise_data.to_excel(f'{file_name}.xlsx')
            rolling365_detrend_denoise_data = ts_preprocess_detrend_denoise(factor_series, 'rolling', periods=365,
                                                                            min_periods=365, if_detrend=True,
                                                                            detrend_method='ma',
                                                                            detrend_kwargs={'periods': 120},
                                                                            if_denoise=True)
            file_name = os.path.join(file_path, f'rolling365_detrend_denoise_data')
            rolling365_detrend_denoise_data.to_excel(f'{file_name}.xlsx')
            rolling730_detrend_denoise_data = ts_preprocess_detrend_denoise(factor_series, 'rolling', periods=730,
                                                                            min_periods=730, if_detrend=True,
                                                                            detrend_method='ma',
                                                                            detrend_kwargs={'periods': 120},
                                                                            if_denoise=True)
            file_name = os.path.join(file_path, f'rolling730_detrend_denoise_data')
            rolling730_detrend_denoise_data.to_excel(f'{file_name}.xlsx')
            rolling1460_detrend_denoise_data = ts_preprocess_detrend_denoise(factor_series, 'rolling', periods=1460,
                                                                             min_periods=1460, if_detrend=True,
                                                                             detrend_method='ma',
                                                                             detrend_kwargs={'periods': 120},
                                                                             if_denoise=True)
            file_name = os.path.join(file_path, f'rolling1460_detrend_denoise_data')
            rolling1460_detrend_denoise_data.to_excel(f'{file_name}.xlsx')
            expanding_detrend_denoise_data = ts_preprocess_detrend_denoise(factor_series, 'expanding', periods=None,
                                                                           min_periods=365, if_detrend=True,
                                                                           detrend_method='ma',
                                                                           detrend_kwargs={'periods': 120},
                                                                           if_denoise=True)
            file_name = os.path.join(file_path, f'expanding_detrend_denoise_data')
            expanding_detrend_denoise_data.to_excel(f'{file_name}.xlsx')
            # todo 注意，其实上面的结果是一样的，不用重复计算，不过因为为了查找数据方便，都分别计算保存了
            whole_detrend_denoise_data = ts_preprocess_detrend_denoise(factor_series, 'whole', if_detrend=True,
                                                                       detrend_method='ma',
                                                                       detrend_kwargs={'periods': 120},
                                                                       if_denoise=True)
            file_name = os.path.join(file_path, f'whole_detrend_denoise_data')
            whole_detrend_denoise_data.to_excel(f'{file_name}.xlsx')

            rolling180_detrend_denoise_pct = ts_percentile_rank_score(factor_series, way='rolling',
                                                                      rank_method='quantile', periods=180,
                                                                      min_periods=180, positive=True, scale=100,
                                                                      preprocess=True, if_detrend=True, if_denoise=True,
                                                                      detrend_method='ma',
                                                                      detrend_kwargs={'periods': 120})
            rolling365_detrend_denoise_pct = ts_percentile_rank_score(factor_series, way='rolling',
                                                                      rank_method='quantile', periods=365,
                                                                      min_periods=365, positive=True, scale=100,
                                                                      preprocess=True, if_detrend=True, if_denoise=True,
                                                                      detrend_method='ma',
                                                                      detrend_kwargs={'periods': 120})
            rolling730_detrend_denoise_pct = ts_percentile_rank_score(factor_series, way='rolling',
                                                                      rank_method='quantile', periods=730,
                                                                      min_periods=730, positive=True, scale=100,
                                                                      preprocess=True, if_detrend=True, if_denoise=True,
                                                                      detrend_method='ma',
                                                                      detrend_kwargs={'periods': 120})
            rolling1460_detrend_denoise_pct = ts_percentile_rank_score(factor_series, way='rolling',
                                                                       rank_method='quantile', periods=1460,
                                                                       min_periods=1460, positive=True, scale=100,
                                                                       preprocess=True, if_detrend=True,
                                                                       if_denoise=True,
                                                                       detrend_method='ma',
                                                                       detrend_kwargs={'periods': 120})
            expanding_detrend_denoise_pct = ts_percentile_rank_score(factor_series, way='expanding',
                                                                     rank_method='quantile', periods=None,
                                                                     min_periods=365, positive=True, scale=100,
                                                                     preprocess=True, if_detrend=True, if_denoise=True,
                                                                     detrend_method='ma',
                                                                     detrend_kwargs={'periods': 120})
            whole_detrend_denoise_pct = ts_percentile_rank_score(factor_series, way='whole', rank_method='quantile',
                                                                 positive=True, scale=100, preprocess=True,
                                                                 if_detrend=True, if_denoise=True, detrend_method='ma',
                                                                 detrend_kwargs={'periods': 120})

        if 'False' in preprocess_list:
            rolling180_pct = ts_percentile_rank_score(factor_series, way='rolling', rank_method='quantile', periods=180,
                                                      min_periods=180, positive=True, scale=100, preprocess=False)
            rolling365_pct = ts_percentile_rank_score(factor_series, way='rolling', rank_method='quantile', periods=365,
                                                      min_periods=365, positive=True, scale=100, preprocess=False)
            rolling730_pct = ts_percentile_rank_score(factor_series, way='rolling', rank_method='quantile', periods=730,
                                                      min_periods=730, positive=True, scale=100, preprocess=False)
            rolling1460_pct = ts_percentile_rank_score(factor_series, way='rolling', rank_method='quantile',
                                                       periods=1460,
                                                       min_periods=1460, positive=True, scale=100, preprocess=False)
            expanding_pct = ts_percentile_rank_score(factor_series, way='expanding', rank_method='quantile',
                                                     periods=None, min_periods=365, positive=True, scale=100,
                                                     preprocess=False)
            whole_pct = ts_percentile_rank_score(factor_series, way='whole', rank_method='quantile', positive=True,
                                                 scale=100, preprocess=False)

        for preprocess in preprocess_list:  # 指标是否需要进行预处理，预处理包去趋势和降噪
            # 转换信号方式主要测试滚动百分位二分法三分法，历史百分位二分法三分法, 全历史百分位二分法三分法
            for signal_method in ['full_history',
                                  'roll180_pct', 'roll365_pct', 'roll730_pct', 'roll1460_pct',
                                  'expand_pct']:
                if signal_method[0:4] == 'roll':  # 滚动法
                    way = 'rolling'
                elif signal_method == 'expand_pct':
                    way = 'expanding'
                elif signal_method == 'full_history':
                    way = 'whole'
                else:
                    raise NotImplementedError

                for signal_type in ['2', '3', 'holding_pct', 'threshold']:
                    for positive in [False, True]:  # 指标是越小越好，还是越大越好
                        if (signal_method == 'roll180_pct') & (preprocess == 'denoise_ma7'):
                            rank_score = rolling180_denoise_ma7_pct
                            threshold_data = denoise_ma7_data.copy()
                        elif (signal_method == 'roll180_pct') & (preprocess == 'denoise_ma30'):
                            rank_score = rolling180_denoise_ma30_pct
                            threshold_data = denoise_ma30_data.copy()
                        elif (signal_method == 'roll180_pct') & (preprocess == 'denoise_hp'):
                            rank_score = rolling180_denoise_hp_pct
                            threshold_data = rolling180_denoise_hp_data.copy()
                        elif (signal_method == 'roll180_pct') & (preprocess == 'detrend_denoise'):
                            rank_score = rolling180_detrend_denoise_pct
                            threshold_data = rolling180_detrend_denoise_data.copy()
                        elif (signal_method == 'roll180_pct') & (preprocess == 'False'):
                            rank_score = rolling180_pct
                            threshold_data = factor_series.copy()
                        elif (signal_method == 'roll365_pct') & (preprocess == 'denoise_ma7'):
                            rank_score = rolling365_denoise_ma7_pct
                            threshold_data = denoise_ma7_data.copy()
                        elif (signal_method == 'roll365_pct') & (preprocess == 'denoise_ma30'):
                            rank_score = rolling365_denoise_ma30_pct
                            threshold_data = denoise_ma30_data.copy()
                        elif (signal_method == 'roll365_pct') & (preprocess == 'denoise_hp'):
                            rank_score = rolling365_denoise_hp_pct
                            threshold_data = rolling365_denoise_hp_data.copy()
                        elif (signal_method == 'roll365_pct') & (preprocess == 'detrend_denoise'):
                            rank_score = rolling365_detrend_denoise_pct
                            threshold_data = rolling365_detrend_denoise_data.copy()
                        elif (signal_method == 'roll365_pct') & (preprocess == 'False'):
                            rank_score = rolling365_pct
                            threshold_data = factor_series.copy()
                        elif (signal_method == 'roll730_pct') & (preprocess == 'denoise_ma7'):
                            rank_score = rolling730_denoise_ma7_pct
                            threshold_data = denoise_ma7_data.copy()
                        elif (signal_method == 'roll730_pct') & (preprocess == 'denoise_ma30'):
                            rank_score = rolling730_denoise_ma30_pct
                            threshold_data = denoise_ma30_data.copy()
                        elif (signal_method == 'roll730_pct') & (preprocess == 'denoise_hp'):
                            rank_score = rolling730_denoise_hp_pct
                            threshold_data = rolling730_denoise_hp_data.copy()
                        elif (signal_method == 'roll730_pct') & (preprocess == 'detrend_denoise'):
                            rank_score = rolling730_detrend_denoise_pct
                            threshold_data = rolling730_detrend_denoise_data.copy()
                        elif (signal_method == 'roll730_pct') & (preprocess == 'False'):
                            rank_score = rolling730_pct
                            threshold_data = factor_series.copy()
                        elif (signal_method == 'roll1460_pct') & (preprocess == 'denoise_ma7'):
                            rank_score = rolling1460_denoise_ma7_pct
                            threshold_data = denoise_ma7_data.copy()
                        elif (signal_method == 'roll1460_pct') & (preprocess == 'denoise_ma30'):
                            rank_score = rolling1460_denoise_ma30_pct
                            threshold_data = denoise_ma30_data.copy()
                        elif (signal_method == 'roll1460_pct') & (preprocess == 'denoise_hp'):
                            rank_score = rolling1460_denoise_hp_pct
                            threshold_data = rolling1460_denoise_hp_data.copy()
                        elif (signal_method == 'roll1460_pct') & (preprocess == 'detrend_denoise'):
                            rank_score = rolling1460_detrend_denoise_pct
                            threshold_data = rolling1460_detrend_denoise_data.copy()
                        elif (signal_method == 'roll1460_pct') & (preprocess == 'False'):
                            rank_score = rolling1460_pct
                            threshold_data = factor_series.copy()
                        elif (signal_method == 'expand_pct') & (preprocess == 'denoise_ma7'):
                            rank_score = expanding_denoise_ma7_pct
                            threshold_data = denoise_ma7_data.copy()
                        elif (signal_method == 'expand_pct') & (preprocess == 'denoise_ma30'):
                            rank_score = expanding_denoise_ma30_pct
                            threshold_data = denoise_ma30_data.copy()
                        elif (signal_method == 'expand_pct') & (preprocess == 'denoise_hp'):
                            rank_score = expanding_denoise_hp_pct
                            threshold_data = expanding_denoise_hp_data.copy()
                        elif (signal_method == 'expand_pct') & (preprocess == 'detrend_denoise'):
                            rank_score = expanding_detrend_denoise_pct
                            threshold_data = expanding_detrend_denoise_data.copy()
                        elif (signal_method == 'expand_pct') & (preprocess == 'False'):
                            rank_score = expanding_pct
                            threshold_data = factor_series.copy()
                        elif (signal_method == 'full_history') & (preprocess == 'denoise_ma7'):
                            rank_score = whole_denoise_ma7_pct
                            threshold_data = denoise_ma7_data.copy()
                        elif (signal_method == 'full_history') & (preprocess == 'denoise_ma30'):
                            rank_score = whole_denoise_ma30_pct
                            threshold_data = denoise_ma30_data.copy()
                        elif (signal_method == 'full_history') & (preprocess == 'denoise_hp'):
                            rank_score = whole_denoise_hp_pct
                            threshold_data = whole_denoise_hp_data.copy()
                        elif (signal_method == 'full_history') & (preprocess == 'detrend_denoise'):
                            rank_score = whole_detrend_denoise_pct
                            threshold_data = whole_detrend_denoise_data.copy()
                        elif (signal_method == 'full_history') & (preprocess == 'False'):
                            rank_score = whole_pct
                            threshold_data = factor_series.copy()

                        if signal_type == '2':
                            bins_list = [[0, 0.05, 1], [0, 0.1, 1], [0, 0.3, 1], [0, 0.5, 1], [0, 0.7, 1],
                                         [0, 0.9, 1], [0, 0.95, 1]]
                        elif signal_type == '3':
                            # 除了测试0.1 0.9； 0.2 0.8这样的对称的分为点； 还需测试0.3， 0.9等这样非对称的分位点
                            bins_list = [[0, 0.05, 0.95, 1], [0, 0.1, 0.9, 1], [0, 0.2, 0.8, 1], [0, 0.3, 0.7, 1],
                                         [0, 0.4, 0.6, 1],  # 以上为对称的阈值三分位； 下面为非对称的三分位法
                                         [0, 0.1, 0.95, 1], [0, 0.3, 0.95, 1], [0, 0.5, 0.95, 1],
                                         [0, 0.05, 0.9, 1], [0, 0.3, 0.9, 1], [0, 0.5, 0.9, 1],
                                         [0, 0.05, 0.7, 1], [0, 0.1, 0.7, 1], [0, 0.5, 0.7, 1],
                                         [0, 0.05, 0.5, 1], [0, 0.1, 0.5, 1], [0, 0.3, 0.5, 1]]

                        elif signal_type == 'threshold':
                            # 测试5%, 10%, 20%， 40%， 60%， 80%， 90%， 95% 分位点阈值
                            if preprocess == 'False':
                                temp = factor_series.copy()
                            else:
                                temp = threshold_data.copy()
                            threshold_list = list(np.linspace(temp.min(), temp.max(), 6))
                            threshold_list = threshold_list[1:-1]
                            threshold_list.append(temp.min() + 0.05 * (temp.max() - temp.min()))
                            threshold_list.append(temp.min() + 0.1 * (temp.max() - temp.min()))
                            threshold_list.append(temp.min() + 0.9 * (temp.max() - temp.min()))
                            threshold_list.append(temp.min() + 0.95 * (temp.max() - temp.min()))
                            bins_list = [[temp.min(), i, temp.max()] for i in threshold_list]

                            # 如果是阈值检测，默认检测0,1；因为0,1往往有特殊含义
                            if bins_list[0][0] < 0 < bins_list[0][2]:
                                bins_list.append([bins_list[0][0], 0, bins_list[0][2]])
                            if bins_list[0][0] < 1 < bins_list[0][2]:
                                bins_list.append([bins_list[0][0], 1, bins_list[0][2]])

                            # 阈值也加入三分位，经常检测是的 5% 95%； 10% 90%； 20% 80%； 30% 70%； 40% 60%；
                            #                              5% 90%； 30% 90%； 50% 90%
                            #                              10% 95%； 30% 95%； 50% 95%
                            #                              10%, 70%； 10%，50%； 5% 70%； 5% 50%
                            quantile_95 = bins_list[0][0] + 0.95 * (bins_list[0][2] - bins_list[0][0])
                            quantile_90 = bins_list[0][0] + 0.9 * (bins_list[0][2] - bins_list[0][0])
                            quantile_80 = bins_list[0][0] + 0.8 * (bins_list[0][2] - bins_list[0][0])
                            quantile_70 = bins_list[0][0] + 0.7 * (bins_list[0][2] - bins_list[0][0])
                            quantile_60 = bins_list[0][0] + 0.6 * (bins_list[0][2] - bins_list[0][0])
                            quantile_50 = bins_list[0][0] + 0.5 * (bins_list[0][2] - bins_list[0][0])
                            quantile_40 = bins_list[0][0] + 0.4 * (bins_list[0][2] - bins_list[0][0])
                            quantile_30 = bins_list[0][0] + 0.3 * (bins_list[0][2] - bins_list[0][0])
                            quantile_20 = bins_list[0][0] + 0.2 * (bins_list[0][2] - bins_list[0][0])
                            quantile_10 = bins_list[0][0] + 0.1 * (bins_list[0][2] - bins_list[0][0])
                            quantile_5 = bins_list[0][0] + 0.05 * (bins_list[0][2] - bins_list[0][0])

                            bins_list.append([bins_list[0][0], quantile_5, quantile_95, bins_list[0][2]])
                            bins_list.append([bins_list[0][0], quantile_10, quantile_90, bins_list[0][2]])
                            bins_list.append([bins_list[0][0], quantile_20, quantile_80, bins_list[0][2]])
                            bins_list.append([bins_list[0][0], quantile_30, quantile_70, bins_list[0][2]])
                            bins_list.append([bins_list[0][0], quantile_40, quantile_60, bins_list[0][2]])
                            bins_list.append([bins_list[0][0], quantile_5, quantile_90, bins_list[0][2]])
                            bins_list.append([bins_list[0][0], quantile_30, quantile_90, bins_list[0][2]])
                            bins_list.append([bins_list[0][0], quantile_50, quantile_90, bins_list[0][2]])
                            bins_list.append([bins_list[0][0], quantile_10, quantile_95, bins_list[0][2]])
                            bins_list.append([bins_list[0][0], quantile_30, quantile_95, bins_list[0][2]])
                            bins_list.append([bins_list[0][0], quantile_50, quantile_95, bins_list[0][2]])
                            bins_list.append([bins_list[0][0], quantile_5, quantile_70, bins_list[0][2]])
                            bins_list.append([bins_list[0][0], quantile_10, quantile_70, bins_list[0][2]])
                            bins_list.append([bins_list[0][0], quantile_50, quantile_70, bins_list[0][2]])
                            bins_list.append([bins_list[0][0], quantile_5, quantile_50, bins_list[0][2]])
                            bins_list.append([bins_list[0][0], quantile_10, quantile_50, bins_list[0][2]])
                            bins_list.append([bins_list[0][0], quantile_30, quantile_50, bins_list[0][2]])

                        else:  # 仓位法不需要bins
                            bins_list = [[]]

                        for bins in bins_list:
                            if len(bins) == 4:  # 三分法需要测试左侧交易，右侧交易
                                left_right = ['left', 'right']
                            elif len(bins) == 3:  # 两分法不需要区分左右侧交易
                                left_right = ['left']
                            elif len(bins) == 0:  # 仓位法不需要区分左右侧交易
                                left_right = ['left']
                            else:
                                raise NotImplementedError

                            for left_right_trading in left_right:
                                if signal_type in ['2', '3']:
                                    # 对得分指标先转变成买卖信号
                                    signal = ts_percentile_rank_signal(rank_score, is_score=True, positive=positive,
                                                                       bins=bins, left_right=left_right_trading)
                                elif signal_type == 'threshold':
                                    if (preprocess == 'False') or (preprocess == 'denoise_ma7') or (
                                            preprocess == 'denoise_ma30'):
                                        # 因为如果数据不做预处理或者采用均线处理的话，
                                        # 根据阈值转换信号，full_history, roll_pct, expand_pct下结果一样，
                                        # 所以roll_pct, expand_pct直接跳过
                                        if signal_method == 'full_history':
                                            signal = ts_threshold_signal(threshold_data, positive=positive, bins=bins,
                                                                         left_right=left_right_trading)
                                        else:
                                            continue
                                    elif (preprocess == 'denoise_hp') or (preprocess == 'detrend_denoise'):
                                        # 因为如果数据利用了HP去噪音的话或者数据利用了减均线去趋势HP去噪音的话，
                                        # 根据阈值转换信号，roll_pct, expand_pct下结果一样，
                                        # 所以除了roll180_pct， 其他直接跳过
                                        if (signal_method == 'full_history') or (signal_method == 'roll180_pct'):
                                            signal = ts_threshold_signal(threshold_data, positive=positive, bins=bins,
                                                                         left_right=left_right_trading)
                                        else:
                                            continue
                                elif signal_type == 'holding_pct':
                                    signal = rank_score / 100
                                else:
                                    raise NotImplementedError

                                if signal.isnull().all():  # 信号全为NaN或者signal为空的series, 跳过该测试情形
                                    print(
                                        f'{preprocess}-{signal_method}-{signal_type}-{bins}-{positive}-{left_right_trading}情形下信号全为NaN, 无法测试')
                                    continue

                                # 交易信号以及对应的交易价格
                                trade_date_signal = signal[signal != signal.shift(1)]
                                trade_date_signal_prices = pd.concat([trade_date_signal, asset_prices_series],
                                                                     axis=1).dropna()
                                trade_date_signal_prices.columns = ['signal', 'price']
                                file_name = os.path.join(file_path,
                                                         f'preprocess_{preprocess}-{signal_method}-{signal_type}-{bins}-{positive}-{left_right_trading}')
                                trade_date_signal_prices.to_excel(f'{file_name}.xlsx')

                                # signal 需要滞后一期使用
                                signal = signal.shift(1)
                                signal.dropna(inplace=True)
                                asset_weights_df = pd.DataFrame(signal).reset_index()
                                asset_weights_df.columns = ['tradedate', 'weights']
                                asset_weights_df['code'] = asset

                                # 回测绩效分析
                                if signal.isnull().all():  # 滞后一期后信号全为NaN或者signal为空的series, 跳过该测试情形
                                    continue
                                else:
                                    start_date = signal.index[0]
                                # if start_date not in test_period_start_date:
                                #     test_period_start_date.append(start_date)

                                portfolio_equity, benchmark_equity, portfolio_ret, perf = simple_backtest(
                                    asset_weights_df=asset_weights_df,
                                    asset_ret_df=all_asset_ret_df,
                                    asset_type='crypto',
                                    start_date=start_date,
                                    end_date=end_date,
                                    benchmark=asset,
                                    benchmark_type='crypto',
                                    benchmark_ret_df=benchmark_ret_df,
                                    if_plot=False,
                                    file_name=file_name,
                                    commission=commission,
                                    log_ret=True,
                                    long_short='long_only')

                                if signal_type in ['2', '3', 'threshold']:  # 仓位信号没办法统计胜率
                                    # 交易胜率分析
                                    winning_test_data = pd.concat([signal, asset_ret_series], axis=1).dropna()
                                    winning_test_data.columns = ['signal', 'ret']

                                    best_buy_period = pd.DataFrame([['2018-12-05', '2019-03-30'],
                                                                    ['2020-03-12', '2020-04-01'],
                                                                    ['2020-04-26', '2020-10-14'],
                                                                    ['2021-07-12', '2021-07-25']],
                                                                   columns=['start', 'end'])
                                    best_sell_period = pd.DataFrame([['2017-12-07', '2018-01-15'],
                                                                     ['2019-06-21', '2019-08-25'],
                                                                     ['2020-02-01', '2020-03-10'],
                                                                     ['2021-02-10', '2021-05-16']],
                                                                    columns=['start', 'end'])

                                    win_rate_analysis = performance.winning_rate(winning_test_data,
                                                                                 best_buy_period=best_buy_period,
                                                                                 best_sell_period=best_sell_period)
                                    win_rate_analysis.name = '组合策略绩效'
                                    win_rate_analysis = pd.DataFrame(win_rate_analysis)
                                    win_rate_analysis[perf.columns[1]] = np.NAN
                                    perf = pd.concat([perf, win_rate_analysis], axis=0)

                                    # 买卖点质量分析（买卖点是否处在局部最高或者局部最低）

                                perf.rename(columns={
                                    '组合策略绩效': f'{factor_type}-preprocess_{preprocess}-{signal_method}-{signal_type}-{bins}-{positive}-{left_right_trading}'},
                                    inplace=True)

                                if (way == 'rolling') or (way == 'expanding'):
                                    perf_list = perf_list_0
                                    benchmark_perf_0 = perf.iloc[:, 1]
                                elif way == 'whole':
                                    perf_list = perf_list_1
                                    benchmark_perf_1 = perf.iloc[:, 1]
                                else:
                                    print('目前信号转换方法暂不支持该方法')
                                    return
                                perf_list.append(perf.iloc[:, 0])

        all_perf_list = []
        for i in ['rolling or expanding', 'whole']:
            if i == 'rolling or expanding' and perf_list_0:  # perf_list_0非空， 即rolling及expanding下的信号有对应策略
                perf_list = perf_list_0.copy()
                perf_list.append(benchmark_perf_0)
            elif i == 'whole' and perf_list_1:  # perf_list_1非空， 即full_history下的信号有对应策略
                perf_list = perf_list_1.copy()
                perf_list.append(benchmark_perf_1)

            all_perf_1 = pd.concat(perf_list, axis=1)
            all_perf_1 = all_perf_1.astype(str)
            all_perf_1.loc['主动管理超额年化收益率', :] = [float(i[:-1]) if i != 'nan' else 0 for i in
                                                all_perf_1.loc['主动管理超额年化收益率', :]]
            all_perf_1.sort_values(by=['主动管理超额年化收益率'], axis=1, ascending=False, inplace=True)
            all_perf_1.loc['主动管理超额年化收益率', :] = [f"{float(i):.2f}%" for i in all_perf_1.loc['主动管理超额年化收益率', :]]
            file_name = os.path.join(file_path, f'backtest_result_{i}')
            all_perf_1.to_excel((f'{file_name}.xlsx'))

            all_perf_list.append(all_perf_1)

        all_perf = pd.concat(all_perf_list, axis=1)
        all_perf.loc['主动管理超额年化收益率', :] = [float(i[:-1]) for i in all_perf.loc['主动管理超额年化收益率', :]]
        all_perf.sort_values(by=['主动管理超额年化收益率'], axis=1, ascending=False, inplace=True)
        all_perf.loc['主动管理超额年化收益率', :] = [f"{float(i):.2f}%" for i in all_perf.loc['主动管理超额年化收益率', :]]
        file_name = os.path.join(file_path, f'backtest_result_all')
        all_perf.to_excel((f'{file_name}.xlsx'))

    return all_perf


def ta_indic_backtest_test(asset, start_date, end_date, ta_func, ohlcv_data, prices_df, asset_ret_series,
                           all_asset_ret_df, benchmark_ret_df, **kwargs):
    """

    :param str asset: 资产代码
    :param str start_date: '2018-10-01'
    :param str end_date: '2021-10-10'
    :param pd.Series origin_factor_series:
    :param pd.Series asset_ret_series:
    :param pd.DataFrame all_asset_ret_df:
    :param pd.DataFrame benchmark_ret_df:
    :param kwargs:
        - commission: 默认为0，无手续费
    :return:
    """

    commission = kwargs.pop('commission', 0)

    ta_func_value_range = ta_func_util_dict[ta_func.__name__]['reasonable_range']
    ta_func_method_list = ta_func_util_dict[ta_func.__name__]['method']

    # 技术指标本身的入参
    func_vars = ta_func.__code__.co_varnames[:ta_func.__code__.co_argcount]
    if 'period' not in func_vars:
        period_list_for_test = [False]
    else:
        period_list_for_test = period_list.copy()
    if 'ma_period' not in func_vars:
        ma_period_list_for_test = [False]
    else:
        ma_period_list_for_test = ma_period_list.copy()
    if 'short_period' not in func_vars:
        short_long_period_list_for_test = [(False, False)]
    else:
        short_long_period_list_for_test = short_long_period_list.copy()

    def get_indic_data(ta_func, func_vars, period, ma_period, short_long_period):
        """Compute an indicator series by calling ``ta_func`` on the enclosing
        ``ohlcv_data``, passing only the tuning parameters the function declares.

        :param callable ta_func: indicator function; its first positional
            argument is the OHLCV data, the rest are tuning parameters.
        :param tuple func_vars: positional argument names of ``ta_func``
            (``func_vars[0]`` is the data argument).
        :param period: value for a declared ``period`` parameter (``False``
            when the current sweep does not use it).
        :param ma_period: value for a declared ``ma_period`` parameter.
        :param tuple short_long_period: ``(short_period, long_period)`` values.
        :return: the indicator output of ``ta_func``, or ``None`` when the
            call raises (the error is printed so the sweep can skip this case).
        """
        # Candidate values for every tuning parameter this script sweeps over.
        known_params = {
            'period': period,
            'ma_period': ma_period,
            'short_period': short_long_period[0],
            'long_period': short_long_period[1],
        }
        try:
            # Pass exactly the parameters ta_func declares; declared names we
            # do not know about keep their defaults.  This replaces the old
            # if/elif ladder, which left `indic_data` unbound (and silently
            # reported an UnboundLocalError as a calculation failure) for any
            # parameter combination it did not explicitly enumerate.
            call_kwargs = {name: known_params[name]
                           for name in func_vars[1:] if name in known_params}
            return ta_func(ohlcv_data, **call_kwargs)
        except Exception as e:
            print(
                f'{ta_func.__name__}对应的参数period-{period}-ma_period-{ma_period}-short_long_period-{short_long_period}无法计算，错误为{str(e)}')
            return None

    all_perf_list = []
    for method in ta_func_method_list:
        util_period_list_for_test = [False]
        if isinstance(method, str):
            if ('tangent' in method):
                util_period_list_for_test = tangent_period_list.copy()
            elif (method[:9] == 'sma_cross') or ('ema_cross' in method):
                util_period_list_for_test = ma_period_list.copy()
            elif ('longshort' in method):
                util_period_list_for_test = short_long_period_list.copy()
            elif ('lsma_cross' in method):
                util_period_list_for_test = short_long_ma_period_list.copy()
            method_name = method
        else:  # method 为特殊使用法，输入的method为函数对象
            method_func_vars = method.__code__.co_varnames[:method.__code__.co_argcount]
            if 'period' in method_func_vars:
                period_list_for_test = period_list.copy()
            if 'ma_period' in method_func_vars:
                ma_period_list_for_test = ma_period_list.copy()
            if 'short_period' in method_func_vars:
                short_long_period_list_for_test = short_long_period_list.copy()
            method_name = method.__name__

        for period in period_list_for_test:
            for ma_period in ma_period_list_for_test:
                for short_long_period in short_long_period_list_for_test:
                    # 检测下技术指标计算是否正常，异常的话直接跳过
                    indic_data = get_indic_data(ta_func, func_vars, period, ma_period, short_long_period)
                    if (indic_data is None) or (indic_data.isnull().values.all()) or (indic_data.empty):
                        print(f'{ta_func.__name__}对应的参数period-{period}-ma_period-{ma_period}-short_long_period-{short_long_period}计算结果为None')
                        continue
                    if len(indic_data.columns) > 1:
                        print(f'{ta_func.__name__}函数出参包含多列，需要进一步指定')
                        continue
                    for util_period in util_period_list_for_test:
                        if isinstance(method, str):
                            if method == 'enumeration':
                                signal_data = enumeration(indic_data.iloc[:, 0])
                            elif 'threshold' in method:
                                signal_data = indic_data.iloc[:, 0]
                            elif 'lsma_cross_minus' in method:
                                signal_data = lsma_cross_minus(indic_data.iloc[:, 0], util_period=util_period)
                            elif 'lsma_cross_divide' in method:
                                signal_data = lsma_cross_divide(indic_data.iloc[:, 0], util_period=util_period)
                            elif 'sma_cross_minus' in method:
                                signal_data = sma_cross_minus(indic_data.iloc[:, 0], util_period=util_period)
                            elif 'sma_cross_divide' in method:
                                signal_data = sma_cross_divide(indic_data.iloc[:, 0], util_period=util_period)
                            elif 'ema_cross_minus' in method:
                                signal_data = ema_cross_minus(indic_data.iloc[:, 0], util_period=util_period)
                            elif 'ema_cross_divide' in method:
                                signal_data = ema_cross_divide(indic_data.iloc[:, 0], util_period=util_period)
                            elif 'longshort' in method:
                                if 'ma_period' in func_vars:
                                    indic_data_short = get_indic_data(ta_func, func_vars, period=period,
                                                                      ma_period=util_period[0], short_long_period=short_long_period)
                                    indic_data_long = get_indic_data(ta_func, func_vars, period=period,
                                                                     ma_period=util_period[1], short_long_period=short_long_period)
                                else:
                                    indic_data_short = get_indic_data(ta_func, func_vars, period=util_period[0],
                                                                      ma_period=ma_period, short_long_period=short_long_period)
                                    indic_data_long = get_indic_data(ta_func, func_vars, period=util_period[1],
                                                                     ma_period=ma_period, short_long_period=short_long_period)
                                mask1 = (indic_data_short is None) or (indic_data_short.isnull().values.all()) or (indic_data_short.empty)
                                mask2 = (indic_data_long is None) or (indic_data_long.isnull().values.all()) or (indic_data_long.empty)
                                if mask1 or mask2:
                                    continue
                                if 'minus' in method:
                                    signal_data = longshort_cross_minus(indic_data_short.iloc[:, 0], indic_data_long.iloc[:, 0], util_period=util_period)
                                elif 'divide' in method:
                                    signal_data = longshort_cross_divide(indic_data_short.iloc[:, 0], indic_data_long.iloc[:, 0], util_period=util_period)
                                elif 'mtm_reversal' in method:
                                    pass
                                else:
                                    raise NotImplementedError

                            elif 'kline_cross_minus' in method:
                                signal_data = kline_cross_minus(indic_data.iloc[:, 0], ohlcv_data)
                            elif 'kline_cross_divide' in method:
                                signal_data = kline_cross_divide(indic_data.iloc[:, 0], ohlcv_data)
                            elif 'tangent' in method:
                                signal_data = tangent(indic_data.iloc[:, 0], util_period=util_period)
                            elif 'holding_pct' in method:
                                signal_data = ts_percentile_rank_score(indic_data.iloc[:, 0],
                                                                       way='expanding', rank_method='quantile',
                                                                       periods=None, min_periods=365, positive=True,
                                                                       scale=100, preprocess=False)
                            else:
                                raise NotImplementedError
                        else:
                            signal_data = get_indic_data(method, method_func_vars, period, ma_period, short_long_period)
                            if (signal_data is None) or (signal_data.isnull().values.all()) or (signal_data.empty):
                                continue
                            else:
                                signal_data = signal_data.iloc[:, 0]

                        factor_name = ta_func.__name__ + f'-{method_name}' + f'-{period}-{ma_period}-{short_long_period}-method_kwargs-{util_period}'
                        file_path = os.path.join(TA_TEST_DIR, f'{ta_func.__name__}\\{factor_name}')
                        os.makedirs(file_path, exist_ok=True)
                        file_name_temp = os.path.join(file_path, f'backtest_result_all.xlsx')
                        if os.path.exists(f'{file_name_temp}'):  # 该参数情形已经测试过，跳过该参数测试
                            perf_temp = pd.read_excel((f'{file_name_temp}'), index_col='Unnamed: 0')
                            perf_temp = perf_temp.T
                            perf_temp['主动管理超额年化收益率'] = [float(i[:-1]) for i in perf_temp['主动管理超额年化收益率']]
                            perf_temp = perf_temp[perf_temp['主动管理超额年化收益率'] >= 10]
                            perf_temp['主动管理超额年化收益率'] = [f"{float(i):.2f}%" for i in perf_temp['主动管理超额年化收益率']]
                            perf_temp = perf_temp.T
                            all_perf_list.append(perf_temp)
                            continue

                        ts = indic_data.iloc[:, 0]
                        ts = ts[ts[~ts.isnull()].index[0]:]  # 剔除数据最开始的NAN
                        factor_name_temp = ta_func.__name__ + f'-{period}-{ma_period}-{short_long_period}'
                        file_name = os.path.join(file_path, f'{factor_name_temp}')
                        ts.to_csv(f'{file_name}.csv')  # 数据保存
                        ts = ts[start_date:end_date]

                        ts_1 = ts.copy()
                        ts_1.index = pd.to_datetime(ts_1.index)
                        log_price = prices_df['log_prices']
                        log_price.index = pd.to_datetime(log_price.index)
                        asset_prices_series = prices_df['close']

                        fig = plt.figure(figsize=(8, 6))
                        ax1 = fig.add_subplot(111)
                        # prices_df['close'].plot(ax=ax1, style='bD--', alpha=0.4, label='比特币价格')  # alpha表示点的透明程度
                        log_price.plot(ax=ax1, style='b', alpha=0.4, label=f'{asset}对数价格')  # alpha表示点的透明程度
                        # plt.xticks(np.arange(1990,2016))
                        plt.xlabel('日期')
                        # ax1.set_yticks(np.arange(0,0.9,0.1))        # 设置左边纵坐标刻度
                        ax1.set_ylabel(f'{asset} log_price')  # 设置左边纵坐标标签
                        plt.legend(loc=2)  # 设置图例在左上方

                        ax2 = ax1.twinx()
                        ts_1.plot(ax=ax2, style='y', grid=True, alpha=0.4, label=f'{factor_name_temp}')
                        # ax2.set_yticks(np.arange(0,0.121,0.015))    # 设置右边纵坐标刻度
                        ax2.set_ylabel(f'{factor_name_temp}')  # 设置右边纵坐标标签
                        plt.legend(loc=1)  # 设置图例在右上方
                        plt.title(f'{factor_name_temp} & {asset} log_price')  # 给整张图命名

                        file_name = os.path.join(file_path, f'{factor_name_temp}-{asset} log_price')
                        plt.savefig(f'{file_name}.png', dpi=400, bbox_inches='tight')
                        # plt.show()  # 图片不用展示
                        fig.clf()
                        plt.close(fig)

                        if not ('longshort_mtm_reversal' in method_name):
                            ts = signal_data
                            ts = ts[ts[~ts.isnull()].index[0]:]  # 剔除数据最开始的NAN
                            file_name = os.path.join(file_path, f'{factor_name}')
                            ts.to_csv(f'{file_name}.csv')  # 数据保存
                            ts = ts[start_date:end_date]

                        perf_list = []
                        # 指标信号转换方法不属于二分法，三分法，四分法，五分法； 应该是枚举法或者K线结合形态法等
                        if (not isinstance(method, str)) or (method[-1:] not in ['2', '3', '4', '5']):
                            bins_list = [()]
                        else:  # 根据指标的取值范围及信号方法
                            bins_list = threshold_for_method_dict[ta_func_value_range][method]
                            # 过滤掉一些不需要测试的阈值
                            if ('longshort_mtm_reversal' in method_name):
                                temp_max = max(indic_data_short.iloc[:, 0].max(), indic_data_long.iloc[:, 0].max())
                                temp_min = min(indic_data_short.iloc[:, 0].min(), indic_data_long.iloc[:, 0].min())
                            else:
                                temp_max = ts.max()
                                temp_min = ts.min()
                            bins_list = [(temp_min,) + bins + (temp_max,) for bins in bins_list if (bins[0] > temp_min and bins[-1] < temp_max)]
                            # # 测试5%, 10%, 20%， 40%， 60%， 80%， 90%， 95% 分位点阈值
                            # temp = ts.copy()
                            # threshold_list = list(np.linspace(temp.min(), temp.max(), 6))
                            # threshold_list = threshold_list[1:-1]
                            # threshold_list.append(temp.min() + 0.05 * (temp.max() - temp.min()))
                            # threshold_list.append(temp.min() + 0.1 * (temp.max() - temp.min()))
                            # threshold_list.append(temp.min() + 0.9 * (temp.max() - temp.min()))
                            # threshold_list.append(temp.min() + 0.95 * (temp.max() - temp.min()))
                            if not bins_list:  # 需要测试的阈值为空， 跳过
                                continue

                        for bins in bins_list:
                            if len(bins) == 4:  # 三分法需要测试左侧交易，右侧交易
                                left_right = ['left', 'right']
                            elif len(bins) in [0, 3, 5, 6]:  # 两分法四分法五分法不需要区分左右侧交易
                                left_right = ['left']
                            else:
                                raise NotImplementedError

                            for left_right_trading in left_right:
                                for positive in [False, True]:  # 指标是越小越好，还是越大越好
                                    if (not isinstance(method, str)) or (method[-1:] not in ['2', '3', '4', '5', 't']):
                                        # ts 已经是信号了
                                        if positive:
                                            signal = ts
                                        else:
                                            signal = ts.copy()
                                            signal[ts == 1] = 'sell'
                                            signal[ts == 0] = 'buy'
                                            signal[signal == 'buy'] = 1
                                            signal[signal == 'sell'] = 0
                                    elif method[-1:] in ['2', '3']:
                                        signal = ts_threshold_signal(ts, positive=positive, bins=bins, left_right=left_right_trading)
                                    elif method[-1:] in ['4', '5']:
                                        if 'longshort' in method:
                                            signal = ts_mtm_reversal_signal([indic_data_short.iloc[:, 0], indic_data_long.iloc[:, 0]], positive=positive, bins=bins)
                                        elif 'threshold' in method:
                                            signal = ts_mtm_reversal_signal([ts], positive=positive, bins=bins)
                                        else:
                                            raise NotImplementedError
                                    elif 'holding_pct' in method:
                                        signal_data = ts_percentile_rank_score(indic_data.iloc[:, 0], way='expanding', rank_method='quantile',
                                                                               periods=None, min_periods=365,
                                                                               positive=positive, scale=100, preprocess=False)
                                        signal = signal_data / 100
                                    else:
                                        raise NotImplementedError

                                    if signal.isnull().all():  # 信号全为NaN或者signal为空的series, 跳过该测试情形
                                        print(f'{method_name}-{bins}-{positive}-{left_right_trading}情形下信号全为NaN, 无法测试')
                                        continue

                                    # 交易信号以及对应的交易价格
                                    trade_date_signal = signal[signal != signal.shift(1)]
                                    trade_date_signal_prices = pd.concat([trade_date_signal, asset_prices_series], axis=1).dropna()
                                    trade_date_signal_prices.columns = ['signal', 'price']
                                    file_name = os.path.join(file_path, f'{method_name}-{bins}-{positive}-{left_right_trading}')
                                    trade_date_signal_prices.to_csv(f'{file_name}.csv')

                                    # signal 需要滞后一期使用
                                    signal = signal.shift(1)
                                    signal.dropna(inplace=True)
                                    asset_weights_df = pd.DataFrame(signal).reset_index()
                                    asset_weights_df.columns = ['tradedate', 'weights']
                                    asset_weights_df['code'] = asset

                                    # 回测绩效分析
                                    if signal.isnull().all():  # 滞后一期后信号全为NaN或者signal为空的series, 跳过该测试情形
                                        continue
                                    else:
                                        signal_start_date = signal.index[0]
                                    # if start_date not in test_period_start_date:
                                    #     test_period_start_date.append(start_date)

                                    portfolio_equity, benchmark_equity, portfolio_ret, perf = simple_backtest(
                                        asset_weights_df=asset_weights_df,
                                        asset_ret_df=all_asset_ret_df,
                                        asset_type='crypto',
                                        start_date=signal_start_date,
                                        end_date=end_date,
                                        benchmark=asset,
                                        benchmark_type='crypto',
                                        benchmark_ret_df=benchmark_ret_df,
                                        if_plot=False,
                                        file_name=file_name,
                                        commission=commission,
                                        log_ret=True,
                                        long_short='long_only')

                                    if 'holding_pct' not in method_name:  # 仓位信号没办法统计胜率
                                        # 交易胜率分析
                                        winning_test_data = pd.concat([signal, asset_ret_series], axis=1).dropna()
                                        winning_test_data.columns = ['signal', 'ret']

                                        best_buy_period = pd.DataFrame([['2018-12-05', '2019-03-30'],
                                                                        ['2020-03-12', '2020-04-01'],
                                                                        ['2020-04-26', '2020-10-14'],
                                                                        ['2021-07-12', '2021-07-25']],
                                                                       columns=['start', 'end'])
                                        best_sell_period = pd.DataFrame([['2017-12-07', '2018-01-15'],
                                                                         ['2019-06-21', '2019-08-25'],
                                                                         ['2020-02-01', '2020-03-10'],
                                                                         ['2021-02-10', '2021-05-16']],
                                                                        columns=['start', 'end'])

                                        win_rate_analysis = performance.winning_rate(winning_test_data,
                                                                                     best_buy_period=best_buy_period,
                                                                                     best_sell_period=best_sell_period)
                                        win_rate_analysis.name = '组合策略绩效'
                                        win_rate_analysis = pd.DataFrame(win_rate_analysis)
                                        win_rate_analysis[perf.columns[1]] = np.NAN
                                        perf = pd.concat([perf, win_rate_analysis], axis=0)
                                    else:
                                        index = ['交易次数', '平均多少天交易一次', '总胜率',
                                                 '买入信号胜率', '买入最大盈利', '买入最大亏损', '买入最大回撤', '买入平均盈利',
                                                 '卖出信号胜率', '卖出最大盈利', '卖出最大亏损', '卖出最大回撤', '卖出平均盈利',
                                                 '最佳买入占所有最佳买点比例', '最佳买入占所有买入信号比例',
                                                 '最佳卖出占所有最佳卖点比例', '最佳卖出占所有卖出信号比例',
                                                 '最佳买卖占所有最佳买卖点比例', '最佳买卖占所有买卖信号比例',
                                                 ]
                                        fake_win_rate = pd.DataFrame(index=index)
                                        perf = pd.concat([perf, fake_win_rate], axis=0)

                                        # 买卖点质量分析（买卖点是否处在局部最高或者局部最低）

                                    perf.rename(columns={'组合策略绩效': f'{factor_name}-{bins}-{positive}-{left_right_trading}'}, inplace=True)
                                    perf_temp = perf.iloc[:, 0]
                                    if float(perf_temp['主动管理超额年化收益率'][:-1]) >= 10:  # 收益比较大才纳入整体统计，不然all_perf数据大小超标
                                        all_perf_list.append(perf_temp)
                                    perf_list.append(perf.iloc[:, 0])

                        if perf_list:
                            all_perf_1 = pd.concat(perf_list, axis=1)
                        else:
                            all_perf_1 = pd.DataFrame(index=perf.index)
                        all_perf_1.loc['主动管理超额年化收益率', :] = [float(i[:-1]) for i in all_perf_1.loc['主动管理超额年化收益率', :]]
                        all_perf_1.sort_values(by=['主动管理超额年化收益率'], axis=1, ascending=False, inplace=True)
                        all_perf_1.loc['主动管理超额年化收益率', :] = [f"{float(i):.2f}%" for i in all_perf_1.loc['主动管理超额年化收益率', :]]
                        file_name = os.path.join(file_path, f'backtest_result_all')
                        all_perf_1.to_excel((f'{file_name}.xlsx'))

    if all_perf_list:
        all_perf = pd.concat(all_perf_list, axis=1)
    else:
        all_perf = pd.DataFrame(index=perf.index)
    all_perf.loc['主动管理超额年化收益率', :] = [float(i[:-1]) for i in all_perf.loc['主动管理超额年化收益率', :]]
    all_perf.sort_values(by=['主动管理超额年化收益率'], axis=1, ascending=False, inplace=True)
    all_perf.loc['主动管理超额年化收益率', :] = [f"{float(i):.2f}%" for i in all_perf.loc['主动管理超额年化收益率', :]]
    file_path = os.path.join(TA_TEST_DIR, f'{ta_func.__name__}')
    file_name = os.path.join(file_path, f'backtest_result_all')
    all_perf.to_excel((f'{file_name}.xlsx'))

    return all_perf


def tp_match_test(origin_factor_series, asset_prices_series):
    """Turning-point (peak/trough) matching effectiveness test.

    For each factor variant — the raw series plus its 1-day, 7-day and
    30-day differences — detrend and denoise both the factor and the log
    price series, then match their turning points and persist the
    statistics/plots under TEST_DIR.

    Args:
        origin_factor_series: pd.Series of raw factor values indexed by date
            (must have a ``name``; it is used to build output paths).
        asset_prices_series: pd.Series of asset log prices, named
            'log_prices', indexed by date.

    Returns:
        dict mapping each tested factor name to a tuple
        (all_tp_df, all_matched_tp_df, matched_tp_statistics) produced by
        ``match_tp(...).match``. (Previously these results were discarded;
        returning them is backward-compatible — the caller may ignore it.)
    """
    # Map each factor variant to its differencing period; None keeps the raw series.
    diff_periods = {'origin': None, 'diff_d': 1, 'diff_w': 7, 'diff_m': 30}
    results = {}

    for factor_type, diff_period in diff_periods.items():
        # Build the factor variant to test.
        if diff_period is None:
            factor_series = origin_factor_series
        else:
            factor_series = ts_diff(origin_factor_series, period=diff_period)

        factor_name = factor_series.name + f'_{factor_type}'
        file_path = os.path.join(TEST_DIR, f'{factor_name}')
        os.makedirs(file_path, exist_ok=True)

        # %% Turning-point matching test (peak+trough, peak-only and trough-only match ratios).
        # The target series is the (log) price index. To avoid false signals from
        # high-frequency wiggles, both price and factor are filtered first
        # (detrend, then denoise), and only rows where both series exist are kept.
        tp_df = pd.concat([factor_series, asset_prices_series], axis=1)
        tp_df.index = pd.to_datetime(tp_df.index)
        tp_df.dropna(inplace=True)

        tp_factor_series = tp_df[factor_series.name]
        tp_factor_series.name = factor_series.name + f'_{factor_type}'
        tp_price_series = tp_df['log_prices']

        # Remove trend from both series (plots saved alongside the statistics).
        file_name = os.path.join(file_path, f'turning point matching_factor_detrend')
        de_trended_factor_data, factor_trend = de_trend(tp_factor_series, ts_freq='D', if_plot=True,
                                                        file_name=file_name)
        file_name = os.path.join(file_path, f'turning point matching_target_detrend')
        de_trended_prices_data, price_trend = de_trend(tp_price_series, ts_freq='D', if_plot=True, file_name=file_name)

        # Remove noise from the detrended series (HP-filter style smoothing, lamda=500).
        file_name = os.path.join(file_path, f'turning point matching_factor_denoise')
        cleaned_factor_data, factor_noise = de_noise(de_trended_factor_data, ts_freq='D', lamda=500, if_plot=True,
                                                     file_name=file_name)
        file_name = os.path.join(file_path, f'turning point matching_target_denoise')
        cleaned_prices_data, price_noise = de_noise(de_trended_prices_data, ts_freq='D', lamda=500, if_plot=True,
                                                    file_name=file_name)

        # Match turning points between the cleaned factor and cleaned price series
        # and save the matching statistics / plots.
        file_name = os.path.join(file_path, f'turning point matching_statistics')
        tp_matching = match_tp(cleaned_factor_data, cleaned_prices_data, de_trended_factor_data, de_trended_prices_data,
                               data_frequency='day')
        all_tp_df, all_matched_tp_df, matched_tp_statistics = tp_matching.match(if_plot=True, file_name=file_name)
        results[factor_name] = (all_tp_df, all_matched_tp_df, matched_tp_statistics)

    return results


if __name__ == '__main__':
    # Test configuration: asset symbol and the analysis date window.
    asset = 'BTC'
    start_date = '2015-01-01'
    end_date = '2021-10-13'

    # Disabled alternative: fetch prices online and derive the factor instead of reading CSVs.
    # prices_df = get_prices(ohlc=False, asset=asset, start_date=start_date, end_date=end_date)
    # prices_df['log_prices'] = np.log10(prices_df['close'])
    # prices_df['ma140'] = prices_df['close'].rolling(window=140, min_periods=140).mean()
    # prices_df['close_ma140_ratio'] = prices_df['close'] / prices_df['ma140']
    # # prices_df.to_csv(f'{asset}价格与均线对比.csv')

    # Disabled plotting block: price vs. 140-day moving-average comparison chart.
    # fig = plt.figure(figsize=(8, 6))
    # ax1 = fig.add_subplot(111)
    # # prices_df['close'].plot(ax=ax1, style='bD--', alpha=0.4, label='比特币价格')  # alpha sets marker transparency
    # # prices_df['ma140'].plot(ax=ax1, style='go-.', alpha=0.5, label='140日均线')  # 'v' is a triangle marker, 'D' a square
    # prices_df['log_prices'].plot(ax=ax1, style='b', alpha=0.4, label=f'{asset}对数价格')  # alpha sets marker transparency
    # # prices_df['ma140'].plot(ax=ax1, style='g', alpha=0.5, label='140日均线')  # 'v' is a triangle marker, 'D' a square
    # # plt.xticks(np.arange(1990,2016))
    # plt.xlabel('日期')
    # # ax1.set_yticks(np.arange(0,0.9,0.1))        # set left y-axis ticks
    # ax1.set_ylabel('价格')  # set left y-axis label
    # plt.legend(loc=2)  # place legend at upper left
    #
    # ax2 = ax1.twinx()
    # # prices_df['close_ma140_ratio'].plot(ax=ax2, grid=True, label='比特币价格/140日均线', style='y>-.', alpha=0.7)
    # prices_df['close_ma140_ratio'].plot(ax=ax2, grid=True, label=f'{asset}价格/140日均线', style='y', alpha=0.7)
    # # ax2.set_yticks(np.arange(0,0.121,0.015))    # set right y-axis ticks
    # ax2.set_ylabel(f'{asset}价格/140日均线')  # set right y-axis label
    # plt.legend(loc=1)  # place legend at upper right
    # plt.title(f'{asset}价格与均线对比')  # overall figure title
    #
    # # plt.savefig('价格与均线对比.png', dpi=400, bbox_inches='tight')
    # plt.show()
    # fig.clf()
    # plt.close(fig)

    # %% Factor preprocessing, e.g. winsorization, standardization, denoising, detrending, etc.

    # Disabled alternative: fetch asset + USDT return data online instead of reading the CSV.
    # all_asset_ret = get_ret([asset, 'USDT'], start_date, end_date)  # fetch returns for the asset and USDT
    # all_asset_ret.to_csv(f'all_asset_ret.csv')

    # Load pre-computed inputs from DATA_DIR (CSV snapshots produced by the disabled code above).
    file_name = os.path.join(DATA_DIR, f'close_ma140_ratio')
    origin_factor_series = pd.read_csv(f'{file_name}.csv', index_col=0)['close_ma140_ratio']
    file_name = os.path.join(DATA_DIR, f'{asset}_price_log_price')
    prices_df = pd.read_csv(f'{file_name}.csv', index_col=0)
    asset_prices_series = prices_df['log_prices']  # asset log-price series used by the turning-point matching test
    file_name = os.path.join(DATA_DIR, f'all_asset_ret')
    all_asset_ret = pd.read_csv(f'{file_name}.csv', index_col=0)
    asset_ret_series = all_asset_ret[asset]  # asset return series used by the correlation effectiveness tests
    all_asset_ret_df = all_asset_ret.unstack().reset_index()
    all_asset_ret_df.columns = ['code', 'tradedate', 'daily_ret']  # reshape asset/USDT returns into the long format the backtest expects
    benchmark_ret_df = pd.DataFrame(all_asset_ret[asset])  # asset returns used as the backtest benchmark

    # Other available tests (currently disabled); only the turning-point matching test runs.
    # periodly_timing_test(origin_factor_series, asset_ret_series)
    # backtest_test(origin_factor_series, all_asset_ret_df, benchmark_ret_df)
    tp_match_test(origin_factor_series, asset_prices_series)