from itertools import product
from environ import playCfg
from function import *
import re
from datetime import timedelta
import time
from joblib import load, dump
import tempfile

# Directory for temporary joblib dumps shared with parallel worker processes.
temp_path = os.path.join(root_path, 'data', 'temp')
# exist_ok=True avoids the TOCTOU race of exists()-then-makedirs() when
# several processes import this module concurrently.
os.makedirs(temp_path, exist_ok=True)

# Per-process cache of memory-mapped dataframes, keyed by dump-file path.
swap_dict = {}
# Temp-file paths created during the run; removed at the end of __main__.
temp_files = []


def get_swap_df(df_path):
    """Return the dataframe dumped at *df_path*, loading it on first use.

    Loads are memory-mapped (copy-on-write) and cached per process in the
    module-level ``swap_dict`` so repeated calls are cheap.
    """
    try:
        return swap_dict[df_path]
    except KeyError:
        log.debug(f"加载数据 {df_path}")
        loaded = load(df_path, mmap_mode='c')
        swap_dict[df_path] = loaded
        return loaded


# Output directories for the traversal results, grouped by factor class and
# by filter class.
output_path = os.path.join(root_path, 'src_backtesting/output/参数遍历结果')
output_fa_path = os.path.join(output_path, '选币因子分类')
output_fi_path = os.path.join(output_path, '过滤因子分类')
# makedirs creates the whole chain (including output_path) and, with
# exist_ok=True, is race-free — unlike the previous exists()+mkdir pairs,
# which could fail if a parent was missing or another process won the race.
os.makedirs(output_fa_path, exist_ok=True)
os.makedirs(output_fi_path, exist_ok=True)


# Run one playback: load the dumped data, compute factors, select coins,
# replay the strategy and append the summary to the traversal CSVs.
def run_play(df_path, playCfg, othCfg, replace_symbol_to_int, replace_symbol_to_int_, all_symbol_list, symbols_data_path, quit_arry):
    """Replay one factor/filter parameter combination and record its results.

    Designed to run inside a joblib worker: the large dataframes are
    re-loaded memory-mapped from *df_path* / *symbols_data_path* instead of
    being pickled into each worker.

    Parameters
    ----------
    df_path : str
        Path of the joblib dump holding the preprocessed hourly dataframe.
    playCfg : mapping
        Playback config; entries are indexed with ``[0]`` below, so each
        value is presumably a one-element sequence — TODO confirm in
        environ.playCfg.
    othCfg : dict
        Per-run config, unpacked via ``load_othCfg``.
    replace_symbol_to_int, replace_symbol_to_int_ : dict
        symbol -> int and int -> symbol mappings built by ``prepare_df``.
    all_symbol_list : list
        Sorted symbols present in the data.
    symbols_data_path : str
        Path of the joblib dump with per-symbol close/avg_price data.
    quit_arry : numpy.ndarray
        ``[runtime, symbol]`` rows for delisted coins (may be empty).

    Returns
    -------
    pandas.DataFrame
        The summary frame ``res`` with factor/filter parameter columns added.
    """
    # Slight stagger so parallel workers don't all hit the dump files at once.
    time.sleep(1)
    log_level, cal_factor_type, hourly_details, select_by_hour, filter_before_exec, filter_after_exec, start_date, end_date, factor_long_list, factor_short_list, trade_type, compound_name, quit_symbol_filter_hour, p_signal_fun, select_offsets, white_list, black_list = load_othCfg(othCfg)
    log.remove()
    log.add(sys.stdout, level=log_level)
    # NOTE(review): hold_hour is computed but never used in this function.
    hold_hour = str(playCfg['hold_hour_num'][0]) + 'H'
    # Extract the filter column names (the ['...'] pieces) from the pre- and
    # post-selection filter expressions, deduped while preserving order.
    filter_list = []
    [filter_list.extend(re.findall(r"\['(.+?)'\]", x))
     for x in filter_before_exec + filter_after_exec]
    filter_list = list(dict.fromkeys(filter_list))

    # fundingRate is merged from a separate table in prepare_df, so it must
    # not be selected as a regular dataframe column here.
    if 'fundingRate' in filter_list:
        use_fundingRate = True
        filter_list.remove('fundingRate')
    else:
        use_fundingRate = False
    start_date = pd.to_datetime(start_date)
    end_date = pd.to_datetime(end_date)

    all_factor_list = factor_long_list + factor_short_list
    factor_class_list = tools.convert_to_cls(all_factor_list)
    filter_class_list = [i.split('_fl_')[0] for i in filter_list]
    feature_list = tools.convert_to_feature(all_factor_list)
    # Memory-mapped loads, cached per worker process in swap_dict.
    df = get_swap_df(df_path)
    symbols_data = get_swap_df(symbols_data_path)
    df = df[['candle_begin_time', 'close', 'symbol'] + feature_list + filter_list]
    # NOTE(review): the former readhour/date-clipping path was removed here;
    # end_date is unconditionally forced to the last timestamp in the data,
    # so the configured end_date above is effectively ignored.
    end_date = df['candle_begin_time'].max()

    # === Compute factors
    if cal_factor_type == 'cross':
        # cross-sectional
        df = cal_factor_by_cross(df, factor_long_list, factor_short_list)
    elif cal_factor_type == 'vertical':
        # per-symbol (time-series)
        df = cal_factor_by_vertical(df, factor_long_list, factor_short_list)
    else:
        raise ValueError('cal_factor_type set error!')
    log.info('因子计算完成')

    # Coin selection on numpy arrays; base_index includes hold_hour_num hours
    # of warm-up before start_date.
    base_index = pd.date_range(start=start_date - timedelta(hours=int(playCfg['hold_hour_num'][0])), end=end_date, freq='1H').tolist()
    select_coin_long, select_coin_short, arr_data = np_gen_selected(
        df, base_index, filter_before_exec, filter_after_exec, select_by_hour, playCfg, select_offsets, white_list, black_list, replace_symbol_to_int)
    log.info('选币完成')

    res, curve, account_df, display_df, order_df = neutral_strategy_playback(
        playCfg,
        p_signal_fun,
        start_date,
        end_date,
        symbols_data,
        arr_data,
        quit_arry,
        all_symbol_list,
        replace_symbol_to_int,
        replace_symbol_to_int_,
        select_coin_long,
        select_coin_short,
        compound_name=compound_name,
        hourly_details=hourly_details)
    data_path = os.path.join(rtn_data_path, compound_name)
    if not os.path.exists(data_path):
        os.makedirs(data_path)
    # Save the playback outputs.
    save_path = os.path.join(data_path, '净值持仓数据.csv')
    res.to_csv(save_path, encoding='gbk')
    curve.to_csv(save_path, encoding='gbk', mode='a')  # appended to the same CSV
    save_path = os.path.join(data_path, '虚拟账户数据.csv')
    account_df.to_csv(save_path, encoding='gbk')
    save_path = os.path.join(data_path, '持仓面板数据.pkl')
    display_df.to_pickle(save_path)
    save_path = os.path.join(data_path, '下单面板数据.pkl')
    order_df.to_pickle(save_path)
    log.info(f'\n{res.to_markdown()}')

    # (plotting hooks disabled here: plot_output / plot_log_double)

    # Append the factor parameters to res before saving.
    # NOTE(review): only the first long-factor tuple is recorded; assumes the
    # (name, tf, param, diff, ...) tuple layout produced by FactorProcessor.
    res['因子名'] = factor_long_list[0][0]
    res['因子TF'] = factor_long_list[0][1]
    res['因子参数'] = factor_long_list[0][2]
    res['因子差分'] = factor_long_list[0][3]

    for i in range(len(filter_list)):
        res[f'过滤因子_{i + 1}'] = filter_list[i].split('_fl_')[0]
        res[f'过滤因子_参数_{i + 1}'] = filter_list[i].split('_fl_')[1]

    # Append to the per-factor-class CSV (header only on first creation).
    if not os.path.exists(os.path.join(output_fa_path, f'{factor_class_list}.csv')):
        res.to_csv(os.path.join(output_fa_path, f'{factor_class_list}.csv'))
    else:
        res.to_csv(os.path.join(output_fa_path, f'{factor_class_list}.csv'), header=False, mode='a')

    # Append to the per-filter-class CSV.
    if filter_class_list == []: filter_class_list.append(None)
    if not os.path.exists(os.path.join(output_fi_path, f'{filter_class_list}.csv')):
        res.to_csv(os.path.join(output_fi_path, f'{filter_class_list}.csv'))
    else:
        res.to_csv(os.path.join(output_fi_path, f'{filter_class_list}.csv'), header=False, mode='a')

    return res


def prepare_df(_df, playCfg, othCfgs):
    """Preprocess the raw hourly dataframe for a batch of playback configs.

    Accumulates the union of factor/filter columns needed by every config in
    *othCfgs*, clips the requested window to the available data, encodes
    symbols as dense integers, cleans and median-fills the feature columns,
    optionally merges funding rates, and trims delisted coins.

    Parameters
    ----------
    _df : pandas.DataFrame
        Raw hourly candle data (not mutated; a copy is taken).
    playCfg : mapping
        Playback config; ``playCfg['hold_hour_num'][0]`` supplies the holding
        period in hours — presumably a one-element sequence, TODO confirm in
        environ.playCfg.
    othCfgs : iterable of dict
        Per-run configs; factor and filter lists are accumulated across all
        of them so one preprocessed frame serves every run.

    Returns
    -------
    tuple
        ``(df, replace_symbol_to_int, replace_symbol_to_int_,
        all_symbol_list, symbols_data, quit_arry)``
    """
    factor_long_list = []
    factor_short_list = []
    filter_list_save = []
    c_rate, hold_hour_num, long_coin_num, short_coin_num, long_p, short_p, leverage, long_risk_position, initial_trade_usdt, offset_stop_win, offset_stop_loss = load_playCfg(playCfg)
    for othCfg in othCfgs:
        log_level, cal_factor_type, hourly_details, select_by_hour, filter_before_exec, filter_after_exec, start_date, end_date, f_long_list, f_short_list, trade_type, compound_name, quit_symbol_filter_hour, p_signal_fun, select_offsets, white_list, black_list = load_othCfg(
            othCfg)
        factor_short_list += f_short_list
        factor_long_list += f_long_list
        filter_list_save += filter_before_exec + filter_after_exec

    # Logger level / dates / quit_symbol_filter_hour come from the last
    # config of the loop above.
    log.remove()
    log.add(sys.stdout, level=log_level)
    # Extract filter factor column names (the ['...'] pieces), deduped while
    # preserving order.
    filter_list = []
    [filter_list.extend(re.findall(r"\['(.+?)'\]", x))
     for x in filter_list_save]
    filter_list = list(dict.fromkeys(filter_list))

    # fundingRate lives in a separate table; merge it below rather than
    # selecting it as a regular column.
    if 'fundingRate' in filter_list:
        use_fundingRate = True
        filter_list.remove('fundingRate')
    else:
        use_fundingRate = False
    start_date = pd.to_datetime(start_date)
    end_date = pd.to_datetime(end_date)

    all_factor_list = factor_long_list + factor_short_list
    feature_list = tools.convert_to_feature(all_factor_list)

    df = _df.copy()
    # Clip the requested window to what the local data actually covers.
    if df['candle_begin_time'].max() < pd.to_datetime(end_date):
        data_modify_time = df['candle_begin_time'].max() - timedelta(hours=1)
        log.warning(f'本地数据最新日期小于设定回测结束时间,请检查。本次回测结束时间将被改为:{data_modify_time}')
        end_date = data_modify_time
    if df['candle_begin_time'].min() > pd.to_datetime(start_date) - timedelta(hours=int(playCfg['hold_hour_num'][0])):
        data_modify_time = df['candle_begin_time'].min() + timedelta(hours=int(playCfg['hold_hour_num'][0]))
        log.warning(f'本地数据最早日期大于设定回测开始时间,请检查。本次回测开始时间将被改为:{data_modify_time}')
        start_date = data_modify_time

    # Restrict to the backtest window plus hold_hour_num hours of warm-up.
    df = df[df['candle_begin_time'] >= pd.to_datetime(
        start_date) - timedelta(hours=int(playCfg['hold_hour_num'][0]))]
    df = df[df['candle_begin_time'] <= pd.to_datetime(end_date)]

    # Encode symbols as dense integers for the numpy selection step.
    all_symbol_list = sorted(set(df['symbol'].unique()))
    replace_symbol_to_int = {v: k for k, v in enumerate(all_symbol_list)}
    replace_symbol_to_int_ = {k: v for k, v in enumerate(all_symbol_list)}
    df['symbol'] = df['symbol'].replace(replace_symbol_to_int)
    symbols_data = df[['candle_begin_time', 'symbol', 'close', 'avg_price']]

    # Drop rows that cannot be traded.
    df = df[df['volume'] > 0]  # symbols with no volume in this period
    # The last few rows have no next-period average price yet.
    df.dropna(subset=['下个周期_avg_price'], inplace=True)
    # === Preprocessing: keep only the columns the playback needs.
    df = df[['candle_begin_time', 'close', 'symbol'] +
            feature_list + filter_list]
    df = df.set_index(['candle_begin_time', 'symbol']).sort_index()
    df = df.replace([np.inf, -np.inf], np.nan)
    # Fill factor NaNs with the column median; filling 0 could pin a symbol
    # to the top/bottom of the later rank step.
    df[feature_list] = df[feature_list].apply(lambda x: x.fillna(x.median()))
    df = df.reset_index()

    if use_fundingRate:
        # === Merge funding rates.
        fundingrate_data = reader.read_fundingrate()
        fundingrate_data['symbol'] = fundingrate_data['symbol'].replace(replace_symbol_to_int)
        df = pd.merge(df,
                      fundingrate_data[['candle_begin_time', 'symbol', 'fundingRate']],
                      on=['candle_begin_time', 'symbol'], how="left")
        # Assign back rather than Series.fillna(inplace=True): the chained
        # inplace form may operate on a copy and is deprecated in pandas.
        df['fundingRate'] = df['fundingRate'].fillna(0)
        log.info('整合资金费率完成')

    # Exclude delisted coins ahead of time: a symbol whose last candle is
    # before the global max timestamp has left the market.
    max_time = df['candle_begin_time'].max()
    quit_df = df.groupby('symbol')['candle_begin_time'].max().to_frame()
    quit_df = quit_df[quit_df['candle_begin_time'] < max_time]
    quit_symbols = quit_df.index.tolist()
    quit_df_ = df[df['symbol'].isin(quit_symbols)]
    noquit_df = df[~df['symbol'].isin(quit_symbols)]
    # Delisted coins: drop their final quit_symbol_filter_hour + 1 rows, as
    # live trading would blacklist them N hours before delisting.
    quit_df_ = quit_df_.groupby('symbol', group_keys=False).apply(
        lambda x: x.iloc[:-quit_symbol_filter_hour - 1])
    # DataFrame.append was removed in pandas 2.0; pd.concat is the drop-in
    # row-wise equivalent (indexes preserved, same column alignment).
    df = pd.concat([noquit_df, quit_df_])
    if quit_df_.empty:
        quit_arry = np.array([])
    else:
        df_quit = quit_df_.groupby('symbol').tail(1)
        df_quit['runtime'] = ((df_quit['candle_begin_time'] - pd.to_datetime(start_date)).dt.total_seconds() / 3600).astype('int')
        quit_arry = df_quit[['runtime', 'symbol']].values
    log.info('数据处理完成')
    return df, replace_symbol_to_int, replace_symbol_to_int_, all_symbol_list, symbols_data, quit_arry


class FactorProcessor():
    """Expand a factor-traversal config into per-class parameter combos.

    The config dict maps 'name' / 'tf' / 'params' / 'diff' / 'weigth' to
    lists of candidate values; each factor name gets the full Cartesian
    product of the remaining axes.
    """

    def __init__(self, ergodic_factor_list) -> None:
        tf_axis = ergodic_factor_list['tf']
        param_axis = ergodic_factor_list['params']
        diff_axis = ergodic_factor_list['diff']
        weight_axis = ergodic_factor_list['weigth']
        self.Iterative_factor_list = [
            {
                'class': [name],
                # Each entry is a one-element list holding a
                # (name, tf, param, diff, weight) tuple.
                'params': [[combo] for combo in product([name], tf_axis, param_axis, diff_axis, weight_axis)],
            }
            for name in ergodic_factor_list['name']
        ]

    def get_class_list(self):
        """Return the single-element class-name list of every factor."""
        return [entry['class'] for entry in self.Iterative_factor_list]

    def get_params_list(self, class_name):
        """Return the parameter combos for *class_name* (None if absent)."""
        return next(
            (entry['params'] for entry in self.Iterative_factor_list
             if entry['class'] == class_name),
            None,
        )


class FilterProcessor():
    """Expand a filter-traversal config into per-group parameter combos.

    ``ergodic_filter_list`` is a list of groups; each group is a dict mapping
    a direction key (e.g. 'df1' / 'df2') to a list of filter-spec dicts whose
    values are lists of candidate settings.  The constructor enumerates every
    combination of settings within each direction, then every combination
    across directions.
    """

    def __init__(self, ergodic_filter_list) -> None:

        def format_filter(*filter_list):
            # Rewrite each expanded spec tuple
            # (direction, name, params, type, op, value, rank_asc, filter_after)
            # into the list layout consumed downstream: name and params are
            # fused into one 'name_fl_params' string and the rank_asc enum is
            # unwrapped to its raw .value.
            result = []
            for direction_filter in filter_list:
                res = []
                for _filter in direction_filter:
                    res.append([_filter[0], f"{_filter[1]}_fl_{_filter[2]}", _filter[3], _filter[4], _filter[5], _filter[6].value, _filter[7]])
                result.append(res)
            # Equivalent one-liner of the loops above:
            # result = [[[_filter[0],f"{_filter[1]}_fl_{_filter[2]}",_filter[3],_filter[4],_filter[5],_filter[6].value,_filter[7]] for _filter in direction_filter] for direction_filter in filter_list]
            return result

        self.Iterative_factor_list = []
        for i in ergodic_filter_list:
            filter_list = []
            for j in i.keys():
                # Inner product(*k.values()) expands one spec dict into all of
                # its concrete settings; the outer product across the specs of
                # direction j yields every filter combination for j.
                filter_list.append(list(product(*format_filter(*[list(product([j], *k.values())) for k in i[j]]))))
            self.Iterative_factor_list.append({
                # Group label: unique factor names used anywhere in the group.
                'class': list(set([k['name'][0] for a in i.values() for k in a])),
                # Every cross-direction combination of the per-direction sets.
                'params': list(product(*filter_list))
            })

    def get_class_list(self):
        """Return the class-name list of every traversal group."""
        return [i['class'] for i in self.Iterative_factor_list]

    def get_params_list(self, class_name):
        """Return the parameter combos for *class_name* (None if absent)."""
        for i in self.Iterative_factor_list:
            if i['class'] == class_name:
                return i['params']


def run():
    """Traverse every factor x filter combination and replay each in parallel.

    Reads the module-level globals set in the ``__main__`` block:
    ``ergodic_factor_list``, ``ergodic_filter_list``, ``othCfg``, ``playCfg``
    and ``trade_type``.  For each class pairing it dumps the preprocessed
    dataframes to temp files so the joblib workers can memory-map them.
    """
    aaa = FactorProcessor(ergodic_factor_list)
    bbb = FilterProcessor(ergodic_filter_list)
    product_list = list(product(aaa.get_class_list(), bbb.get_class_list()))
    print('product_list = ', len(product_list), '\n', product_list)
    # Pair every factor class with every filter class.
    for factor_class_list, filter_class_list in product_list:
        print('factor_class_list = ', factor_class_list, 'filter_class_list = ', filter_class_list)
        # Read the hourly data with the required factor/filter columns.
        df = reader.readhour(
            trade_type,
            factor_class_list,
            filter_class_list=filter_class_list)

        # Parameter grids for this class pairing.
        factor_params_list = aaa.get_params_list(factor_class_list)
        filter_params_list = bbb.get_params_list(filter_class_list)

        # All factor-param x filter-param combinations.
        params_list = list(product(factor_params_list, filter_params_list))
        print('params_list = ', len(params_list), '\n', params_list)

        _othCfg_list = []
        # Build one run config per combination.
        for params in params_list:
            # NOTE(review): shallow copy — nested values (e.g. the
            # 'filter_after_exec' list) stay shared across configs; confirm
            # that is intended.
            _othCfg = othCfg.copy()
            # NOTE(review): long and short both receive params[0], i.e. the
            # same factors are traded on both sides — confirm intended.
            _othCfg['factor_long_list'] = params[0]
            _othCfg['factor_short_list'] = params[0]
            _othCfg['filter_before_exec'] = [filter_generate(param=i) for param in params[1] for i in param]
            _othCfg_list.append(_othCfg)

        # # Serial playback (kept for debugging):
        # for _othCfg in _othCfg_list:
        #     run_play(df,playCfg, _othCfg)

        # Parallel playback.
        njobs = os.cpu_count() - 2
        df, replace_symbol_to_int, replace_symbol_to_int_, all_symbol_list, symbols_data, quit_arry = prepare_df(df, playCfg, _othCfg_list)

        # Dump the shared frames once; workers re-load them memory-mapped.
        with tempfile.NamedTemporaryFile(dir=temp_path, delete=False) as temp_df_file:
            dump(df, temp_df_file.name)
            df_temp_path = temp_df_file.name  # keep the path for the workers
            temp_files.append(df_temp_path)  # schedule for cleanup at exit

        with tempfile.NamedTemporaryFile(dir=temp_path, delete=False) as temp_symbols_data_file:
            dump(symbols_data, temp_symbols_data_file.name)
            symbols_data_temp_path = temp_symbols_data_file.name
            temp_files.append(symbols_data_temp_path)  # schedule for cleanup at exit

        Parallel(n_jobs=njobs)(delayed(run_play)(f'{df_temp_path}', playCfg, _othCfg, replace_symbol_to_int, replace_symbol_to_int_, all_symbol_list, f"{symbols_data_temp_path}", quit_arry) for _othCfg in _othCfg_list)
        # time.sleep(5)  # if deletion still errors, increase this delay

        del df


def split_list_by_chunk_size(lst, chunk_size):
    """Split *lst* into consecutive chunks of at most *chunk_size* items.

    The final chunk holds whatever remains and may be shorter than
    *chunk_size*.  Works on anything sliceable (lists, numpy arrays).
    """
    chunks = []
    start = 0
    total = len(lst)
    while start < total:
        chunks.append(lst[start:start + chunk_size])
        start += chunk_size
    return chunks


if __name__ == '__main__':

    # Edit the traversal configuration below =========================================================

    # === Yearly windows: run the full traversal once per year label.
    for compound_name in ['2021', '2022', '2023', '2024']:
        # NOTE(review): the '2020' branch is unreachable with the current
        # year list above.
        if compound_name == '2020':
            start_date = '2020-01-01'  # backtest start
            end_date = '2020-12-31'  # backtest end
        elif compound_name == '2021':
            start_date = '2021-01-01'  # backtest start
            end_date = '2021-12-31'  # backtest end
        elif compound_name == '2022':
            start_date = '2022-01-01'  # backtest start
            end_date = '2022-12-31'  # backtest end
        elif compound_name == '2023':
            start_date = '2023-01-01'  # backtest start
            end_date = '2023-12-31'  # backtest end
        elif compound_name == '2024':
            start_date = '2024-01-01'  # backtest start
            end_date = '2024-12-31'  # backtest end

        # === General configuration
        cal_factor_type = 'cross'  # cross/ vertical

        # === Factor classes to traverse
        factor_list_name = ['Bbw']
        # === Factor parameter grid (ideally a multiple of `nums`; uneven
        # splits are tolerated by the chunking below).
        factor_list_params = list(range(8, 60, 1))
        factor_list_params = np.array(factor_list_params)
        # Worker count; if changed, keep run()'s njobs in sync.
        nums = os.cpu_count() - 2
        if len(factor_list_params) <= nums:
            chunks = [factor_list_params]
        else:
            chunks = split_list_by_chunk_size(factor_list_params, nums)

        print('Use_cpu_nums = ', nums, '每个线程费分配参数:', chunks)

        for _factor_list_param in chunks:
            # == Factor traversal config: one dict, each value a list of
            # candidates to sweep.
            ergodic_factor_list = {
                'name': factor_list_name,  # factor classes to sweep
                'tf': [True],
                'params': _factor_list_param,
                'diff': [0],
                'weigth': [1]
            }

            # == Filter traversal config.  Unlike the factor config (a single
            # dict), this is a list of dicts: each dict holds one group, its
            # keys are per-direction filter lists, and every traversal case is
            # appended to the outer list.
            ergodic_filter_list = [
                {
                    'df1': [
                        {
                            'name': ['涨跌幅max'],
                            'params': [24],
                            'type': ['value'],
                            'op': ['lte'],
                            'value': [0.2],
                            'rank_asc': [RankAscending.FALSE],
                            'filter_after': [FilterAfter.FALSE]
                        },
                        {
                            'name': ['Volume'],
                            'params': [24],
                            'type': ['rank'],
                            'op': ['lte'],
                            'value': [60],
                            'rank_asc': [RankAscending.FALSE],
                            'filter_after': [FilterAfter.FALSE]
                        },
                    ],
                    'df2': [
                        {
                            'name': ['涨跌幅max'],
                            'params': [24],
                            'type': ['value'],
                            'op': ['lte'],
                            'value': [0.2],
                            'rank_asc': [RankAscending.FALSE],
                            'filter_after': [FilterAfter.FALSE]
                        },
                        {
                            'name': ['Volume'],
                            'params': [24],
                            'type': ['rank'],
                            'op': ['lte'],
                            'value': [60],
                            'rank_asc': [RankAscending.FALSE],
                            'filter_after': [FilterAfter.FALSE]
                        },
                    ]
                },
            ]

            trade_type = 'swap'
            # NOTE(review): scalars assigned below are later indexed as
            # playCfg['...'][0], so playCfg presumably wraps assigned values
            # in one-element lists — confirm in environ.playCfg.
            playCfg['c_rate'] = 6 / 10000  # commission rate
            playCfg['hold_hour_num'] = 1  # hold_hour
            # Run only these N offsets; an empty list means all offsets.
            long_select_offset = []
            short_select_offset = []

            # === Playback tuning
            playCfg['long_coin_num'] = 1  # number of long coins
            playCfg['short_coin_num'] = 1  # number of short coins
            playCfg['long_p'] = 0  # 0: equal weight; 0 -> inf: top-coin concentration decreases
            playCfg['short_p'] = 0  # e.g. long_coin_num = 3, long_p = 1; rank 1,2,3 fund split [0.43620858, 0.34568712, 0.21810429]
            playCfg['leverage'] = 1  # leverage
            playCfg['long_risk_position'] = 0  # long-side risk exposure: 0.1 -> 10% net long after hedging, -0.2 -> 20% net short
            playCfg['initial_trade_usdt'] = 10000  # initial capital; too small and some coins cannot open positions
            # Offset stop-win / stop-loss; the feature is off when both are 0.
            playCfg['offset_stop_win'] = 0  # offset stop win
            playCfg['offset_stop_loss'] = 0  # offset stop loss

            # Fixed whitelists, e.g. 'BTCUSDT'
            long_white_list = []
            short_white_list = []
            # Fixed blacklists
            long_black_list = []
            short_black_list = []

            # End of editable section ================================================================
            # Post-selection filters: adjust the fund-allocation coefficient
            # of selected coins after selection but before ordering.
            filter_after_params = [
                # ['df2', 'fundingRate', 'value', 'lte', -0.0001, RankAscending.FALSE, FilterAfter.TRUE]
            ]
            filter_after_exec = [filter_generate(param=param) for param in filter_after_params]

            # === Optional extras
            # Equity-curve timing signal (param[-1] defaults to the minimum
            # hours needed to compute the signal):
            p_signal_fun = None
            # param = [48, 48]
            # p_signal_fun = partial(ma_signal, param)

            # === Playback parameter config
            hourly_details = False  # True generates detailed per-hour position/order panels; adds ~20s
            select_by_hour = False  # True: hour-by-hour with exact delisting handling (slow); False: fast, approximate
            othCfg = {
                'log_level': 'INFO',
                'cal_factor_type': cal_factor_type,
                'hourly_details': hourly_details,
                'select_by_hour': select_by_hour,
                'filter_before_exec': [],
                'filter_after_exec': filter_after_exec,
                'start_date': start_date,
                'end_date': end_date,
                'factor_long_list': [],
                'factor_short_list': [],
                'trade_type': trade_type,
                'compound_name': compound_name,
                'quit_symbol_filter_hour': playCfg['hold_hour_num'][0],
                'p_signal_fun': p_signal_fun,
                'select_offsets': [long_select_offset, short_select_offset],
                'white_list': [long_white_list, short_white_list],
                'black_list': [long_black_list, short_black_list],
            }
            # Sanity check: if either stop is active, stop-win must be
            # positive and stop-loss negative.
            if playCfg['offset_stop_win'][0] != 0 or playCfg['offset_stop_loss'][0] != 0:
                assert playCfg['offset_stop_win'][0] > 0 and playCfg['offset_stop_loss'][0] < 0

            run()

    # Give lingering workers time to release the mmap'ed dumps, then clean up.
    time.sleep(10)
    for temp_file in temp_files:
        try:
            os.remove(temp_file)
            print(f"Deleted temporary file: {temp_file}")
        except OSError as e:
            print(f"Error deleting temporary file,请手动删除 {temp_file}: {e.strerror}")
