"""
    ================================================================================
                            ------------utf-8--------------
    ================================================================================
@Author: 
    rfdsg
@Create Time: 
    2025/3/7 - 17:51
@Description:

@Attention:
    
"""
import gc
import os
import tempfile
import uuid
from itertools import islice

import joblib
import numpy as np
import pandas as pd
from typing import Generator, Callable, List, Iterable, Union
import torch
from joblib import delayed, Parallel
from util.data_process import pr
from evaluation.factor_evaluation import FactorEvaluation, EvaluationMetrics


def single_second_select(
        result: np.ndarray,
        *,
        returns: pd.DataFrame):
    """Score a factor by its rolling rank-IC information ratio on val/test splits.

    Splits both the factor values and the returns 70/15/15 (train split is
    discarded), computes the rolling rank-IC IR (mean / std of the rolling IC
    window) on the validation and test segments, and returns the
    cross-sectional means of each.
    """
    def _rolling_ir(values, segment_returns) -> float:
        # Align the raw factor matrix to the return segment's index/columns
        # before computing the cross-sectional rank IC.
        frame = pd.DataFrame(values,
                             columns=segment_returns.columns,
                             index=segment_returns.index)
        ic = EvaluationMetrics.P_rank_ic(frame, segment_returns)
        window = EvaluationMetrics.rolling(ic)
        ir: pd.Series = window.mean() / window.std()
        return ir.mean()

    _, val, test = pr.split_valid(result, (0.7, 0.15))
    _, val_returns, test_returns = pr.split_valid(returns, (0.7, 0.15))

    return {'rolling_val_ir': _rolling_ir(val, val_returns),
            'rolling_test_ir': _rolling_ir(test, test_returns)}


def factor_ic_eva_pare(fun_lists: Union[list[Callable], List[str]],
                       datas: Generator, net_returns: pd.DataFrame,
                       rank_or_normal: bool = True, active_fun: Callable = None) -> List[Union[str, List]]:
    """Evaluate factors batch-by-batch with a process pool.

    ``datas`` yields batches of tensors; each tensor is paired (in order) with
    the factor name drawn from ``fun_lists``. ``active_fun`` is invoked in a
    worker process per factor with the name, numpy data, the return frame's
    index/columns, the precomputed decay-horizon returns, and the
    ``rank_or_normal`` flag.

    :param fun_lists: factor callables or their string names, aligned with the
        tensors produced by ``datas``.
    :param datas: generator of batches; each batch is a list of torch tensors.
    :param net_returns: return frame used both for horizon returns and for the
        index/columns passed to the workers.
    :param rank_or_normal: forwarded to ``active_fun`` untouched.
    :param active_fun: worker callable; required in practice (``None`` would
        fail inside ``delayed``).
    :return: flat list of all workers' results, in input order.
    """
    results = []
    # Normalise callables to their string form; an already-string list
    # (or an empty one — previously an IndexError) is left untouched.
    if fun_lists and not isinstance(fun_lists[0], str):
        fun_lists = [str(fun) for fun in fun_lists]
    fun_iter = iter(fun_lists)  # consumed in lockstep with the data batches
    # Precompute forward returns at every decay horizon once; shared by all workers.
    returns = {i: pr.absolute_true_future_returns(net_returns, i)
               for i in [1, 2, 5, 7, 10, 20, 40, 60, 90, 180]}
    count = 0
    for batch in datas:
        # Renamed from ``data`` so the generator expression below no longer
        # shadows the batch list with its own loop variable.
        batch = [tensor.cpu().numpy() for tensor in batch]
        fun_name_batch = list(islice(fun_iter, len(batch)))  # names matching this batch
        batch_results = Parallel(n_jobs=max(1, int(os.cpu_count() / 1.5) - 1),
                                 verbose=1,
                                 prefer="processes"  # process pool: evaluation is CPU-bound
                                 )(delayed(active_fun)(fun,
                                                       data, net_returns.index, net_returns.columns, returns,
                                                       rank_or_normal)
                                   for fun, data in zip(fun_name_batch, batch))
        results.extend(batch_results)
        count += 1
        print(f'因子评估完成{count}个batch')

    return results


def factor_ic_eva_pare_roll(fun_lists: Union[list[Callable], List[str]],
                            datas: Generator, net_returns: pd.DataFrame,
                            active_fun: Callable = None
                            ) -> List[Union[str, List]]:
    """Rolling-metric variant of :func:`factor_ic_eva_pare`.

    Identical batching/parallelisation, except the decay-horizon returns are
    passed to ``active_fun`` as a positional list (not a dict keyed by
    horizon) and no ``rank_or_normal`` flag is forwarded.

    :param fun_lists: factor callables or their string names, aligned with the
        tensors produced by ``datas``.
    :param datas: generator of batches; each batch is a list of torch tensors.
    :param net_returns: return frame used for horizon returns and for the
        index/columns handed to workers.
    :param active_fun: worker callable; required in practice.
    :return: flat list of all workers' results, in input order.
    """
    results = []
    # Normalise callables to their string form; tolerate an empty list
    # (previously ``fun_lists[0]`` raised IndexError).
    if fun_lists and not isinstance(fun_lists[0], str):
        fun_lists = [str(fun) for fun in fun_lists]
    fun_iter = iter(fun_lists)  # consumed in lockstep with the data batches
    # Precompute forward returns for each decay horizon once, in this order.
    returns = [pr.absolute_true_future_returns(net_returns, i)
               for i in [1, 2, 5, 7, 10, 20, 40, 60, 90, 180]]
    count = 0
    for batch in datas:
        # Renamed from ``data`` so the generator expression below no longer
        # shadows the batch list with its own loop variable.
        batch = [tensor.cpu().numpy() for tensor in batch]
        fun_name_batch = list(islice(fun_iter, len(batch)))  # names matching this batch
        batch_results = Parallel(n_jobs=max(1, int(os.cpu_count() / 1.5) - 1),
                                 verbose=1,
                                 prefer="processes"  # process pool: evaluation is CPU-bound
                                 )(delayed(active_fun)(fun,
                                                       data, net_returns.index, net_returns.columns, returns,
                                                       )
                                   for fun, data in zip(fun_name_batch, batch))
        results.extend(batch_results)
        count += 1
        print(f'因子评估完成{count}个batch')

    return results


def factor_evaluation_pare(datas: Generator, input_fun: Callable, **kwargs):
    """Apply ``input_fun`` to every tensor of every batch via a process pool.

    :param datas: generator of batches; each batch is a list of torch tensors.
    :param input_fun: worker callable, invoked per tensor (as numpy) with
        ``**kwargs`` forwarded unchanged.
    :return: flat list of all workers' results, in input order.
    """
    # Start at 0 so the first progress line reports batch 1 (it previously
    # started at 1 and was incremented before printing, reporting batch 2);
    # now consistent with the sibling *_pare functions.
    count = 0
    results = []
    for batch in datas:
        # Renamed from ``data`` to avoid shadowing in the generator expression.
        batch = [tensor.cpu().numpy() for tensor in batch]
        batch_results = Parallel(n_jobs=max(1, int(os.cpu_count() / 1.5) - 1),
                                 verbose=1,
                                 prefer="processes"  # process pool: evaluation is CPU-bound
                                 )(delayed(input_fun)(data, **kwargs) for data in batch)
        results.extend(batch_results)
        count += 1
        print(f'因子评估完成{count}个batch')
    return results


def parallel_by_time(data: pd.DataFrame, process_single_time_step: Callable):
    """Apply ``process_single_time_step`` to each timestamp's cross-section.

    Timestamps are walked in batches of 500 to bound peak memory; each worker
    receives ``data.loc[time]`` unstacked on level 0 (original column order
    kept) and transposed. Per-batch outputs are concatenated and finally
    re-indexed with ``data.index``.

    NOTE(review): the final index assignment assumes each call produces
    exactly one output row per timestamp — confirm against the callers.
    """
    batch_size = 500                       # timestamps per batch, limits memory pressure
    workers = max(1, os.cpu_count() - 1)   # keep one core free
    all_times = data.index

    collected = []
    for start in range(0, len(all_times), batch_size):
        chunk = all_times[start:start + batch_size]
        print(f'Processing batch {start // batch_size + 1} / {len(all_times) // batch_size + 1}')

        # Only the current timestamp's slice is shipped to each worker.
        outputs = Parallel(n_jobs=workers, backend="loky", verbose=1)(
            delayed(process_single_time_step)(
                data.loc[t].unstack(level=0, sort=False).T)
            for t in chunk
        )
        collected.append(pd.concat(outputs))

        # Drop per-batch results before the next allocation to keep RSS down.
        del outputs
        gc.collect()

    merged = pd.concat(collected)
    merged.index = data.index
    return merged


def parallel_by_code(data: pd.DataFrame, process_single_code: Callable, **kwargs):
    """Apply ``process_single_code`` per instrument (level-0 column key) in parallel.

    Each worker receives that instrument's sub-frame with the instrument level
    dropped, plus ``**kwargs``. Results are reassembled under a MultiIndex of
    (code, result name) columns and concatenated onto a copy of ``data``.

    NOTE(review): assumes every worker returns a pandas Series whose ``.name``
    is identical across codes (``results[0].name`` is reused for the whole
    batch) — confirm against the callers.
    """
    # Collect every instrument code from the first column level.
    all_codes = data.columns.get_level_values(0).unique()

    # Process in batches to avoid OOM.
    batch_size = 200  # 200 instruments per batch
    num_jobs = max(1, os.cpu_count() - 1)  # use all CPU cores but one

    final_results = []

    for i in range(0, len(all_codes), batch_size):
        batch_codes = all_codes[i:i + batch_size]
        print(f'Processing batch {i // batch_size + 1} / {len(all_codes) // batch_size + 1}')

        # data[[code]] keeps the MultiIndex; droplevel(0) strips the code level
        # so the worker sees a plain single-instrument frame.
        results = Parallel(n_jobs=num_jobs, backend="loky", verbose=1)(
            delayed(process_single_code)(data[[code]].droplevel(0, axis=1), **kwargs) for code in batch_codes
        )

        # Reassemble this batch's results side by side under (code, name) columns.
        batch_results_df = pd.concat(results, axis=1)
        batch_results_df.columns = pd.MultiIndex.from_product([batch_codes, [results[0].name]])
        final_results.append(batch_results_df)
        # Release per-batch memory before the next iteration.
        del results
        gc.collect()

    # Merge all batches back together.
    final_results = pd.concat(final_results, axis=1)
    # Append the computed columns onto the original frame.
    data = pd.concat([data, final_results], axis=1)
    return data


def parallel_by_fun(fun: Callable, datas: Iterable[List[torch.Tensor]], need_callback: bool = False, **kwargs):
    """Run ``fun`` over each tensor batch in a process pool.

    :param fun: callable taking a list of numpy arrays plus ``**kwargs``.
    :param datas: iterable of batches; each batch is a list of torch tensors.
    :param need_callback: when True, also return the numpy-converted inputs
        alongside the results (both as tuples, batch-aligned).
    :return: list of results, or ``(results, inputs)`` when ``need_callback``.
    """
    def _run_with_data(batch):
        # Convert each tensor to numpy exactly once and reuse it for both the
        # call and the callback payload (the original lambda converted every
        # tensor twice — once for fun(), once for the returned copy).
        np_batch = [d.cpu().numpy() for d in batch]
        return fun(np_batch, **kwargs), np_batch

    n_jobs = max(1, int(os.cpu_count() / 1.5) - 1)
    if need_callback:
        batch_results, callback_data = zip(*Parallel(
            n_jobs=n_jobs,
            verbose=1,
            prefer="processes"  # process pool: work is CPU-bound
        )(delayed(_run_with_data)(batch) for batch in datas))
        return batch_results, callback_data

    # No callback requested: ship only the converted arrays to the workers.
    results = Parallel(n_jobs=n_jobs,
                       verbose=1,
                       prefer="processes"
                       )(delayed(fun)([d.cpu().numpy() for d in batch], **kwargs) for batch in datas)
    return results


def parallel_by_fun_return_path(fun: Callable, datas: Iterable[List[torch.Tensor]], need_callback: bool = False,
                                **kwargs):
    """Run ``fun`` over tensor batches; workers persist results and return file paths.

    ``fun`` must write its result to a pickle file and return that path. The
    main process then loads every pickle and deletes the temp files.

    :param fun: callable taking a list of numpy arrays plus ``**kwargs`` and
        returning a pickle path readable by ``pd.read_pickle``.
    :param datas: iterable of batches; each batch is a list of torch tensors.
    :param need_callback: when True, also return the numpy-converted inputs.
    :return: loaded results, or ``(loaded_results, inputs)`` when
        ``need_callback``.
    """
    def wrapper_return_path(batch):
        np_batch = [d.cpu().numpy() for d in batch]
        result_path = fun(np_batch, **kwargs)  # worker persists its result, hands back the path
        # Parenthesised: only the second tuple element is conditional
        # (the bare ``a, b if c else d`` form reads ambiguously).
        return result_path, (np_batch if need_callback else None)

    jobs = [
        delayed(wrapper_return_path)(batch)
        for batch in datas
    ]

    results = Parallel(
        n_jobs=max(1, int(os.cpu_count() / 1.5) - 1),
        verbose=1,
        # NOTE(review): runs with threads, not processes — fine while ``fun``
        # releases the GIL or is I/O-bound; confirm this is intentional.
        prefer="threads"
    )(jobs)

    if need_callback:
        result_paths, callback_data = zip(*results)
    else:
        result_paths = [path for path, _ in results]
        callback_data = None

    # Load every pickle in the main process; clean up in ``finally`` so the
    # temp files are removed even when a read above raises (the original
    # leaked all remaining files on a mid-loop failure).
    loaded_data = []
    try:
        for path in result_paths:
            loaded_data.append(pd.read_pickle(path))
    finally:
        for path in result_paths:
            try:
                os.remove(path)
            except Exception as e:
                print(f"[Warning] 删除临时文件失败: {path}，原因: {e}")

    return (loaded_data, list(callback_data)) if need_callback else loaded_data


def parallel_by_fun_single(fun: Callable, datas: Iterable[List[torch.Tensor]], **kwargs):
    """Apply ``fun`` to every individual tensor across all batches, in parallel.

    Batches are flattened: one worker task per tensor, each converted to
    numpy before the call. ``**kwargs`` are forwarded to every invocation.
    """
    flattened = (tensor for batch in datas for tensor in batch)
    worker_count = max(1, int(os.cpu_count() / 1.5) - 1)
    executor = Parallel(n_jobs=worker_count,
                        verbose=1,
                        prefer="processes")  # process pool: work is CPU-bound
    return list(executor(delayed(fun)(tensor.cpu().numpy(), **kwargs)
                         for tensor in flattened))


def parallel_by_fun_for_list(fun: Callable, datas: List[np.ndarray], **kwargs):
    """Apply ``fun`` to each array in ``datas`` using a process pool.

    One worker task per array; ``**kwargs`` are forwarded to every call.
    Results come back in input order.
    """
    worker_count = max(1, int(os.cpu_count() / 2) - 1)
    executor = Parallel(n_jobs=worker_count,
                        verbose=1,
                        prefer="processes")  # process pool: work is CPU-bound
    return list(executor(delayed(fun)(array, **kwargs) for array in datas))

