"""
================================================================================
                     ------------utf-8--------------
================================================================================
@Author: rfdsg
@Create Time: 2023/12/29 - 11:59
@Description:
@Attention:
"""
import os
import warnings
from collections import defaultdict
from concurrent.futures import ProcessPoolExecutor
from dataclasses import dataclass, field
from multiprocessing import get_context, shared_memory
from typing import Optional, List, Dict, Literal, Tuple, Any
import numpy as np
import pandas as pd
from joblib import Parallel, delayed
from matplotlib import pyplot as plt
from numba import njit
from numba.np.arrayobj import sliding_window_view
from pandas import DataFrame
from scipy.spatial import KDTree
from scipy.stats import stats, linregress
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import mean_squared_error, roc_auc_score, mutual_info_score
from sklearn.model_selection import train_test_split, KFold
from tqdm import tqdm
from typing_extensions import Union
from util import DS
from util.easy_type import NS
from util.eva_tool import Graph
from util.transInfo import TransInfo
from util.data_process import pr
import statsmodels.api as sm
import lightgbm as lgb



class TimeEvaluation:
    """Time-series oriented factor evaluation helpers."""

    def __init__(self):
        pass

    @staticmethod
    def corr_line(factor: pd.Series, y: pd.Series, windows: int = 200) -> pd.Series:
        """
        Rolling correlation between a factor series and a target series.

        Args:
            factor: factor value series, indexed by time.
            y: target series (e.g. forward returns), aligned with ``factor``.
            windows: rolling window length.

        Returns:
            pd.Series of rolling correlations; the leading NaN (before
            ``min_periods`` is reached) is back-filled.

        Note:
            The original version contained an unreachable plotting block after
            the ``return`` statement; that dead code has been removed. The
            return annotation was also corrected from DataFrame to Series.
        """
        rolling_corr = factor.rolling(window=windows, min_periods=2).corr(y)
        # Back-fill the head so the returned series has no leading gap.
        rolling_corr.bfill(inplace=True)
        return rolling_corr



class EvaluationMetrics:
    """
    注意：绝对不能使用np.argsort(np.argsort(这样的双sort，因为它无法在大量相同值时返回正确的rank，必须使用average类型的

    """

    def __init__(self):
        # No instance state: every evaluation helper is a @staticmethod.
        pass

    @staticmethod
    def P_rank_ic(x: pd.DataFrame, y: pd.DataFrame) -> Union[float, pd.Series]:
        """
        Daily cross-sectional Spearman rank IC.

        Args:
            x: factor values; index = dates, columns = assets.
            y: forward returns, same layout as ``x``.

        Returns:
            Series of per-date rank ICs (index = dates).

        Warnings:
            ``corrwith`` silently drops NaN pairs, so thin cross-sections can
            produce statistically unreliable ICs.
        """
        x = x.dropna(axis=1, how='all')  # drop assets that are entirely NaN
        y = y.dropna(axis=1, how='all')
        # Rows are dates, columns are assets; correlate each row across assets.
        ic = x.corrwith(y, method='spearman', axis=1)
        # NOTE(review): 9 / x.shape[0] is a fraction below 1 once there are
        # 10+ dates, so this guard only fires when almost no ICs exist —
        # confirm the intended threshold (perhaps x.shape[0] / 9?).
        if ic.count() < 9 / x.shape[0]:
            ic[:] = np.nan
        return ic

    @staticmethod
    def NP_ir(ic: NS):
        """Information ratio of an IC series: nan-aware mean over nan-aware std."""
        mean_ic = np.nanmean(ic)
        std_ic = np.nanstd(ic)
        return mean_ic / std_ic

    @staticmethod
    def N_rank_ic(x: np.ndarray, y: np.ndarray, allow_nan: int) -> np.ndarray:
        """
        Row-wise Spearman rank IC via NumPy, equivalent to pandas
        ``corrwith(axis=1, method='spearman')``.

        Args:
            allow_nan: maximum number of all-NaN rows tolerated downstream.
            x: (n_dates, n_assets) factor values, one row per date.
            y: (n_dates, n_assets) forward returns, same shape as ``x``.

        Returns:
            1-D array of per-row Spearman correlations.
        """
        from scipy.stats import rankdata

        x2d = np.atleast_2d(x)
        y2d = np.atleast_2d(y)
        if x2d.shape != y2d.shape:
            raise ValueError("因子矩阵和收益矩阵形状必须相同！")

        # Spearman = Pearson correlation of the ranks; rankdata uses
        # 'average' ties, which is required for heavily tied data (see the
        # class docstring about double-argsort ranking).
        x_ranks = np.apply_along_axis(rankdata, 1, x2d)
        y_ranks = np.apply_along_axis(rankdata, 1, y2d)

        return EvaluationMetrics.N_normal_ic(x_ranks, y_ranks, allow_nan)

    @staticmethod
    def N_normal_ic(x: np.ndarray, y: np.ndarray, allow_nan: int) -> np.ndarray:
        """
        Row-wise Pearson IC via NumPy, equivalent to pandas ``corrwith(axis=1)``.

        Args:
            allow_nan: maximum tolerated number of all-NaN rows in ``y``.
            x: (n_dates, n_assets) factor values, one row per date.
            y: (n_dates, n_assets) forward returns, one row per date.

        Returns:
            1-D array of per-row Pearson correlations (NaN where undefined).

        Raises:
            ValueError: if more than ``allow_nan`` rows of ``y`` are entirely NaN.
        """
        # Fix: use nan-aware mean/covariance throughout. The original mixed
        # NaN-propagating np.mean with np.nanstd, which silently yielded NaN
        # ICs for any row containing a single NaN while the sibling
        # N_normal_ic_0 (and pandas corrwith) ignore NaNs. NaN-free inputs
        # are unaffected.
        mean_factor = np.nanmean(x, axis=1, keepdims=True)
        mean_returns = np.nanmean(y, axis=1, keepdims=True)

        cov_matrix = np.nanmean((x - mean_factor) * (y - mean_returns), axis=1)

        # Sample standard deviations (ddof=1), NaN-aware.
        std_factor = np.nanstd(x, axis=1, ddof=1)
        std_returns = np.nanstd(y, axis=1, ddof=1)
        if sum(np.isnan(y).all(axis=1)) > allow_nan:
            raise ValueError('超出预料的全空行')
        # Zero variance (constant row) makes the correlation undefined -> NaN.
        with np.errstate(divide='ignore', invalid='ignore'):
            pearson_ic = cov_matrix / (std_factor * std_returns)
            pearson_ic = np.where(std_factor * std_returns == 0, np.nan, pearson_ic)

        return pearson_ic

    @staticmethod
    def N_normal_ic_0(xi, yi):
        """
        Column-wise (axis=0) NaN-aware Pearson correlation between two arrays.

        Returns NaN wherever either column has zero variance.
        """
        mu_x = np.nanmean(xi, axis=0)
        mu_y = np.nanmean(yi, axis=0)

        covariance = np.nanmean((xi - mu_x) * (yi - mu_y), axis=0)
        sigma_x = np.nanstd(xi, axis=0, ddof=1)
        sigma_y = np.nanstd(yi, axis=0, ddof=1)

        denom = sigma_x * sigma_y
        with np.errstate(divide='ignore', invalid='ignore'):
            result = covariance / denom
            result = np.where(denom == 0, np.nan, result)
        return result

    @staticmethod
    @njit
    def N_roll_ic_numba(x: np.ndarray, y: np.ndarray, window: int = 20) -> np.ndarray:
        """
        Numba-compiled rolling Pearson IC per asset along the time axis.

        Rows 0..window-1 use an expanding window of length i+1; subsequent
        rows use a full sliding window of length ``window``. NaN inputs are
        NOT handled here (plain mean/std are used inside the jitted code).

        Args:
            x: (n_dates, n_assets) factor values.
            y: (n_dates, n_assets) forward returns.
            window: rolling window length.

        Returns:
            (n_dates, n_assets) array of rolling ICs (NaN where undefined).
        """
        T, N = x.shape
        ic_result = np.full((T, N), np.nan)

        # Warm-up rows: expanding window of length i+1.
        for i in range(window):
            xi = x[:i + 1]
            yi = y[:i + 1]

            mean_x = np.mean(xi, axis=0)
            mean_y = np.mean(yi, axis=0)

            # max(i, 1) avoids division by zero on the single-sample row.
            cov = np.sum((xi - mean_x) * (yi - mean_y), axis=0) / max(i, 1)
            std_x = np.std(xi, axis=0, ddof=1)
            std_y = np.std(yi, axis=0, ddof=1)

            for j in range(N):
                denom = std_x[j] * std_y[j]
                if denom == 0:
                    ic_result[i, j] = np.nan
                else:
                    ic_result[i, j] = cov[j] / denom

        # Full windows from row `window` on: explicit loops keep the code
        # njit-friendly (no fancy vectorized indexing).
        for t in range(window, T):
            for j in range(N):
                xi = x[t - window + 1:t + 1, j]
                yi = y[t - window + 1:t + 1, j]

                mean_x = 0.0
                mean_y = 0.0

                for k in range(window):
                    mean_x += xi[k]
                    mean_y += yi[k]
                mean_x /= window
                mean_y /= window

                cov_sum = 0.0
                var_x_sum = 0.0
                var_y_sum = 0.0

                for k in range(window):
                    dx = xi[k] - mean_x
                    dy = yi[k] - mean_y
                    cov_sum += dx * dy
                    var_x_sum += dx * dx
                    var_y_sum += dy * dy

                # Sample (ddof=1) standard deviations, matching the warm-up rows.
                denom = np.sqrt(var_x_sum / (window - 1)) * np.sqrt(var_y_sum / (window - 1))
                if denom == 0:
                    ic_result[t, j] = np.nan
                else:
                    ic_result[t, j] = cov_sum / (window - 1) / denom

        return ic_result

    @staticmethod
    def N_roll_ic(x: np.ndarray, y: np.ndarray, window: int = 20) -> np.ndarray:
        """
        Rolling Pearson IC per asset along the time axis; warm-up rows use the
        data available so far, so the output keeps the input's length.

        Row 0 stays NaN (a single observation has no correlation); rows
        1..window-2 use expanding windows of length i+1; rows window-1 onward
        use full sliding windows (vectorized via sliding_window_view).
        NOTE(review): the numba variant N_roll_ic_numba extends the expanding
        warm-up one row further (through index window-1) — confirm which
        boundary is intended.

        Args:
            x: shape = (n_dates, n_assets)
            y: shape = (n_dates, n_assets)
            window: int, maximum (full) window size

        Returns:
            shape = (n_dates, n_assets), rolling IC
        """
        T, N = x.shape
        ic_result = np.full((T, N), np.nan)

        for i in range(1, window - 1):
            # Expanding window of length i+1.
            xi = x[:i + 1]
            yi = y[:i + 1]

            ic = EvaluationMetrics.N_normal_ic_0(xi, yi)
            ic_result[i] = ic

        # Full windows from row window-1 onward, vectorized.
        from numpy.lib.stride_tricks import sliding_window_view
        x_win = sliding_window_view(x, window_shape=window, axis=0)
        y_win = sliding_window_view(y, window_shape=window, axis=0)

        mean_x = np.nanmean(x_win, axis=2)
        mean_y = np.nanmean(y_win, axis=2)
        cov = np.nanmean((x_win - mean_x[:, :, np.newaxis]) * (y_win - mean_y[:, :, np.newaxis]), axis=2)
        std_x = np.nanstd(x_win, axis=2, ddof=1)
        std_y = np.nanstd(y_win, axis=2, ddof=1)

        # Zero variance -> undefined correlation -> NaN.
        with np.errstate(divide='ignore', invalid='ignore'):
            ic = cov / (std_x * std_y)
            ic = np.where((std_x * std_y) == 0, np.nan, ic)
        ic_result[window - 1:] = ic
        return ic_result

    @staticmethod
    def N_roll_ic_no_same(x: np.ndarray, y: np.ndarray, window: int = 20) -> np.ndarray:
        """
        Rolling Pearson IC over time for each asset, full windows only.

        Unlike N_roll_ic there are no partial (expanding) warm-up windows, so
        the output is shorter than the input along the time axis.

        Args:
            x: (n_dates, n_assets) factor values.
            y: (n_dates, n_assets) forward returns.
            window: rolling window length.

        Returns:
            (n_dates - window + 1, n_assets) array of rolling ICs.
        """
        from numpy.lib.stride_tricks import sliding_window_view

        n_dates, n_assets = x.shape
        out = np.full((n_dates - window + 1, n_assets), np.nan)

        fac_windows = sliding_window_view(x, window_shape=window, axis=0)
        ret_windows = sliding_window_view(y, window_shape=window, axis=0)

        fac_mean = np.nanmean(fac_windows, axis=2)
        ret_mean = np.nanmean(ret_windows, axis=2)
        demeaned = (fac_windows - fac_mean[:, :, np.newaxis]) * (ret_windows - ret_mean[:, :, np.newaxis])
        covariance = np.nanmean(demeaned, axis=2)
        fac_std = np.nanstd(fac_windows, axis=2, ddof=1)
        ret_std = np.nanstd(ret_windows, axis=2, ddof=1)

        denom = fac_std * ret_std
        # Zero variance -> undefined correlation -> NaN.
        with np.errstate(divide='ignore', invalid='ignore'):
            ic = covariance / denom
            ic = np.where(denom == 0, np.nan, ic)
        out[:] = ic
        return out

    @staticmethod
    def P_roll_ic(factor: DS,
                  y: Union[pd.DataFrame, pd.Series],
                  windows: int = 20
                  ) -> Union[pd.DataFrame, pd.Series]:
        """Rolling correlation between factor and target via pandas rolling."""
        roller = factor.rolling(window=windows, min_periods=2)
        return roller.corr(y)

    @staticmethod
    def N_rolling_rank_ic(factor: np.ndarray, returns: np.ndarray, window: int = 200,
                          allow_nan: int = 20) -> pd.Series:
        """
        Rolling mean of the daily Spearman rank IC.

        Args:
            factor: (n_dates, n_assets) factor values.
            returns: (n_dates, n_assets) forward returns.
            window: rolling-mean window length.
            allow_nan: all-NaN-row budget forwarded to N_rank_ic. Previously a
                hard-coded 20; now a keyword with the same default, so existing
                callers are unaffected.

        Returns:
            pd.Series of rolling-averaged rank ICs, same length as the input.
        """
        spearman_ic = EvaluationMetrics.N_rank_ic(factor, returns, allow_nan)
        # pandas rolling with min_periods=1 keeps the output aligned with the
        # input length.
        ic_series = pd.Series(spearman_ic)
        return ic_series.rolling(window=window, min_periods=1).mean()

    @staticmethod
    def N_rolling_rank_ir(factor: np.ndarray, returns: np.ndarray, window: int = 200) -> float:
        """
        IR (nan-mean / nan-std) of the rolling rank-IC series.

        NOTE(review): the original annotated the return as pd.Series, but
        NP_ir reduces the series to a scalar — the annotation was corrected.
        """
        ic_values = EvaluationMetrics.N_rolling_rank_ic(factor, returns, window)
        ir_values = EvaluationMetrics.NP_ir(ic_values)
        return ir_values

    @staticmethod
    def rolling(data: DS, windows: int = 200):
        """Shorthand for ``data.rolling(window=windows, min_periods=1)``."""
        return data.rolling(window=windows, min_periods=1)

    @staticmethod
    def P_pure_ic(factor: DS, y: DS) -> Union[float, pd.Series]:
        """Plain Pearson correlation between factor and target."""
        return factor.corr(y, method="pearson")

    @staticmethod
    def several_decay_ic(*, rank_or_normal: bool = True, factor: np.ndarray, holding_day: List[int],
                         y: Optional[List[np.ndarray]]) -> Union[pd.DataFrame, pd.Series]:
        """
        IC decay: daily IC of one factor against several forward-return horizons.

        Args:
            rank_or_normal: True -> Spearman rank IC, False -> Pearson IC.
            factor: (n_dates, n_assets) factor values.
            holding_day: holding horizons, aligned with the entries of ``y``.
            y: list of forward-return matrices, one per horizon.
               NOTE(review): each horizon value is also forwarded to the IC
               function as its ``allow_nan`` budget — confirm that is intended.

        Returns:
            DataFrame of daily ICs, one column per holding horizon
            (ic_frame.columns == holding_day).
        """
        # Cleanup: removed the no-op `ic_frame = ic_frame` self-assignment and
        # redundant aliasing of the inputs.
        ic_columns = []
        for horizon_returns, day in zip(y, holding_day):
            if rank_or_normal:
                ic = EvaluationMetrics.N_rank_ic(factor, horizon_returns, day)
            else:
                ic = EvaluationMetrics.N_normal_ic(factor, horizon_returns, day)
            ic_columns.append(pd.Series(ic))
        ic_frame = pd.concat(ic_columns, axis=1)
        ic_frame.columns = holding_day
        return ic_frame

    @staticmethod
    def several_decay_ic_no_decay(*, factor: np.ndarray, y: List[np.ndarray], holding_day: List, index: pd.Index,
                                  columns: pd.Index, windows: int = 20, same_shape: bool = True) -> dict[int, Any]:
        """
        Rolling IC of one factor against several forward-return horizons.

        Args:
            factor: (n_dates, n_assets) factor values.
            y: list of forward-return matrices, one per holding horizon.
            holding_day: holding horizons, used as result keys.
            index: row labels for the resulting DataFrames.
            columns: column labels for the resulting DataFrames.
            windows: rolling window length.
            same_shape: True -> N_roll_ic (output keeps the input length),
                False -> N_roll_ic_no_same (full windows only, shorter output).

        Returns:
            {holding_day: DataFrame of rolling ICs}.
        """
        roll_fn = EvaluationMetrics.N_roll_ic if same_shape else EvaluationMetrics.N_roll_ic_no_same
        result: dict[int, Any] = {}
        for horizon_returns, day in zip(y, holding_day):
            ic = roll_fn(factor, horizon_returns, windows)
            result[day] = pd.DataFrame(ic, index=index, columns=columns)
        return result

    @staticmethod
    def several_sort_area(*, factor: np.ndarray, y: np.ndarray, windows: int = 20,
                          high_min: Tuple[float, float] = (0.2, -0.2)) -> np.ndarray:
        """
        Mark, per asset, the longest contiguous period whose rolling IC stays
        inside the band [high_min[1], high_min[0]].

        Args:
            factor: (n_dates, n_assets) factor values.
            y: (n_dates, n_assets) forward returns.
            windows: rolling window length for the IC.
            high_min: (upper, lower) IC band. Fix: was a mutable list default
                annotated as ``tuple``; now an immutable tuple with the same
                values.

        Returns:
            Boolean array aligned with the input dates: True inside the kept
            segment of each column. (The original annotation declared a
            2-tuple, but a single array is returned — annotation corrected.)
        """
        ic = EvaluationMetrics.N_roll_ic_no_same(factor, y, windows)
        mask: np.ndarray = (ic >= high_min[1]) & (ic <= high_min[0])
        # Pad the head lost to the rolling window so the mask realigns with
        # the original date axis.
        mask = np.concatenate([np.zeros((windows - 1, mask.shape[1]), dtype=bool), mask], axis=0)
        results = np.zeros_like(mask)
        for col in range(mask.shape[1]):
            results[:, col] = pr.keep_longest_true_segment(mask[:, col], windows, int(mask.shape[0] / 100))
        return results

    @staticmethod
    def lamination_by_cap(
            market_cap: pd.DataFrame,
            factor: Union[pd.DataFrame, pd.Series],
            y: Union[pd.DataFrame, pd.Series],
    ) -> Union[pd.DataFrame, pd.Series]:
        """
        Rank IC computed separately inside five daily market-cap buckets.

        Stocks are bucketed each day by their cross-sectional market-cap
        percentile; the rank IC of factor vs. y is then computed per bucket.

        Returns:
            DataFrame with one daily rank-IC column per cap bucket.
        """
        bucket_labels = ['Small', 'Mid-Small', 'Mid', 'Mid-Large', 'Large']
        edges = np.linspace(0, 1, 6)  # five equal-width percentile bins
        cap_pct = market_cap.rank(axis=1, pct=True)
        size_groups = cap_pct.apply(
            lambda row: pd.cut(row, edges, labels=bucket_labels),
            axis=1
        )

        group_ics = {}
        for bucket in bucket_labels:
            in_bucket = size_groups == bucket
            group_ics[bucket] = EvaluationMetrics.P_rank_ic(factor[in_bucket], y[in_bucket])

        return pd.DataFrame(group_ics)

    @staticmethod
    def split_by_mask(ic_frame: Union[pd.Series, pd.DataFrame], mask_series: pd.Series):
        """
        Group an IC frame by an externally supplied label series and return the
        per-group IR (mean / std).

        Fix: the original wrote ``mask_series`` into the caller's frame as a
        new column before grouping, mutating the input. Grouping by the
        index-aligned Series directly yields the same per-group statistics
        without the side effect.
        """
        grouped = ic_frame.groupby(mask_series)
        return grouped.mean() / grouped.std()

    @staticmethod
    def y_by_factor_group(factor: pd.DataFrame,
                          y: pd.DataFrame) -> (float, float):
        """
        Group returns by daily factor quantile and report the per-group IR
        (mean/std of y) plus a rough turnover of the group assignments.

        NOTE(review): ``y.groupby(groups)`` groups by a DataFrame of labels —
        verify this matches the installed pandas version's semantics.

        Returns:
            (per-group mean/std of y, turnover series).
        """
        # Equal-width bins over the [0, 1] cross-sectional percentile -> 5 groups.
        bins = np.linspace(0, 1, 6)
        # labels=False yields integer bucket ids 0..4 per asset per day.
        groups = factor.rank(axis=1, pct=True).apply(lambda x: pd.cut(x, bins, labels=False))
        # Average absolute day-over-day change in bucket id per asset.
        turnover = groups.diff().abs().sum(axis=1) / len(groups.columns)
        group_returns = y.groupby(groups)
        return group_returns.mean() / group_returns.std(), turnover

    @staticmethod
    def linear_regression_with_vif(X: Union[pd.Series, pd.DataFrame], y, code):
        """
        Fit an OLS model of y on X with a chronological 80/20 split, print the
        out-of-sample MSE, and print each regressor's variance inflation factor.

        Args:
            X: regressors (the fitting terms).
            y: the series being fitted.
            code: instrument code, used only in the printed report.

        Returns:
            The fitted statsmodels OLS results object.

        Notes:
            NOTE(review): no constant is added to X, so the regression runs
            through the origin — confirm that is intended. The "slopes" header
            is printed but model.params itself is never printed — confirm.
        """
        # Chronological 80/20 split (no shuffling).
        train_index = int(len(X) * 0.8)
        train_factor = X.iloc[:train_index]
        test_factor = X.iloc[train_index:]

        train_returns = y.iloc[:train_index]
        test_returns = y.iloc[train_index:]

        # Fit on the training window only.
        model = sm.OLS(train_returns, train_factor).fit()
        # Out-of-sample mean squared error.
        predicted_returns = model.predict(test_factor)
        mse = mean_squared_error(test_returns, predicted_returns)
        print(f'Mean Squared Error: {mse}')

        # Per-factor slope report header.
        print(f"{code}的斜率为:")
        from statsmodels.stats.outliers_influence import variance_inflation_factor
        # Rule of thumb: VIF above 5-10 indicates problematic multicollinearity.
        vif_data = pd.Series([variance_inflation_factor(X.values, i) for i in range(X.shape[1])])
        vif_data.index = X.columns
        print(f"{code}的VIF:")
        print(vif_data)

        return model

    @staticmethod
    def random_check(factor: Union[pd.DataFrame, pd.Series], price: Union[pd.DataFrame, pd.Series]):
        """
        Sanity check: evaluate a random factor of the same shape as ``factor``;
        a real factor should clearly beat this baseline.

        NOTE(review): ``several_decay_ic(factor=0, holding_day=0, y=0)`` passes
        placeholder zeros and cannot work as written — the intended call
        (presumably with ``random_factor`` plus real horizons and returns)
        still needs to be wired in. ``random_factor`` is currently only used
        for the long/short leg below.
        """
        np.random.seed(42)  # fixed seed so the baseline is reproducible
        random_factor = pd.DataFrame(
            np.random.randn(len(factor.index), len(factor.columns)),
            index=factor.index, columns=factor.columns
        )
        ic_decay_frame = EvaluationMetrics.several_decay_ic(factor=0, holding_day=0, y=0)
        long_short_returns = EvaluationMetrics.long_short_returns(random_factor, price)
        return ic_decay_frame, long_short_returns

    @staticmethod
    def vif(factor: Union[pd.DataFrame, pd.Series]):
        """
        Variance inflation factors for a factor matrix; returns only the
        high-collinearity rows (VIF > 10), sorted descending.

        Args:
            factor: factor-value matrix (one column per factor).

        Returns:
            DataFrame with columns ['Variable', 'VIF'] for the flagged factors.
        """
        from statsmodels.stats.outliers_influence import variance_inflation_factor

        # VIF requires an explicit constant column.
        X = sm.add_constant(factor)
        vif = pd.DataFrame()
        vif["Variable"] = X.columns
        # Bug fix: the original iterated range(X.shape) — shape is a tuple and
        # raises TypeError; iterate over the column count instead.
        vif["VIF"] = [variance_inflation_factor(X.values, i) for i in range(X.shape[1])]

        # Common threshold: VIF above 5-10 signals multicollinearity.
        high_vif = vif[vif['VIF'] > 10]
        return high_vif.sort_values('VIF', ascending=False)

    @staticmethod
    def auc_evaluation(x_train: DS, x_val: DS, model=None):
        """
        Adversarial validation: train a classifier to tell training rows from
        validation rows; an AUC near 0.5 means their distributions agree.

        Args:
            x_train: training-set features.
            x_val: validation-set features.
            model: optional sklearn-style classifier; RandomForest by default.

        Returns:
            float: ROC AUC of the train-vs-validation classifier.
        """
        # Fix: work on copies — the original wrote an 'is_test' column into the
        # caller's DataFrames.
        x_train = x_train.copy()
        x_val = x_val.copy()
        x_train['is_test'] = 0
        x_val['is_test'] = 1
        combined_data = pd.concat([x_train, x_val], axis=0)

        X = combined_data.drop('is_test', axis=1)
        y = combined_data['is_test']

        # Stratified split keeps the train/val label mix balanced.
        X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2, stratify=y)

        model = model or RandomForestClassifier()
        model.fit(X_train, y_train)

        # Probability that a row belongs to the validation set.
        y_pred = model.predict_proba(X_val)[:, 1]
        auc = roc_auc_score(y_val, y_pred)
        print(f"AUC: {auc}")
        # Interpretation thresholds (shared with calculate_auc).
        if auc < 0.55:
            print("数据分布一致性良好，可以继续建模")
        elif auc < 0.65:
            print("存在轻微分布差异，建议检查特征工程")
        else:
            print("警告：数据分布差异显著！需排查数据泄露或重新划分数据集")
        return auc

    @staticmethod
    def calculate_auc(model, X_train, y_train, X_val, y_val):
        """
        Fit ``model`` and report the validation-set ROC AUC.

        Args:
            model: an unfitted sklearn-style classifier (e.g. RandomForestClassifier()).
            X_train (DataFrame): training features.
            y_train (Series): training labels.
            X_val (DataFrame): validation features.
            y_val (Series): validation labels.

        Returns:
            auc (float): validation ROC AUC.
        """
        model.fit(X_train, y_train)
        # Binary task: score with the positive-class probability.
        positive_proba = model.predict_proba(X_val)[:, 1]
        auc = roc_auc_score(y_val, positive_proba)
        # Interpretation thresholds mirror auc_evaluation.
        if auc < 0.55:
            print("数据分布一致性良好，可以继续建模")
        elif auc < 0.65:
            print("存在轻微分布差异，建议检查特征工程")
        else:
            print("警告：数据分布差异显著！需排查数据泄露或重新划分数据集")
        return auc

    @staticmethod
    def kfold_cross_validation(X, y, n_splits=5, random_state=42):
        """
        K-fold adversarial validation: for each fold, train a classifier to
        separate the fold's train rows from its validation rows and record AUC.

        Bug fixes vs. the original:
          * ``KFold(shuffle=False, random_state=...)`` raises in sklearn —
            shuffling is now enabled so the seed is meaningful.
          * ``y_adv`` was a NumPy array but was indexed with ``.iloc``; it is
            now a pandas Series.
          * The original re-used ``train_idx``/``val_idx`` (indices into X) to
            slice the re-stacked adversarial frame, scrambling rows; a
            stratified split of the adversarial set is used instead.
          * ``calculate_auc`` was called without its ``model`` argument.

        Args:
            X (DataFrame): feature data.
            y (Series): label data (not used by the adversarial check itself).
            n_splits (int): number of folds.
            random_state (int): RNG seed.

        Returns:
            (auc_scores, mean_auc, std_auc)
        """
        kf = KFold(n_splits=n_splits, shuffle=True, random_state=random_state)
        auc_scores = []

        for fold, (train_idx, val_idx) in enumerate(kf.split(X)):
            # Stack the fold's train rows (label 0) over its val rows (label 1).
            X_adv = pd.concat([X.iloc[train_idx], X.iloc[val_idx]]).reset_index(drop=True)
            y_adv = pd.Series([0] * len(train_idx) + [1] * len(val_idx))
            # Stratified split so both classes appear on both sides.
            X_train_fold, X_val_fold, y_train_fold, y_val_fold = train_test_split(
                X_adv, y_adv, test_size=0.2, stratify=y_adv, random_state=random_state
            )

            fold_auc = EvaluationMetrics.calculate_auc(
                RandomForestClassifier(random_state=random_state),
                X_train_fold, y_train_fold, X_val_fold, y_val_fold
            )
            auc_scores.append(fold_auc)
            print(f"Fold {fold + 1} AUC: {fold_auc:.4f}")

        mean_auc = np.mean(auc_scores)
        std_auc = np.std(auc_scores)
        print(f"\n平均 AUC: {mean_auc:.4f} ± {std_auc:.4f}")
        return auc_scores, mean_auc, std_auc

    @staticmethod
    def long_short_returns(factor: Union[pd.DataFrame, pd.Series],
                           y: Union[pd.DataFrame, pd.Series],
                           decay_num: int = 1,
                           long_or_short: bool = True,
                           ):
        """
        Daily long-short spread: mean return of the top factor quintile minus
        the bottom quintile (or the reverse).

        Args:
            factor: factor values, dates x assets.
            y: returns aligned with ``factor``.
            decay_num: divisor spreading multi-day returns over the holding period.
            long_or_short: True -> top minus bottom; False -> bottom minus top.

        Returns:
            Series of daily long-short spreads.
        """
        pct_ranks = factor.rank(axis=1, pct=True)
        edges = np.linspace(0, 1, 6)
        edges[-1] += 1e-8  # make the last bin right-inclusive

        # Vectorized quintile assignment: labels 0 (bottom) .. 4 (top).
        labels = np.digitize(pct_ranks.to_numpy(), edges, right=False) - 1
        quintiles = pd.DataFrame(labels, index=factor.index, columns=factor.columns)

        top_leg = y[quintiles == 4].mean(axis=1) / decay_num
        bottom_leg = y[quintiles == 0].mean(axis=1) / decay_num

        if long_or_short:
            return top_leg - bottom_leg
        return bottom_leg - top_leg

    @staticmethod
    def separate_other_factor(ic_series: pd.Series, other_factors: pd.DataFrame):
        """
        Regress a factor's IC series on other factors to judge whether the
        factor has predictive power independent of them; prints the OLS
        summary (returns None).

        Args:
            ic_series: the factor's IC time series (dependent variable).
            other_factors: candidate explanatory factors (independent variables).

        Notes:
            How to read the regression summary:

            1. Coefficients — how strongly each other factor explains this
               factor's IC. A coefficient with p > 0.05 means that factor does
               not significantly explain the IC, so the current factor may
               have independent power; p <= 0.05 means part of the current
               factor's predictive power may be explained by that factor.

            2. Intercept — the IC level when the other factors are zero. A
               significantly non-zero intercept (p <= 0.05) suggests genuine
               independent predictive power; an insignificant intercept
               suggests the IC may be fully explained by the other factors.

            3. R-squared — the share of IC variance explained by the other
               factors, in [0, 1]. Low (< 0.3) favors independence; high
               (> 0.7) means the IC is largely explained by the other factors.

            4. Residuals — the part of the IC left unexplained. A
               significantly non-zero residual mean indicates independent
               power; roughly normal, patternless residuals support the model
               assumptions.

            5. Multicollinearity — check the other factors against each other
               to avoid distorted results: a VIF above 10, or pairwise
               correlations above 0.8, indicate collinearity; drop or merge
               the offending factors.
        """
        model = sm.OLS(ic_series, sm.add_constant(other_factors)).fit()
        print(model.summary())

    @staticmethod
    def brinson(df: pd.DataFrame):
        """
        Brinson attribution from per-sector weights and returns.

        Expects one row per sector with columns 'rb_i' (benchmark return),
        'wb_i' (benchmark weight), 'rp_i' (portfolio return) and 'wp_i'
        (portfolio weight). Prints allocation (AR), selection + interaction
        (SR) and total excess return; returns None.

        Fix: the original wrote the intermediate Q1-Q4 columns into the
        caller's DataFrame; the computation now runs on a copy.
        """
        df = df.copy()
        df['Q1'] = df['rb_i'] * df['wb_i']
        df['Q2'] = df['rb_i'] * df['wp_i']
        df['Q3'] = df['rp_i'] * df['wb_i']
        df['Q4'] = df['rp_i'] * df['wp_i']
        result = pd.DataFrame()
        result['板块配置'] = df['Q2'] - df['Q1'] - (df['wp_i'] - df['wb_i']) * sum(df['Q1'])
        result['板块内选股'] = df['Q3'] - df['Q1']
        result['交互收益'] = df['Q4'] - df['Q3'] - df['Q2'] + df['Q1']
        result['总超额收益'] = df['Q4'] - df['Q1']
        # Column sums as the grand-total row (idiomatic replacement for the
        # original apply(lambda x: x.sum(), axis=0)).
        result.loc['合计'] = result.sum(axis=0)
        AR = result.loc['合计', '板块配置']
        SR = result.loc['合计', '板块内选股'] + result.loc['合计', '交互收益']
        total_R = result.loc['合计', '总超额收益']
        print('AR={:.2%},SR={:.2%},总超额收益={:.2%}'.format(AR, SR, total_R))

    @staticmethod
    def shap_evaluation(model: lgb.Booster, val, features):
        """
        Rank features by mean |SHAP| on the validation slice and print the
        top 20.

        Args:
            model: fitted LightGBM booster.
            val: validation DataFrame containing ``features`` columns.
            features: feature-name list, indexable by position.

        Returns:
            list: the 20 most impactful feature names, descending.
        """
        import shap
        explainer = shap.TreeExplainer(model)
        shap_values = explainer.shap_values(val[features])

        mean_abs_impact = np.abs(shap_values).mean(0)
        descending_order = np.argsort(mean_abs_impact)[::-1]
        top_factors = [features[i] for i in descending_order[:20]]
        print("Top 20 Factors:", top_factors)
        return top_factors

    @staticmethod
    def lgb_flatten_ic(model: lgb.Booster, train, val):
        """
        Compare in-sample vs. out-of-sample IC of a fitted LightGBM model and
        print an overfitting verdict based on the IC gap.

        Args:
            model: fitted LightGBM booster.
            train: training DataFrame including a 'target' column.
            val: validation DataFrame including a 'target' column.
        """
        train_features = train.drop(columns=['target'])
        val_features = val.drop(columns=['target'])
        train_ic = np.corrcoef(model.predict(train_features), train['target'])[0, 1]
        val_ic = np.corrcoef(model.predict(val_features), val['target'])[0, 1]
        ic_gap = train_ic - val_ic
        print(f'train_ic={train_ic}, val_ic={val_ic}, ic_gap={ic_gap}')
        # Rule-of-thumb alert thresholds on the train/val IC gap.
        if ic_gap > 0.15:
            print(f'警告：过拟合风险高（IC差值={ic_gap:.2f}）')
        elif ic_gap > 0.1:
            print(f'注意：可能存在过拟合（IC差值={ic_gap:.2f}）')
        else:
            print(f'模型泛化正常（IC差值={ic_gap:.2f}）')

    @staticmethod
    def lgb_rank_ic(model: lgb.Booster, data: DS = None, *, columns=None, index=None,
                    returns: pd.DataFrame = None):
        """
        Predict with a fitted LightGBM model, reshape the flat predictions
        back into a dates-x-assets frame, and compute the daily rank IC
        against realized returns.

        Args:
            model: fitted LightGBM booster.
            data: flattened feature matrix for prediction.
            columns: asset labels used to un-flatten the predictions.
            index: date labels used to un-flatten the predictions.
            returns: realized forward returns (the true y).

        Returns:
            (rank_ic, data_pre): daily rank-IC series and the reshaped
            prediction DataFrame.
        """
        data_pre = model.predict(data)
        data_pre = pr.unflatten(data_pre, columns, index)
        rank_ic = EvaluationMetrics.P_rank_ic(data_pre, returns)
        return rank_ic, data_pre

    @staticmethod
    def unflatten_ic(data_pre: np.ndarray, columns, index, returns: pd.DataFrame):
        """
        Reshape flat predictions into a dates-x-assets frame and compute the
        daily rank IC against realized returns.

        Args:
            data_pre: flat prediction vector.
            columns: asset labels for un-flattening.
            index: date labels for un-flattening.
            returns: realized forward returns (the true y).

        Returns:
            (rank_ic, prediction_frame)
        """
        prediction_frame = pr.unflatten(data_pre, columns, index)
        rank_ic = EvaluationMetrics.P_rank_ic(prediction_frame, returns)
        return rank_ic, prediction_frame

    @staticmethod
    def evaluate_ic_series(ic_series: pd.Series, show_plot: bool = True,
                           *, which_dataset: Literal['train', 'valid', 'test'] = 'train') -> pd.Series:
        """
        Summarize an IC series (mean, quantiles, std, IR, one-sample t-test)
        and optionally plot the IC time series plus its cumulative sum.

        Args:
            ic_series: rank-IC series, indexed by time.
            show_plot: whether to render the two diagnostic charts.
            which_dataset: label used in the report and chart titles.

        Returns:
            pd.Series of summary statistics.

        Note:
            Fix: the significance key previously read "Significant (>0.05)"
            while the test is ``p_value < 0.05`` — the label now matches the
            condition. Callers indexing the old key must update.
        """
        # Import the scipy.stats package directly; the module-level
        # `from scipy.stats import stats` alias is deprecated in SciPy.
        from scipy import stats as scipy_stats

        ic_series = ic_series.dropna()

        mean_ic = ic_series.mean()
        std_ic = ic_series.std()
        ir = mean_ic / (std_ic + 1e-8)  # epsilon guards a zero-variance series
        t_stat, p_value = scipy_stats.ttest_1samp(ic_series, 0)

        result = pd.Series({
            'dataset_name': which_dataset,
            "Mean IC": mean_ic,
            '中位数': ic_series.quantile(0.5),
            '0.75分位数': ic_series.quantile(0.75),
            "Std IC": std_ic,
            "IR (Information Ratio)": ir,
            "t-stat": t_stat,
            "p-value": p_value,
            # Bug fix: label previously said ">0.05" for a p < 0.05 test.
            "Significant (p<0.05)": p_value < 0.05
        })
        print(result)

        if show_plot:
            fig, axs = plt.subplots(2, 1, figsize=(12, 8), sharex=True)

            # Top panel: raw IC series around the zero line.
            axs[0].plot(ic_series, label="IC")
            axs[0].axhline(0, color='red', linestyle='--', linewidth=1)
            axs[0].set_title(f"{which_dataset}'s IC Time Series (IR = {ir:.2f})")
            axs[0].set_ylabel("Rank IC")
            axs[0].grid(True)
            axs[0].legend()

            # Bottom panel: cumulative IC — a steadily rising line indicates a
            # persistently predictive factor.
            ic_cumsum = ic_series.cumsum()
            axs[1].plot(ic_cumsum, label='IC Cumsum', color='blue')
            axs[1].axhline(0, color='black', linestyle='--', linewidth=1)
            axs[1].set_title(f"{which_dataset}'s IC cumsum")
            axs[1].set_xlabel("Date")
            axs[1].set_ylabel("Cumsum IC")
            axs[1].grid(True)
            axs[1].legend()

            plt.tight_layout()
            plt.show()

        return result


# Short module-level alias so call sites can write `eva.P_rank_ic(...)`.
eva = EvaluationMetrics



class FactorEvaluation(Graph):
    def __init__(self, factor: np.ndarray, price: np.ndarray,
                 returns_for_future: Optional[dict[int, pd.DataFrame]] = None, holding_day: list = None,
                 rank_or_normal: bool = True, index: pd.Index = None, columns: pd.Index = None):
        """Collect factor/price inputs, derive holding periods and hand everything to Graph.

        Args:
            factor: factor value matrix (assumed time x instrument — confirm with callers).
            price: price matrix aligned with ``factor``.
            returns_for_future: optional precomputed future returns keyed by holding
                period; when given, its keys override ``holding_day``.
            holding_day: holding periods to evaluate; defaults to a standard ladder
                when neither it nor ``returns_for_future`` is supplied.
            rank_or_normal: flag forwarded to the IC computation downstream
                (semantics live in EvaluationMetrics.several_decay_ic).
            index / columns: optional pandas labels for the numpy inputs.
        """
        input_dict = {'factor': factor, 'price': price}
        # BUG FIX: the check result used to be discarded, so bad data slipped through
        # silently. Surface it as a warning (kept non-fatal on purpose so existing
        # pipelines do not start crashing).
        if self.check_data(input_dict):
            warnings.warn('check_data failed: factor/price contain constant or all-NaN slices')
        if returns_for_future is not None:
            holding_day = list(returns_for_future.keys())
            warnings.warn('returns_for_future被提供，取用returns_for_future的key值作为holding_day')
        elif holding_day is None:
            # default holding-period ladder used when nothing is specified
            holding_day = [1, 2, 5, 7, 10, 20, 40, 60, 90, 180]
        input_dict.update({'returns_for_future': returns_for_future})
        input_dict.update({'holding_day': holding_day, 'rank_or_normal': rank_or_normal, 'index': index,
                           'columns': columns})
        super().__init__(input_dict)

    @staticmethod
    def is_single_value(obj):
        """Degeneracy heuristic: True means the check FAILED (data looks constant).

        A DataFrame fails when more than 400 rows are constant across columns,
        or more than half of its columns are constant over time. Anything that
        is not a DataFrame passes (returns False).
        """
        if not isinstance(obj, pd.DataFrame):
            return False  # not a DataFrame (or Series): nothing to check
        constant_rows = (obj.nunique(axis=1) == 1).sum()
        constant_cols = (obj.nunique(axis=0) == 1).sum()
        return constant_rows > 400 or constant_cols > (len(obj.columns) / 2)

    @staticmethod
    def is_nan(data):
        """Return True when ``data`` fails the NaN check.

        For a DataFrame, ``.all()`` reduces over axis 0 first, so this flags any
        fully-NaN column; for a plain ndarray the double reduction flags a fully
        NaN array. BUG FIX: previously returned an implicit ``None`` in the
        healthy case; now always returns an explicit bool (backward compatible,
        since both ``None`` and ``False`` are falsy to callers using ``any``).
        """
        return bool(np.isnan(data).all().any())

    @staticmethod
    def check_data(data: dict) -> bool:
        """Return True when any input value fails validation (degenerate or NaN).

        BUG FIX: the original iterated ``data.items()``, so ``is_single_value``
        received ``(key, value)`` tuples and therefore always returned False;
        it now receives the values themselves. Also returns an explicit bool
        instead of an implicit ``None`` on success.
        """
        values = list(data.values())
        failed_single = any(FactorEvaluation.is_single_value(v) for v in values)
        failed_nan = any(FactorEvaluation.is_nan(v) for v in values)
        return failed_single or failed_nan

    @staticmethod
    def discretize(arr, bins=5, method='quantile'):
        """Bin a 1-D array into integer labels.

        ``method='quantile'`` uses equal-frequency edges (percentiles);
        ``method='uniform'`` uses equal-width edges between min and max.
        Duplicate edges simply collapse into the same bin.

        Raises:
            ValueError: for any other ``method``.
        """
        if method == 'quantile':
            edges = np.percentile(arr, np.linspace(0, 100, bins + 1))
        elif method == 'uniform':
            edges = np.linspace(arr.min(), arr.max(), bins + 1)
        else:
            raise ValueError("method must be 'quantile' or 'uniform'")
        # interior edges only; right=True keeps the original binning rule
        return np.digitize(arr, edges[1:-1], right=True)

    @staticmethod
    def knn_mutual_info(x, y, k=3):
        """Kraskov-style k-NN mutual information estimate between two 1-D arrays.

        Args:
            x: factor values, 1-D array.
            y: future return values, 1-D array (same length as ``x``).
            k: neighbour count (3-10 is a common choice).

        Returns:
            Non-negative MI estimate in nats.

        NOTE(review): the joint KDTree uses its default (Euclidean) metric,
        while Kraskov's estimator is usually stated for the max-norm — confirm
        this is intended.
        """
        n_samples = len(x)
        joint = np.column_stack((x.reshape(-1, 1), y.reshape(-1, 1)))

        # k-th neighbour distance in joint space (query returns self as neighbour 0)
        knn_dists, _ = KDTree(joint).query(joint, k=k + 1)
        radius = knn_dists[:, -1]

        count_x = np.zeros(n_samples)
        count_y = np.zeros(n_samples)
        for idx in range(n_samples):
            # marginal neighbour counts within the joint-space radius, self excluded
            count_x[idx] = np.sum(np.abs(x - x[idx]) < radius[idx]) - 1
            count_y[idx] = np.sum(np.abs(y - y[idx]) < radius[idx]) - 1

        psi = FactorEvaluation.psi
        mi_estimate = np.mean(psi(k) + psi(n_samples) - psi(count_x) - psi(count_y))
        return max(mi_estimate, 0)  # MI is theoretically non-negative

    @staticmethod
    def psi(x):
        """Cheap digamma stand-in: log(x + eps).

        ``scipy.special.digamma`` would be the exact choice; per the original
        author's note the log form is kept as the more stable option for small
        neighbour counts.
        """
        eps = 1e-10  # guards log(0) when a count is zero
        return np.log(x + eps)

    # @staticmethod
    # def mutual_information(self, bins=10, method='quantile'):
    #     df = self.df.copy()
    #     if method == 'quantile':
    #         df['factor_bin'] = pd.qcut(df['factor'], bins, duplicates='drop')
    #         df['ret_bin'] = pd.qcut(df['ret'], bins, duplicates='drop')
    #     elif method == 'uniform':
    #         df['factor_bin'] = pd.cut(df['factor'], bins)
    #         df['ret_bin'] = pd.cut(df['ret'], bins)
    #     else:
    #         raise ValueError("method must be 'quantile' or 'uniform'")
    #     mi = mutual_info_score(df['factor_bin'], df['ret_bin'])
    #     return mi

    @staticmethod
    def rolling_std_periods(arr: np.ndarray, window: int) -> np.ndarray:
        """Rolling standard deviation along axis 0, fully vectorised.

        Input shape (T, ...) -> output shape (T, ...). Leading rows are computed
        from fewer than ``window`` observations (NaN padding + nanstd, so the
        behaviour is min_periods=1-like with ddof=0); an all-NaN window yields
        NaN and a RuntimeWarning.

        BUG FIX: ``sliding_window_view`` appends the window axis as the LAST
        axis (numpy semantics), so the reduction must use ``axis=-1`` — the old
        ``axis=1`` collapsed a data axis instead. The NaN-padded array already
        yields exactly T windows, so the old ``[window - 1:]`` trim (which
        silently shortened the time axis) is removed too.
        """
        pad = np.full((window - 1, *arr.shape[1:]), np.nan)
        padded = np.concatenate([pad, arr], axis=0)  # shape: (T + window - 1, ...)
        windows = sliding_window_view(padded, window_shape=window, axis=0)  # (T, ..., window)
        return np.nanstd(windows, axis=-1)  # shape: (T, ...)

    @staticmethod
    def rolling_mean_periods(arr: np.ndarray, window: int) -> np.ndarray:
        """Rolling mean along axis 0, fully vectorised (docstring previously
        said "std" by copy-paste; this computes the mean).

        Input shape (T, ...) -> output shape (T, ...). Leading rows use fewer
        than ``window`` observations (NaN padding + nanmean, min_periods=1-like).

        BUG FIX: same as ``rolling_std_periods`` — ``sliding_window_view``
        appends the window axis LAST, so reduce over ``axis=-1`` (the old
        ``axis=1`` collapsed a data axis), and the padded array already yields
        exactly T windows, so the erroneous ``[window - 1:]`` trim is removed.
        """
        pad = np.full((window - 1, *arr.shape[1:]), np.nan)
        padded = np.concatenate([pad, arr], axis=0)  # shape: (T + window - 1, ...)
        windows = sliding_window_view(padded, window_shape=window, axis=0)  # (T, ..., window)
        return np.nanmean(windows, axis=-1)  # shape: (T, ...)


    @staticmethod
    def ic_evaluation_np(ic_array: np.ndarray, window: int = 40) -> DataFrame:
        """NumPy twin of ``ic_evaluation``: per-column IC diagnostics for a
        (T, N) IC matrix, returned as a DataFrame with one row per column.

        BUG FIXES vs the original:
        - ``np.isnan(ic_array, axis=0)`` raised TypeError (``isnan`` has no
          ``axis`` keyword); the mask is now a plain per-element validity mask,
          matching the nancumsum-with-zero-fill intent of the drawdown step.
        - ``rolling_std`` is reduced to one value per column (mean over time),
          matching the pandas version's ``rolling(...).std().mean()``; before,
          a full (and wrongly-shaped) array leaked into the scalar score.
        - ``rolling_ic_max`` is now the per-column max of the rolling mean,
          matching the pandas version, instead of a single global max.
        The rolling statistics are computed inline (window axis is appended
        LAST by ``sliding_window_view``, hence ``axis=-1``).
        """
        T, N = ic_array.shape
        valid = ~np.isnan(ic_array)  # per-element validity mask

        # location/scale per column
        mean_ic = np.nanmean(ic_array, axis=0)
        std_ic = np.nanstd(ic_array, axis=0)
        normal_ir = mean_ic / std_ic

        # t statistic & p value (smaller p -> more significantly non-zero)
        t_stat, p_value = stats.ttest_1samp(ic_array, 0, nan_policy='omit')

        # shape of the distribution
        skew = stats.skew(ic_array, nan_policy='omit')
        kurt = stats.kurtosis(ic_array, nan_policy='omit')

        # hit ratio: share of observations whose sign agrees with the column mean
        hit_ratio = np.nanmean(np.sign(ic_array) == np.sign(mean_ic), axis=0)

        # max drawdown of the cumulative IC (NaNs contribute 0 to the cumsum)
        cumsum = np.nancumsum(np.where(valid, ic_array, 0.0), axis=0)
        cummax = np.maximum.accumulate(cumsum, axis=0)
        max_drawdown = np.nanmax(cummax - cumsum, axis=0)

        # rolling window stats, computed inline so this block is self-contained;
        # NaN padding gives min_periods=1-like behaviour at the start
        pad = np.full((window - 1, N), np.nan)
        padded = np.concatenate([pad, ic_array], axis=0)
        windows = sliding_window_view(padded, window_shape=window, axis=0)  # (T, N, window)
        rolling_std = np.nanmean(np.nanstd(windows, axis=-1), axis=0)  # (N,)
        rolling_mean = np.nanmean(windows, axis=-1)  # (T, N)
        rolling_ic_max = np.nanmax(rolling_mean, axis=0)  # (N,)
        rolling_ic_max_ir = rolling_ic_max / std_ic

        # jump count: day-over-day changes more than 2 sigma above the mean change
        diff = np.diff(ic_array, axis=0)
        diff_mean = np.nanmean(diff, axis=0)
        diff_std = np.nanstd(diff, axis=0)
        jumps = np.sum(np.abs((diff - diff_mean) / diff_std) > 2, axis=0)

        # trend slope via vectorised least squares against t = 0..T-1
        x = np.arange(T)
        x_centered = x - np.mean(x)
        denominator = np.nansum(x_centered ** 2)
        slope = np.nansum((ic_array - mean_ic) * x_centered[:, None], axis=0) / denominator

        # stability of the rolling std relative to the full-sample std
        std_ratio = rolling_std / std_ic

        # heuristic composite score (weights inherited from the pandas version)
        score = (
                + mean_ic * 100
                + hit_ratio * 50
                + np.clip(t_stat, 0, 5) * 10
                - std_ic * 100
                - np.abs(skew) * 100
                - max_drawdown * 5
                - rolling_std * 100
                + slope * 100
                - kurt * 200
                - (1 - std_ratio) * 50
                - jumps
        )

        out = {
            'mean_ic': mean_ic,
            'std_ic': std_ic,
            'p_value': p_value,
            'rolling_std': rolling_std,
            'skew': skew,
            'max_drawdown': max_drawdown,
            'slope': slope,
            'jumps': jumps,
            'kurt': kurt,
            'hit_ratio': hit_ratio,
            'rolling_ic_max_ir': rolling_ic_max_ir,
            'normal_ir': normal_ir,
            'score': score,
        }
        return pd.DataFrame(out)

    @staticmethod
    def calc_slope(series: pd.Series) -> float:
        """OLS slope of the series values against 0..n-1 (NaNs dropped first).

        BUG FIX: ``linregress`` raises on fewer than two points; return NaN in
        that case so column-wise ``apply`` over sparse IC frames cannot crash.
        """
        clean = series.dropna()
        if len(clean) < 2:
            return float('nan')
        x = np.arange(len(clean))
        return linregress(x, clean.values).slope

    @staticmethod
    def ic_evaluation(ic_frame: pd.DataFrame, window: int = 40,
                      types: str = 'code_ic', decay_num: Union[int, pd.Series] = 1) -> pd.DataFrame:
        """Column-wise IC diagnostics: |IC| mean, IR, drawdown, stability, score.

        Args:
            ic_frame: IC time series per column (time index).
            window: rolling window for the stability statistics.
            types: tag written into the output ('code_ic' / 'time_ic').
            decay_num: currently unused; kept for interface compatibility.

        Returns:
            DataFrame of metrics, one row per column of ``ic_frame``.
        """
        non_empty = ic_frame.dropna(how='all')

        # location / scale (note: mean of the ABSOLUTE IC)
        mean_ic = ic_frame.abs().mean()
        std_ic = ic_frame.std()
        normal_ir = mean_ic / std_ic
        rolling_ic_max = ic_frame.rolling(window, min_periods=1).mean().max()
        rolling_ic_max_ir = rolling_ic_max / std_ic

        # smaller p-value -> more significantly different from zero
        t_stat, p_value = stats.ttest_1samp(non_empty, 0, nan_policy='omit')
        hit_ratio = (np.sign(ic_frame) == np.sign(ic_frame.mean())).mean()
        skew = ic_frame.skew()
        kurt = ic_frame.kurt()

        # max drawdown of the cumulative IC
        cumulative = ic_frame.cumsum()
        max_drawdown = (cumulative.cummax() - cumulative).max()

        # rolling-std level and its ratio to the full-sample std (stability)
        rolling_std = ic_frame.rolling(window=window).std().mean()
        std_ratio = rolling_std / std_ic
        slope = non_empty.apply(FactorEvaluation.calc_slope)

        # jump count: standardised day-over-day change exceeding 2
        diff = ic_frame.diff()
        jumps = (((diff - diff.mean()) / diff.std()) > 2).sum()

        score = (
                + mean_ic * 100
                + hit_ratio * 50
                + np.clip(t_stat, 0, 5) * 10
                - std_ic * 100
                - abs(skew) * 100
                - max_drawdown * 5
                - rolling_std * 100
                + slope * 100
                - kurt * 200
                - (1 - std_ratio) * 50
                - jumps
        )
        return pd.DataFrame({
            'mean_ic': mean_ic,
            'std_ic': std_ic,
            'p_value': p_value,
            'rolling_std': rolling_std,
            'skew': skew,
            'max_drawdown': max_drawdown,
            'slope': slope,
            'jumps': jumps,
            'kurt': kurt,
            'hit_ratio': hit_ratio,
            'rolling_ic_max_ir': rolling_ic_max_ir,
            'normal_ir': normal_ir,
            'score': score,
            'types': types
        })

    @Graph.evaluator
    def factor_split(self, factor):
        """Split the factor matrix into parts via ``pr.split``.

        NOTE(review): the second argument (False) toggles some split option —
        its semantics live in ``util.data_process.pr``; confirm there.
        """
        return pr.split(factor, False)

    @Graph.evaluator
    def best_series(self, ic_evaluation_frame):
        """Pick the holding period whose ``normal_ir`` has the largest magnitude."""
        ir_series: pd.Series = ic_evaluation_frame['normal_ir']
        best_key = ir_series.abs().dropna().idxmax()
        return {
            'best_returns_lengthen': int(best_key),
            'ir_decay_series': ir_series.to_list(),
            'best_ir': float(ir_series[best_key]),
        }

    @Graph.evaluator
    def returns_for_future(self, price, holding_day):
        """Future returns of ``price`` for every holding period, in order."""
        return [pr.np_returns(price, period) for period in holding_day]

    @Graph.evaluator
    def ic_evaluation_frame(self, Group_ic_decay_returns: pd.DataFrame):
        """Run the standard column-wise IC diagnostics on the decay-IC frame.

        Args:
            Group_ic_decay_returns: IC frame (time index, one column per holding period).

        Returns:
            Metrics DataFrame produced by ``ic_evaluation``.
        """
        return self.ic_evaluation(Group_ic_decay_returns)

    @Graph.evaluator
    def ic_evaluation_frame_split(self, Group_ic_decay_returns: pd.DataFrame):
        """IC diagnostics computed separately on each part from ``pr.split``.

        Args:
            Group_ic_decay_returns: IC frame to split and evaluate.

        Returns:
            List of metrics DataFrames, one per split.
        """
        return [self.ic_evaluation(part) for part in pr.split(Group_ic_decay_returns)]

    @Graph.evaluator
    def ic_evaluation_frame_roll(self, Single_ic_decay_returns_roll: dict[int, DS]):
        """Per-holding-period time-IC diagnostics, keyed like the input dict."""
        return {period: self.ic_evaluation(frame, types='time_ic')
                for period, frame in Single_ic_decay_returns_roll.items()}

    @Graph.evaluator
    def Group_ic_decay_returns(self, rank_or_normal, factor, returns_for_future, holding_day) -> pd.DataFrame:
        """Cross-sectional IC at every time step, one column per holding period.

        Args:
            rank_or_normal: IC flavour flag forwarded to the metric helper.
            factor: factor matrix.
            returns_for_future: future returns per holding period.
            holding_day: holding periods under evaluation.

        Returns:
            DataFrame indexed by time, columns = holding periods.
        """
        return eva.several_decay_ic(rank_or_normal=rank_or_normal, factor=factor,
                                    holding_day=holding_day, y=returns_for_future)

    @Graph.evaluator
    def Single_ic_decay_returns_roll(self, factor, returns_for_future, holding_day, index, columns) -> dict:
        """Rolling IC over time, per instrument code.

        Rules of thumb from the original notes:
            ICIR > 0.5 is good; IC > 0.05 usable; IC > 0.1 excellent.

        Returns:
            dict of frames indexed by time with instrument-code columns.
        """
        return eva.several_decay_ic_no_decay(factor=factor, y=returns_for_future,
                                             holding_day=holding_day,
                                             index=index, columns=columns)

    @Graph.evaluator
    def Single_ic_decay_sharp_roll(self, factor, Single_whole_position_sharp, holding_day, index, columns) -> dict:
        """Rolling IC between the factor and the per-position sharpe series."""
        return eva.several_decay_ic_no_decay(factor=factor, y=Single_whole_position_sharp,
                                             holding_day=holding_day,
                                             index=index, columns=columns)


    @Graph.evaluator
    def sliding_mutual_information(self, factor: np.ndarray, returns_for_future: np.ndarray,
                                   window_size=50, bins=5, method='quantile',
                                   threshold=0.01):
        """Sliding-window binned mutual information between factor and returns.

        Each window's factor/return columns are discretised and scored with
        ``mutual_info_score``; windows scoring below ``threshold`` are flagged
        in ``non_signal_mask``.

        NOTE(review): the shape check compares ``factor`` against
        ``returns_for_future`` as one array, but the loop then iterates
        ``for ret in returns_for_future`` and slices each ``ret`` as 2-D —
        those two usages are inconsistent (elsewhere in this class
        ``returns_for_future`` is a list of matrices); confirm the intended
        input type.
        NOTE(review): ``mi_scores`` entries are overwritten on every iteration
        of the outer ``ret`` loop, so only the last horizon's scores survive.
        """
        if factor.shape != returns_for_future.shape:
            raise ValueError("factor and ret must have the same shape")
        n_samples, n_features = factor.shape

        mi_scores = np.full((n_samples, n_features), np.nan)
        non_signal_mask = np.zeros((n_samples, n_features), dtype=bool)

        # each window's MI score is stored at the window centre
        half_window = window_size // 2
        for ret in returns_for_future:
            for i in range(n_samples - window_size + 1):
                f_sub = factor[i:i + window_size, :]
                r_sub = ret[i:i + window_size, :]

                for j in range(n_features):
                    try:
                        f_bin = self.discretize(f_sub[:, j], bins=bins, method=method)
                        r_bin = self.discretize(r_sub[:, j], bins=bins, method=method)

                        mi = mutual_info_score(f_bin, r_bin)
                        mi_scores[i + half_window, j] = mi
                        # low-MI windows are flagged as carrying no signal
                        if mi < threshold:
                            non_signal_mask[i:i + window_size, j] = True
                    except Exception:
                        # NOTE(review): broad swallow — binning/scoring failures
                        # are silently skipped; consider narrowing this.
                        continue

        return mi_scores, non_signal_mask

    @Graph.evaluator
    def rolling_mutual_info(self, factor_split, returns_for_future):
        """Rolling KNN mutual information between the factor and future returns.

        Uses the first element of ``factor_split`` and the first holding
        period's returns; at each time t the preceding ``window`` rows (all
        instruments pooled and flattened) feed the Kraskov-style estimator.

        Returns:
            1-D array of length T; NaN where fewer than 100 clean samples exist.
        """
        window, k = 60, 5
        fac = factor_split[0]
        T, N = fac.shape
        mi_series = np.full(T, np.nan)
        rets = returns_for_future[0]
        if rets.shape != (T, N):
            raise ValueError("因子矩阵和收益矩阵形状不匹配")

        for t in tqdm(range(window, T)):
            # pool the whole window across instruments
            fac_win = fac[t - window:t].flatten()
            ret_win = rets[t - window:t].flatten()

            # drop pairs where either side is missing
            keep = ~(np.isnan(fac_win) | np.isnan(ret_win))
            fac_clean = fac_win[keep]
            ret_clean = ret_win[keep]

            # require enough joint samples for a stable KNN estimate
            if len(fac_clean) < 100:
                continue
            mi_series[t] = self.knn_mutual_info(fac_clean, ret_clean, k=k)

        return mi_series

    @Graph.evaluator
    def special_area(self, factor: np.ndarray, returns_for_future):
        """Suppress a factor-sorted region chosen on the train split, then
        measure train/valid/test ICs with that region removed.

        For each holding period: sort the train factor per column, let
        ``eva.several_sort_area`` pick a mask over the sorted layout, blank out
        the masked region and compute the three ICs.

        NOTE(review): ``valid_ic`` and ``test_ic`` are computed against
        ``ret_train_sort`` (train returns) rather than valid/test returns —
        confirm whether that is intentional.
        NOTE(review): ``mask`` has the train split's shape but is applied to
        ``fac_valid`` / ``fac_test``; this only works if all splits share the
        same length — verify against ``pr.split``.
        """
        T, N = factor.shape
        factor = pr.norm_index_cumsum(factor, 0)
        fac_train, fac_valid, fac_test = pr.split(factor, False)
        # 1. per-column sort order of the (normalised) train factor
        sort_indices = np.argsort(fac_train, axis=0)
        # 2. row/column index grids for fancy indexing along the sorted layout
        row_grid, col_grid = np.meshgrid(
            np.arange(fac_train.shape[0]),
            np.arange(fac_train.shape[1]),
            indexing='ij'
        )
        sorted_nda_1 = fac_train[(sort_indices, col_grid)]  # train factor, sorted within each column
        ic_list = []
        for returns in returns_for_future:
            ret_train, ret_valid, ret_test = pr.split(returns, False)
            ret_train_sort = ret_train[(sort_indices, col_grid)]  # returns reordered by factor rank
            mask = eva.several_sort_area(factor=sorted_nda_1, y=ret_train_sort)
            # # 取值（只对有效的列）
            # first_values = np.full(N, np.nan)
            # last_values = np.full(N, np.nan)
            # first_values[valid_mask] = sorted_nda_1[first[valid_mask], col_idx[valid_mask]]
            # last_values[valid_mask] = sorted_nda_1[last[valid_mask], col_idx[valid_mask]]
            # # 构造广播形状 (T, N)，即每列对应自己的 first/last
            # first_matrix = first_values.reshape(1, -1)  # shape (1, N)
            # last_matrix = last_values.reshape(1, -1)  # shape (1, N)
            # # 构造 mask：大于 first 且小于 last 的区域为 True
            # mask = (sorted_nda_1 > first_matrix) & (sorted_nda_1 < last_matrix)
            # apply the mask: NaN for the train IC, 0 for the valid/test factor
            masked_fac_train = sorted_nda_1.copy()
            masked_ret_train = ret_train_sort.copy()
            masked_fac_train[mask] = np.nan
            masked_ret_train[mask] = np.nan
            train_ic = EvaluationMetrics.N_normal_ic_0(masked_fac_train, masked_ret_train)
            masked_fac_valid = fac_valid.copy()
            masked_fac_valid[mask] = 0
            valid_ic = EvaluationMetrics.N_roll_ic(masked_fac_valid, ret_train_sort)
            masked_fac_test = fac_test.copy()
            masked_fac_test[mask] = 0
            test_ic = EvaluationMetrics.N_roll_ic(masked_fac_test, ret_train_sort)
            ic_list.append([train_ic, valid_ic, test_ic])
        return ic_list
