"""
    ================================================================================
                            ------------utf-8--------------
    ================================================================================
@Author: 
    rfdsg
@Create Time: 
    2024/9/23 - 23:14
@Description:

@Attention:
    
"""
import base64
import hashlib
import json
import logging
import os
import uuid
import warnings
import zlib
from functools import wraps
from typing import Union, Optional, Literal, List, Generator, Tuple
import numpy as np
import pandas as pd
from scipy.ndimage import label, find_objects
from sklearn.model_selection import train_test_split
from util.easy_type import DSN, DS

logger = logging.getLogger(__name__)


def on_progress(datas):
    # Default progress callback: simply dumps the payload to stdout.
    print(datas)


def copied_function(source_func):
    """Decorator factory: make the decorated function impersonate *source_func*.

    The returned wrapper forwards every call to the target function but
    carries the metadata (name, docstring, etc.) of *source_func*, so IDEs
    show the source function's documentation.
    """

    def decorator(target_func):
        @wraps(source_func)  # copy __name__, __doc__, ... from the source
        def proxy(*args, **kwargs):
            return target_func(*args, **kwargs)

        # Re-assert the docstring explicitly (wraps already did this).
        proxy.__doc__ = source_func.__doc__
        return proxy

    return decorator


# 修饰器，用于捕获函数中的警告
# Decorator that captures warnings raised inside the wrapped function.
def catch_warnings(func):
    """Decorator: record all warnings emitted by *func*, log them together
    with the call's inputs, persist an offending ndarray argument to disk,
    and escalate any RuntimeWarning to a RuntimeError.

    Fixes vs. the previous version:
    - ``warning == RuntimeWarning`` compared a WarningMessage object to a
      class and was always False, so the RuntimeError escalation never
      fired; the category must be checked via ``warning.category``.
    - ``args[0]`` raised IndexError for calls with no positional args.
    """

    @wraps(func)
    def wrapper(*args, **kwargs):
        # Capture warnings emitted during the call.
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")  # record every warning

            try:
                # Run the original function.
                result = func(*args, **kwargs)
            except Exception as e:
                # Log the failure with its inputs, then re-raise.
                log_message = f"Error encountered: {str(e)}. Input data: args={args}, kwargs={kwargs}"
                logging.error(log_message)
                raise e

            # Inspect any captured warnings.
            for warning in w:
                log_message = f"Warning: {warning.message}. Input data: args={args}, kwargs={kwargs}"
                logging.warning(log_message)

                # Give a short preview of an ndarray input (guard against
                # calls that have no positional arguments at all).
                if args and isinstance(args[0], np.ndarray):
                    array_preview = f"Array preview: {args[0][:10]}...{args[0][-10:]}"
                    logging.warning(array_preview)

                    # Persist the full offending input for post-mortem.
                    np.save('warning_data.npy', args[0])
                    logging.warning("Complete input data saved to 'warning_data.npy'")

                # Escalate RuntimeWarning (and subclasses) to a hard error.
                if issubclass(warning.category, RuntimeWarning):
                    raise RuntimeError(f"Captured warning: {warning.message}")
            return result

    return wrapper


def time_split(indicator: pd.Series, n: int = 1, fre: str = 'T') -> pd.Series:
    """Downsample *indicator* to ``n * fre`` bars, keeping each bar's last value.

    Args:
        indicator: time series with a DatetimeIndex.
        n: number of base periods per bar.
        fre: pandas offset alias for the base period (default 'T' = minute).

    Returns:
        pd.Series: last observation of each resampled bar, NaN bars dropped.
        (The previous ``pd.DatetimeIndex`` return annotation was wrong —
        ``resample(...).last()`` yields a Series.)
    """
    return indicator.resample(f'{n}{fre}').last().dropna()


def split(indicator: np.ndarray, n: int = 1) -> np.ndarray:
    """Chop *indicator* into consecutive chunks of *n* rows.

    Args:
        indicator(np.ndarray): data to be split along its first axis.

        n(int): chunk size (rows per chunk).

    Returns:
        np.ndarray: array of shape ``(len // n, n, -1)``; any trailing rows
        that do not fill a complete chunk are dropped.

    Note:
        The previous implementation reshaped with two ``-1`` axes
        (``(split_length, n, -1, -1)``), which NumPy rejects — it raised
        ValueError on every call. Only one axis may be inferred.
    """
    total_length = indicator.shape[0]
    split_length = total_length // n
    # Drop the incomplete trailing chunk, if any.
    total_length = split_length * n
    # A single inferred axis absorbs whatever trailing dimensions exist.
    split_data = indicator[:total_length].reshape((split_length, n, -1))
    return split_data


def align_schedule(factor: pd.Series, begin_time: str = None,
                   end_time: str = None, calendars=None) -> pd.Series:
    """Align a factor time series to a trading calendar: observations stamped
    outside trading sessions are rolled forward onto the next session.

    Args:
        end_time: inclusive end of the returned slice (``.loc`` semantics).
        begin_time: inclusive start of the returned slice.
        factor(pd.Series): factor time series to align.
        calendars: calendar object exposing ``schedule.open`` (indexed by
            session timestamps) and ``date_to_session(ts, 'next')`` —
            presumably an exchange-calendar wrapper; TODO confirm exact type.

    Returns:
        pd.Series: the aligned factor values sliced to [begin_time, end_time].
        NOTE(review): the code returns ``.iloc[:, 1:]`` of a DataFrame, so the
        actual return type is a DataFrame despite the annotation — confirm.

    """
    # Outer-join the calendar's session-open column with the factor so every
    # timestamp from either side is present.
    target_data = pd.concat([calendars.schedule.open, factor], axis=1, join='outer')
    # Rows whose calendar column is NaN fall outside a trading session.
    index = target_data.iloc[:, 0].isna()
    # # keep only data after the first aligned observation
    # index = index[index[~index].index[0]:]
    # target_data = target_data[target_data.index >= index.index[0]]
    # Keep only the timestamps that are still unaligned.
    index = index[index]
    # If any off-session rows exist, roll each forward to the next trading
    # session, letting already-present session values win via combine_first.
    if index.any():
        for time in index.index:
            index[time] = calendars.date_to_session(time, 'next') - time
            target_data.loc[time + index[time], :] = \
                target_data.loc[time + index[time], :].combine_first(target_data.loc[time, :])
        target_data.dropna(subset=['open'], axis=0, inplace=True, how='any')
    target_data = target_data.iloc[:, 1:]
    return target_data.loc[begin_time: end_time, :]
    # (Loop above rewrites every off-session timestamp onto the next session.)


def norm_for_pd(factor):
    """Column-wise z-score of *factor*, clipped to the range [-1, 1].

    Expects a pandas DataFrame; zero-variance columns are left at 0 rather
    than dividing by zero. Uses pandas' default sample std (ddof=1).
    """
    mu = factor.mean(axis=0)
    sigma = factor.std(axis=0)

    # Zero-variance guard: substitute 1 so the z-score becomes 0.
    sigma[sigma == 0] = 1

    zscored = (factor - mu) / sigma
    return np.clip(zscored, -1, 1)


# @catch_warnings
def clip_for_overflow_and_norm(func):
    """Decorator: clip the wrapped function's output to ±1e100 to tame
    overflow, then z-score it (NaN-aware global mean/std).

    If the clipped result has zero standard deviation, an all-zeros array of
    the same shape is returned instead of dividing by zero.
    """

    @wraps(func)
    def wrapper(*args, **kwargs):
        # Run the wrapped function and bound its output.
        raw = np.clip(func(*args, **kwargs), -1e100, 1e100)

        # Global NaN-aware statistics.
        mu = np.nanmean(raw)
        sigma = np.nanstd(raw)

        # Degenerate case: constant output normalizes to zeros.
        if np.all(sigma == 0):
            return np.zeros_like(raw)
        return (raw - mu) / sigma

    return wrapper


def split_valid(data: Union[pd.Series, np.ndarray]):
    """Chronological train/valid/test split (no shuffling).

    Fractions of the whole sample: 0.4 train, then 0.6 of the remainder as
    valid (0.36 overall) and the rest as test (0.24 overall).
    """
    train, remainder = train_test_split(data, train_size=0.4, shuffle=False)
    valid, test = train_test_split(remainder, train_size=0.6, shuffle=False)
    return train, valid, test


def split_only_test(data: Union[pd.Series, np.ndarray, pd.DataFrame]):
    """Chronological 50/50 train/test split (no validation set, no shuffle)."""
    train, test = train_test_split(data, train_size=0.5, shuffle=False)
    return train, test


def split_year(net_indicator: np.ndarray, time: pd.Series):
    """For each calendar year in *time*, append a NaN-padded copy of
    ``net_indicator[:, :, 3]`` restricted to that year as a new slice along
    axis 2, and return matching labels.

    Args:
        net_indicator: 3-D array aligned row-for-row with *time*; slice
            ``[:, :, 3]`` is the series being split by year — presumably a
            net-value column; TODO confirm.
        time: timestamps for the first axis of *net_indicator*.

    Returns:
        (labels, augmented array) — labels are the fixed set
        ["训练集", '验证集', '测试集', '总体'] plus one string per year.
    """
    # Distinct years, in order of appearance.
    split_list_year = time.dt.year.unique()
    # One pass per year: pad the year's rows back to full length with NaN and
    # append as an extra slice along axis 2 (a per-year mask of column 3).
    for i in split_list_year:
        # NOTE(review): ``time[...][0]`` / ``[-1]`` index by LABEL, not by
        # position — this only works when the index labels happen to include
        # 0 / -1 or match positions; verify against the callers.
        min_index = time.index.get_loc(time[time.dt.year == i][0])
        max_index = time.index.get_loc(time[time.dt.year == i][-1])
        # NOTE(review): the slice stops at max_index and the trailing pad is
        # shape[0] - max_index, so the row AT max_index is excluded —
        # possible off-by-one; confirm.
        median = np.pad(net_indicator[min_index:max_index, :, 3],
                        ((min_index, net_indicator.shape[0] - max_index), (0, 0)),
                        'constant', constant_values=np.nan)
        net_indicator = np.concatenate((net_indicator, median[:, :, np.newaxis]), axis=2)
    split_list_year = [str(num) for num in split_list_year]
    split_list = ["训练集", '验证集', '测试集', '总体'] + split_list_year
    return split_list, net_indicator


def mask_nan(data):
    """Boolean mask that is True wherever *data* is NOT NaN."""
    return np.logical_not(np.isnan(data))


def align_drop(data: Union[pd.Series, pd.DataFrame], cal_return: Optional[pd.Series]):
    """Align *data* with *cal_return* on the index and drop incomplete rows.

    Args:
        data: feature columns (Series or DataFrame).
        cal_return: target series, concatenated as the last column.

    Returns:
        factor, y — the feature columns and the target, restricted to rows
        where every column is non-NaN.
    """
    merged = pd.concat([data, cal_return], axis=1).dropna(axis=0)
    features = merged.iloc[:, :-1]
    target = merged.iloc[:, -1]
    return features, target


def out_types_only_test(y_train, y_test, lens, judge: bool = True):
    """Stitch a train/test pair into one object for reporting.

    Args:
        y_train: 1-D or 2-D array, padded with trailing NaNs up to *lens*.
        y_test: 1-D or 2-D array, padded with leading NaNs up to *lens*.
        lens: total row count of the combined sample.
        judge: True -> NaN-padded ndarray with train/test stacked on a new
            last axis; False -> dict keyed by set name.

    Raises:
        ValueError: for arrays that are neither 1-D nor 2-D (judge=True).
    """
    if not judge:
        return {'训练集': y_train, '测试集': y_test}

    dims = y_train.ndim
    if dims == 1:
        padded_train = np.pad(y_train, (0, lens - y_train.size), 'constant', constant_values=np.nan)
        padded_test = np.pad(y_test, (lens - y_test.size, 0), 'constant', constant_values=np.nan)
        return np.hstack((padded_train[:, np.newaxis], padded_test[:, np.newaxis]))
    if dims == 2:
        padded_train = np.pad(y_train, ((0, lens - y_train.shape[0]), (0, 0)), 'constant', constant_values=np.nan)
        padded_test = np.pad(y_test, ((lens - y_test.shape[0], 0), (0, 0)), 'constant', constant_values=np.nan)
        return np.concatenate((padded_train[:, :, np.newaxis], padded_test[:, :, np.newaxis]), axis=2)
    raise ValueError('提供数据非一维或二维ndarry')



class Process:
    """Grab-bag of static preprocessing helpers for factor/price panels:
    chronological splitting, normalization, returns computation, reshaping
    between pandas and numpy layouts, and filename-safe encoding/hashing.

    The class keeps no state; the module-level ``pr`` instance is merely a
    convenience handle.
    """

    def __init__(self):
        # Stateless: every helper below is a @staticmethod.
        pass

    @staticmethod
    def split_valid(data: DSN, split_threshold: Tuple[float, float] = (0.5, 0.3), need_whole: bool = False):
        """
        Chronologically split *data* into train / valid / test (no shuffle).

        Args:
            need_whole: when True, also return the untouched whole sample.
            data: sample accepted by sklearn's train_test_split.
            split_threshold: (train fraction, valid fraction) of the whole.

        Returns:
            (train, valid, test) or (train, valid, test, whole).

        """
        # Defaults (0.5, 0.3) yield a 5:3:2 train/valid/test split.
        split_0 = split_threshold[0]
        split_1 = split_threshold[1]
        data_train, data_test = train_test_split(data, train_size=split_0, shuffle=False)
        # NOTE(review): split_1 / split_0 gives a valid fraction of exactly
        # split_1 only when split_0 == 0.5; for other train sizes it should
        # presumably be split_1 / (1 - split_0) — confirm intent.
        data_valid, data_test = train_test_split(data_test, train_size=split_1 / split_0, shuffle=False)
        if need_whole:
            return data_train, data_valid, data_test, data
        return data_train, data_valid, data_test

    @staticmethod
    def decimal_ensure(data: Union[pd.DataFrame, np.ndarray, pd.Series]):
        """Round *data* to 2 decimal places (returns the rounded copy)."""
        return np.round(data, decimals=2)

    @staticmethod
    def split_as(data_sample: List, data: DSN):
        """Slice *data* into three chronological chunks whose lengths copy
        the three template samples in *data_sample* (train/valid/test).

        NOTE(review): the last chunk is taken from the tail — if the three
        lengths do not sum to len(data), chunks may overlap or leave gaps.
        """
        split_threshold_1, split_threshold_2, split_threshold_3 = (
            data_sample[0].shape[0], data_sample[1].shape[0], data_sample[2].shape[0])
        return (data[:split_threshold_1],
                data[split_threshold_1:split_threshold_2 + split_threshold_1], data[-split_threshold_3:])

    @staticmethod
    def out_types(train: np.ndarray, valid: np.ndarray, test: np.ndarray, lens: int, judge: bool = True):
        """

        Args:

            train: training-set data
            valid: validation-set data
            test: test-set data
            lens: total (concatenated) row count
            judge: True -> return an ndarray, False -> return the raw pieces;
                defaults to True

        Returns:
            The stitched data, in one of two forms:
                ndarray: train/valid/test NaN-padded to *lens* rows and
                    stacked along a new last axis
                list: [train, valid, test] unchanged
                NOTE(review): the original docstring promised a name-keyed
                dict for judge=False, but the code builds a plain list —
                confirm which is intended.

        """
        if judge:
            num_dimensions = train.ndim
            max_len = lens
            if num_dimensions == 2:
                train = np.pad(train, ((0, max_len - train.shape[0]), (0, 0)), 'constant', constant_values=np.nan)
                valid = np.pad(valid, ((max_len - test.shape[0] - valid.shape[0], test.shape[0]), (0, 0)), 'constant',
                               constant_values=np.nan)
                test = np.pad(test, ((max_len - test.shape[0], 0), (0, 0)), 'constant', constant_values=np.nan)
                data = np.concatenate((train[:, :, np.newaxis], valid[:, :, np.newaxis], test[:, :, np.newaxis]),
                                      axis=2)
            # dim==3 may not be usable
            elif num_dimensions == 1:
                # NOTE(review): this branch feeds 2-D pad widths to 1-D
                # arrays, so np.pad raises ValueError — effectively broken
                # for 1-D inputs; confirm and fix.
                train = np.pad(train, ((0, max_len - train.shape[0]), (0, 0)), 'constant', constant_values=np.nan)
                valid = np.pad(valid, ((max_len - test.shape[0] - valid.shape[0], test.shape[0]), (0, 0)),
                               'constant',
                               constant_values=np.nan)
                test = np.pad(test, ((max_len - test.shape[0], 0), (0, 0)), 'constant', constant_values=np.nan)
                data = np.concatenate((train[:, :, np.newaxis], valid[:, :, np.newaxis], test[:, :, np.newaxis]),
                                      axis=2)
            else:
                raise ValueError('需提供数据非一维或二维ndarry')
        else:
            data = []
            data.extend([train, valid, test])
        return data

    @staticmethod
    def concat(train: np.ndarray, valid: np.ndarray, test: np.ndarray, ):
        """Concatenate the three splits back into one array: 2-D inputs are
        stacked row-wise; 3-D inputs are stacked along a new last axis."""
        num_dimensions = train.ndim
        if num_dimensions == 2:
            data = np.vstack((train, valid, test))
        # dim==3 may not be usable
        elif num_dimensions == 3:
            # NOTE(review): np.newaxis turns the 3-D inputs into 4-D before
            # concatenating on axis 2 — verify this layout is intended.
            data = np.concatenate((train[:, :, np.newaxis], valid[:, :, np.newaxis], test[:, :, np.newaxis]),
                                  axis=2)
        else:
            raise ValueError('需提供数据非一维或二维ndarry')
        return data

    @staticmethod
    def trans_to_np(data):
        """Return the underlying ndarray for pandas objects; pass anything
        else through unchanged."""
        if isinstance(data, (pd.DataFrame, pd.Series)):
            return data.values
        else:
            return data

    @staticmethod
    def create_whole(data: Union[np.ndarray, list]):
        """
        Information:
            Operates on np.ndarray input (or a list of arrays).

        Args:
            data: 2-D/3-D array, or a list of arrays.

        Returns:
            Array input: a copy of slice 0 is appended along the last axis,
            then index 3 on that axis is overwritten with the nan-min across
            that axis (assumes at least 4 entries on the axis — TODO
            confirm the column layout this relies on).
            List input: the concatenation of all elements is appended to the
            list in place.

        """
        if isinstance(data, np.ndarray):
            dim = data.ndim
            if dim == 2:
                data = np.concatenate((data, data[:, 0][:, np.newaxis]), axis=1)
                data[:, 3] = np.nanmin(data, axis=1)
            elif dim == 3:
                data = np.concatenate((data, data[:, :, 0][:, :, np.newaxis]), axis=2)
                data[:, :, 3] = np.nanmin(data, axis=2)
            else:
                raise ValueError('维度错误')
        elif isinstance(data, list):
            data.append(np.concatenate(data, axis=0))
        return data

    @staticmethod
    def norm_for_axis(data: np.ndarray, axis: int = 1):
        """

        Args:
            axis: axis along which to standardize (default 1).
            data: array to z-score (NaN-aware).

        Returns:
            Z-scored data, clipped to the range -1 to 1.
        """
        # NaN-aware mean along the chosen axis.
        mean_data = np.nanmean(data, axis=axis, keepdims=True)

        # NaN-aware std. NOTE(review): nanstd's default is ddof=0
        # (population std) even though the original comment claimed sample
        # std — confirm which is wanted.
        std_data = np.nanstd(data, axis=axis, keepdims=True)

        # Guard against division by zero.
        std_data[std_data == 0] = 1

        # Z-score standardization.
        factor_np = (data - mean_data) / std_data

        # Clip the value range to [-1, 1].
        factor_np = np.clip(factor_np, -1, 1)
        return factor_np

    @staticmethod
    def norm_index_cumsum(data: np.ndarray, min_periods: int = 40):
        """
        Expanding (cumulative) z-score along axis 0.

        Args:
            data: array whose leading axis is time.
            min_periods: warm-up length; rows before this index become NaN.

        Returns:
            Array of the same shape: (x - expanding_mean) / expanding_std.
        """
        T = data.shape[0]

        # expanding mean via cumulative sums
        cumsum = np.cumsum(data, axis=0)
        count = np.arange(1, T + 1).reshape(-1, *([1] * (data.ndim - 1)))  # shape = (T, 1)
        count = np.broadcast_to(count, data.shape)
        mean = cumsum / count

        # expanding std via E[x^2] - E[x]^2
        # NOTE(review): this form can go slightly negative from float
        # rounding (sqrt -> NaN), and zero std yields inf on division.
        cumsum_sq = np.cumsum(data ** 2, axis=0)
        mean_sq = cumsum_sq / count
        std = np.sqrt(mean_sq - mean ** 2)

        # z-score
        zscore = (data - mean) / std

        # Blank out the warm-up rows.
        zscore[:min_periods] = np.nan

        return zscore

    @staticmethod
    def norm_for_pd(data: Union[pd.DataFrame, pd.Series]):
        """Column-wise z-score clipped to [-1, 1] and rounded to 2 decimals."""
        # Column means.
        mean_data = data.mean(axis=0)

        # Column standard deviations (pandas default ddof=1, sample std).
        std_data = data.std(axis=0)

        # Guard against division by zero.
        # NOTE(review): for a Series input std() is a scalar, so this indexed
        # assignment raises — DataFrame input seems required despite the
        # annotation; confirm.
        std_data[std_data == 0] = 1

        # Z-score standardization.
        data = (data - mean_data) / std_data
        data = np.clip(data, -1, 1)
        return np.round(data, 2)

    @staticmethod
    def norm_for_columns(factor: pd.DataFrame):
        """Row-wise (cross-sectional) z-score clipped to [-1, 1]."""
        # Row means, shaped (T, 1) for broadcasting.
        mean_data = factor.mean(axis=1).values[:, None]

        # Row standard deviations (pandas default ddof=1, sample std).
        std_data = factor.std(axis=1).values[:, None]

        # Guard against division by zero.
        std_data[std_data == 0] = 1

        # Z-score standardization.
        factor = (factor - mean_data) / std_data
        factor = np.clip(factor, -1, 1)
        return factor

    @staticmethod
    def close_now_div_yes_in_now(data: DS):
        """Ratio of each row to the previous row (today / yesterday), rounded
        to 4 decimals; the first row becomes NaN."""
        market_data: pd.DataFrame = data
        market_data = market_data.div(market_data.shift(1))
        print('close now div yes in now finish ')
        return market_data.round(4)

    @staticmethod
    def preprocess(data: pd.DataFrame):
        """
        Preprocess factor/price data into the layout Alphalens expects:
        drop late-listed and long-suspended assets, forward-fill, and slice
        from the analysis start date.
                """
        # 1. Filter out assets listed too late: an asset is kept only if its
        # first valid observation is on or before the analysis start date.
        analysis_start_date = pd.to_datetime("2011-01-01")

        # First non-NaN date per asset (per column).
        first_valid_date = data.apply(lambda col: col.first_valid_index())

        # Keep assets listed on/before the analysis start date.
        assets_by_listing = first_valid_date[first_valid_date <= analysis_start_date].index
        data = data[assets_by_listing]

        # 2. Filter out long-suspended assets via their overall missing ratio.
        missing_ratio = data.isna().mean()  # per-column missing ratio

        # Threshold: keep assets whose missing ratio is below 10%.
        max_missing_ratio = 0.1
        assets_by_missing = missing_ratio[missing_ratio < max_missing_ratio].index

        data = data.ffill()
        data.sort_index(inplace=True)
        data: pd.DataFrame = data.loc['2011-01-01':, assets_by_missing]
        return data

    @staticmethod
    def preprocess_mul(data: pd.DataFrame):
        """
        Preprocess MultiIndex-column data into the layout Alphalens expects.
        - Columns must be a two-level MultiIndex: level 0 = asset code,
          level 1 = field (e.g. 'open').
        """
        analysis_start_date = pd.to_datetime("2011-01-01")

        # Validate the two-level column index.
        if not isinstance(data.columns, pd.MultiIndex):
            raise ValueError("列需为双层索引，level 0为标的代码，level 1为字段")

        # 1. Filter by listing date, per asset.
        def get_asset_first_date(asset_df: pd.DataFrame):
            """Earliest valid date across all of one asset's fields."""
            dates = [asset_df[field].first_valid_index() for field in asset_df.columns]
            valid_dates = [d for d in dates if d is not None]
            return min(valid_dates) if valid_dates else pd.NaT

        # Group by asset to get each asset's earliest valid date.
        # NOTE(review): DataFrame.groupby(axis=1) is deprecated in recent
        # pandas — works today but will need migration.
        first_valid_dates = data.groupby(level=0, axis=1).apply(get_asset_first_date)
        valid_assets = first_valid_dates[first_valid_dates <= analysis_start_date].index
        data = data.loc[:, pd.IndexSlice[valid_assets, :]]

        # 2. Filter by missing ratio, per asset.
        missing_ratio = data.isna().mean()
        asset_missing = missing_ratio.groupby(level=0).max()  # worst field per asset
        valid_assets = asset_missing[asset_missing < 0.1].index
        data = data.loc[:, pd.IndexSlice[valid_assets, :]]

        # Forward-fill and slice to the analysis window.
        data = data.ffill().loc[analysis_start_date:]

        return data

    @staticmethod
    def group_from_back(factor: pd.DataFrame, sharpe_back: pd.DataFrame, n_bins: int = 5):
        """Bucket each row's assets into *n_bins* by factor percentile and
        return the per-bucket mean of *sharpe_back* for every timestamp."""
        percentiles = factor.rank(axis=1, pct=True).to_numpy()  # shape=(T, N)
        sharpe_np = sharpe_back.to_numpy()  # shape=(T, N)

        bins = np.linspace(0, 1, n_bins + 1)
        bins[-1] += 1e-8  # make the rightmost edge inclusive

        groups = np.digitize(percentiles, bins, right=False) - 1  # shape=(T, N)

        result = np.full((factor.shape[0], n_bins), np.nan)

        for g in range(n_bins):
            mask = (groups == g)
            # Per-timestamp mean sharpe of the assets in bucket g;
            # sum and count are both row-wise 2-D operations.
            numerator = np.nansum(np.where(mask, sharpe_np, np.nan), axis=1)  # row-wise in-bucket sum
            denominator = mask.sum(axis=1)  # row-wise in-bucket count

            # Guard against division by zero.
            with np.errstate(divide='ignore', invalid='ignore'):
                mean_sharpe = numerator / denominator
                mean_sharpe[denominator == 0] = np.nan

            result[:, g] = mean_sharpe

        return pd.DataFrame(result, index=factor.index, columns=[f'group_{i}' for i in range(n_bins)])

    @staticmethod
    def corr_select(data: pd.DataFrame):
        """Drop factors that are highly correlated (|corr| > 0.75) with any
        earlier factor, keeping one representative of each correlated set."""
        corr_matrix = data.corr().abs()
        # Upper triangle only, to avoid double-counting pairs.
        upper_tri = np.triu(np.ones(corr_matrix.shape), k=1)

        # Flag highly correlated factor pairs (threshold 0.75).
        high_corr_pairs = (corr_matrix > 0.75) & upper_tri

        # Count how many high-correlation hits each factor has.
        high_corr_counts = high_corr_pairs.sum(axis=1)

        # Factors flagged at least once are treated as redundant.
        redundant_factors = high_corr_counts[high_corr_counts > 0].index.tolist()

        # Drop them.
        filtered_stack = data.drop(columns=redundant_factors)

        print(f"去除 {len(redundant_factors)} 个因子，最终保留 {filtered_stack.shape[1]} 个因子")
        return filtered_stack

    @staticmethod
    def returns(data: Union[pd.DataFrame, pd.Series],
                periods: int = 1,
                freq=None,
                **kwargs: Union[Literal["index", "columns", "rows"], int],
                ):
        """Future relative return over *periods* steps with an absolute-value
        denominator: data / |shifted| - 1, realigned onto the entry bar.

        NOTE(review): unlike absolute_true_future_returns, this takes abs()
        of the denominator — presumably to cope with negative levels;
        confirm intent.
        """
        axis = data._get_axis_number(kwargs.pop("axis", "index"))
        shifted = data.shift(periods=periods, freq=freq, axis=axis, **kwargs)
        # Unsupported left operand type for / ("Self")
        shifted = abs(shifted)
        rs: Union[pd.DataFrame, pd.Series] = data / shifted - 1  # type: ignore[operator]
        rs = rs.shift(-periods)
        return rs

    @staticmethod
    def absolute_true_future_returns(
            data: Union[pd.DataFrame, pd.Series],
            periods: int = 1,
            freq=None,
            **kwargs: Union[Literal["index", "columns", "rows"], int],
    ):
        """

        Args:
            data: price-like series/frame.
            periods: horizon in rows (positive = look ahead).
            freq: forwarded to pandas shift.
            **kwargs: forwarded to pandas shift (e.g. axis).

        Returns:
            For positive periods: (value *periods* rows ahead / today's
            value) minus one — note the -1, this is NOT a plain ratio.
        """
        axis = data._get_axis_number(kwargs.pop("axis", "index"))
        shifted = data.shift(periods=periods, freq=freq, axis=axis, **kwargs)
        # Unsupported left operand type for / ("Self")
        rs: Union[pd.DataFrame, pd.Series] = data / shifted - 1  # type: ignore[operator]
        rs = rs.shift(-periods)
        return rs

    @staticmethod
    def np_returns(data: np.ndarray,
                   periods: int = 1,
                   axis=0):
        """

        Args:
            data: price-like array.
            periods: horizon in rows (positive = look ahead).
            axis: axis to roll along.
        Returns:
            For positive periods: (value *periods* rows ahead / today) - 1 —
            note the -1, this is NOT a plain ratio. The last *periods* rows
            (which np.roll wrapped around) are overwritten with NaN.
        """
        shifted = np.roll(data, -periods, axis=axis)
        rs: np.ndarray = shifted / data - 1  # type: ignore[operator]
        rs[-periods:] = np.nan
        return rs

    @staticmethod
    def future_div(
            data: Union[pd.DataFrame, pd.Series],
            periods: int = 1,
            freq=None,
            **kwargs: Union[Literal["index", "columns", "rows"], int],
    ):
        """

        Args:
            data: price-like series/frame.
            periods: horizon in rows (positive = look ahead).
            freq: forwarded to pandas shift.
            **kwargs: forwarded to pandas shift (e.g. axis).

        Returns:
            For positive periods: value *periods* rows ahead divided by
            today's value — a plain ratio. NOTE(review): the original
            docstring claimed a "-1" adjustment, but the code performs no
            subtraction; the docstring appears copied from a sibling method.
        """
        axis = data._get_axis_number(kwargs.pop("axis", "index"))
        shifted = data.shift(periods=periods, freq=freq, axis=axis, **kwargs)
        # Unsupported left operand type for / ("Self")
        rs: Union[pd.DataFrame, pd.Series] = data / shifted  # type: ignore[operator]
        rs = rs.shift(-periods)
        return rs

    @staticmethod
    def absolute_true_returns_for_inverse(
            data: Union[pd.DataFrame, pd.Series],
            periods: int = 1,
            freq=None,
            **kwargs: Union[Literal["index", "columns", "rows"], int],
    ):
        """Identical computation to absolute_true_future_returns (future
        relative return with -1); kept as a separate entry point for the
        inverse-series pipeline."""
        axis = data._get_axis_number(kwargs.pop("axis", "index"))
        shifted = data.shift(periods=periods, freq=freq, axis=axis, **kwargs)
        # Unsupported left operand type for / ("Self")
        rs: Union[pd.DataFrame, pd.Series] = data / shifted - 1  # type: ignore[operator]
        rs = rs.shift(-periods)
        return rs

    @staticmethod
    def insert_zeros_before_first_valid(x: np.ndarray) -> np.ndarray:
        """Insert a 0 immediately before each column's first non-NaN value,
        growing the time axis by one row (output shape (T+1, ...))."""
        x = np.asarray(x, dtype=np.float64)
        T, *rest_shape = x.shape
        x_flat = x.reshape(T, -1)  # shape = (T, N)
        T, N = x_flat.shape

        # First non-NaN index per column (vectorized).
        # NOTE(review): argmax returns 0 for an all-NaN column, so such
        # columns get a 0 inserted at the top — confirm that's acceptable.
        is_not_nan = ~np.isnan(x_flat)
        first_valid = np.argmax(is_not_nan, axis=0)  # shape = (N,)
        insert_pos = np.maximum(first_valid, 0)

        # Output buffer, one row longer than the input.
        out = np.full((T + 1, N), np.nan)

        for i in range(N):
            col = x_flat[:, i]
            ipos = insert_pos[i]

            out[:ipos, i] = col[:ipos]  # leading (NaN) prefix
            out[ipos, i] = 0  # inserted zero
            out[ipos + 1:, i] = col[ipos:]  # remainder of the column

        # Reshape back to the original trailing shape.
        return out.reshape((T + 1,) + tuple(rest_shape))

    @staticmethod
    def inverse_relative_returns(
            data: Union[pd.DataFrame, pd.Series],
            periods: int = 1,
            freq=None,
            **kwargs: Union[Literal["index", "columns", "rows"], int],
    ):
        """

            Args:
                data: input data — strongly recommended to be prices.
                periods: return window length.
                freq: forwarded to shift.
                **kwargs: forwarded to shift.

            Returns:
                Future relative returns of the "inverse" price series.

        """
        inverse_price = (data.iloc[-1] - data)
        # inverse_price is the P&L of buying at each point and holding to
        # the final day.
        returns = Process.absolute_true_future_returns(inverse_price, periods=periods, freq=freq, **kwargs)
        return returns

    @staticmethod
    def true_cumprod_returns(data: DS,
                             periods: int = 1,
                             freq=None,
                             **kwargs: Union[Literal["index", "columns", "rows"], int], ):
        """Compound *data* (treated as per-bar growth factors) into a level
        series via cumprod, then take its future relative returns."""
        cumprod_data = data.cumprod()
        return Process.absolute_true_future_returns(cumprod_data, periods, freq=freq, **kwargs)

    @staticmethod
    def inverse_relative_values(data: Union[pd.DataFrame, pd.Series]):
        """

        Args:
            data: input data — strongly recommended to be prices.

        Returns:
            Last value minus each value: the P&L of buying at each point and
            holding to the final bar.

        """
        inverse_price = (data.iloc[-1] - data)
        return inverse_price

    @staticmethod
    def multindex_to_3d_np(data: pd.DataFrame):
        """Convert a two-level-column DataFrame into a 3-D ndarray of shape
        (rows, level-1 columns, level-0 groups)."""
        data_column = data.columns.get_level_values(0).unique()
        data = np.stack([
            data.xs(name, level=0, axis=1).values
            for name in data_column
        ], axis=2)
        return data

    @staticmethod
    def are_rows_unique_vectorized(tensor):
        """Per-row uniqueness test for a 2-D torch tensor: True where a row
        contains no duplicated elements. 1-D input is treated as one row."""
        if tensor.dim() < 2:
            tensor = tensor.unsqueeze(0)
        import torch
        # After sorting each row, duplicated values become adjacent.
        sorted_tensor, _ = torch.sort(tensor, dim=1)
        duplicates = (sorted_tensor[:, 1:] == sorted_tensor[:, :-1]).any(dim=1)
        return ~duplicates

    @staticmethod
    def np_stack(data: List[np.ndarray]):
        """Stack per-factor (T, M) arrays, z-score cross-sectionally, and
        flatten into a (T*M, n_factors) float32 DataFrame."""
        T, M = data[0].shape  # number of time steps & number of assets
        factor_matrix = np.stack(data, axis=-1)
        factor_matrix = Process.norm_for_axis(factor_matrix, axis=1)
        factor_matrix = factor_matrix.reshape(T * M, -1).astype(np.float32)
        data = pd.DataFrame(factor_matrix)
        # Return the flattened data: rows are (time, asset) pairs, columns are factors.
        return data

    @staticmethod
    def np_stack_large(data: List[np.ndarray]):
        """Same as np_stack, but spills the result to a pickle under
        temp_results/ and returns only the file path (for large panels)."""
        T, M = data[0].shape  # number of time steps & number of assets
        factor_matrix = np.stack(data, axis=-1)
        factor_matrix = Process.norm_for_axis(factor_matrix, axis=1)
        factor_matrix = factor_matrix.reshape(T * M, -1)
        data = pd.DataFrame(factor_matrix)
        # Rows are (time, asset) pairs, columns are factors.
        tmp_path = f"temp_results/{uuid.uuid4().hex}.pkl"
        os.makedirs("temp_results", exist_ok=True)
        data.to_pickle(tmp_path)
        return tmp_path  # return only the path

    @staticmethod
    def one_np_stack(data: np.ndarray) -> pd.Series:
        """Flatten a single factor array into a Series (default RangeIndex)."""
        factor_matrix = data.flatten()
        data = pd.Series(factor_matrix)
        return data

    @staticmethod
    def convert_signal(df):
        """Stack a wide (datetime x instrument) frame into a MultiIndex
        Series with index names (datetime, instrument)."""
        series = df.stack()
        series.index.set_names(["datetime", "instrument"], inplace=True)
        return series

    @staticmethod
    def tensor_to_np(datas: Generator):
        """Drain a generator of torch-tensor batches into a flat list of
        numpy arrays (each tensor moved to CPU first)."""
        results = []
        for data in datas:
            results.extend([i.cpu().numpy() for i in data])
        return results

    @staticmethod
    def unflatten(data: np.ndarray, columns, index):
        """Reshape a flat array back into an (index x columns) DataFrame.
        NOTE(review): identical body to flatten_for_pd below — one of the
        two is presumably redundant."""
        data = pd.DataFrame(data.reshape(-1, len(columns)), columns=columns, index=index)
        return data

    @staticmethod
    def flatten_for_pd(data: DS, columns, index):
        """Reshape array-like data into an (index x columns) DataFrame (same
        computation as unflatten)."""
        data = pd.DataFrame(data.reshape(-1, len(columns)), columns=columns, index=index)
        return data

    @staticmethod
    def true_returns(data: DS, preclose: DS):
        """Element-wise ratio data / preclose (e.g. close over prev close)."""
        true_returns = data / preclose
        return true_returns

    @staticmethod
    def encode_to_hash(data):
        """
        Encode a str or list into a legal file name.
        NOTE(review): despite the original "reversible" claim, the final md5
        step is one-way — decode_from_filename cannot invert this output.
        """
        if isinstance(data, str):
            obj = {"type": "str", "data": data}
        elif isinstance(data, list):
            obj = {"type": "list", "data": data}
        else:
            raise TypeError("Only str and list are supported")

        json_str = json.dumps(obj, separators=(',', ':'))  # compact separators
        compressed = zlib.compress(json_str.encode())
        b64 = base64.urlsafe_b64encode(compressed).decode().rstrip('=')
        hashes = Process.hash(b64)
        return hashes

    @staticmethod
    def encode_to_b64(data):
        """JSON -> zlib -> url-safe base64 (padding stripped): filename-safe
        and reversible via decode_from_b64."""
        json_str = json.dumps(data, separators=(',', ':'))  # compact separators
        compressed = zlib.compress(json_str.encode())
        b64 = base64.urlsafe_b64encode(compressed).decode().rstrip('=')
        return b64

    @staticmethod
    def decode_from_b64(data):
        """Inverse of encode_to_b64."""
        padded = data + '=' * (-len(data) % 4)  # restore base64 padding
        compressed = base64.urlsafe_b64decode(padded)
        json_str = zlib.decompress(compressed).decode()
        obj = json.loads(json_str)
        return obj

    @staticmethod
    def decode_from_filename(filename):
        """
        Decode a file name produced by the b64 pipeline back to its original
        type (str or list).
        """
        padded = filename + '=' * (-len(filename) % 4)  # restore base64 padding
        compressed = base64.urlsafe_b64decode(padded)
        json_str = zlib.decompress(compressed).decode()
        obj = json.loads(json_str)

        if obj["type"] == "str":
            return obj["data"]
        elif obj["type"] == "list":
            return obj["data"]
        else:
            raise ValueError("Unknown data type")

    @staticmethod
    def hash(name: Union[str, List]):
        """md5 hex digest of *name*; lists are first canonicalized through
        compact JSON + zlib + url-safe base64. Used for file naming, not
        security."""
        if isinstance(name, str):
            name = name
        elif isinstance(name, list):
            json_str = json.dumps(name, separators=(',', ':'))  # compact separators
            compressed = zlib.compress(json_str.encode())
            b64 = base64.urlsafe_b64encode(compressed).decode().rstrip('=')
            name = b64
        return hashlib.md5(name.encode()).hexdigest()

    @staticmethod
    def rolling_turn_sharpe(x: np.ndarray, window: int = 20) -> np.ndarray:
        """
        Rolling Sharpe ratio over sliding windows, based on log returns.

        Args:
            x: shape = (n_dates, n_assets); each column is one asset's NAV.
            window: sliding window size.

        Returns:
            shape = (n_dates, n_assets): per-window Sharpe ratios; the last
            window-1 rows stay NaN, and the window results are written back
            REVERSED (see the final assignment).
        """
        T, N = x.shape
        result = np.full((T, N), np.nan)

        if T < window:
            return result
        from numpy.lib.stride_tricks import sliding_window_view
        # Sliding-window view, shape (T - w + 1, N, window).
        x_win = sliding_window_view(x, window_shape=window, axis=0)  # shape: (T - w + 1,N, window)
        # log_return = np.empty(T, N)
        # Log returns inside each (reversed) window.
        # NOTE(review): log_return[:, :, 0] is never written (np.empty
        # garbage); the NaN-zeroing below only fixes NaN entries, yet
        # nanstd later includes slot 0 — likely a real bug; verify.
        log_return = np.empty((T - window + 1, N, window))
        x_win_turn = x_win[:, :, ::-1]
        log_return[:, :, 1:] = np.log(x_win_turn[:, :, 1:] / x_win_turn[:, :, :-1])  # shape: (T - w + 1, N, w-1)
        log_return[np.isnan(log_return)] = 0

        # Annualized return over the window.
        total_return = x_win_turn[:, :, -1] / x_win_turn[:, :, 0] - 1
        annual_ret = np.power(1 + total_return, 252 / window) - 1

        # Annualized volatility.
        std_ret = np.nanstd(log_return, axis=-1, ddof=1)
        annual_vol = np.sqrt(252) * std_ret

        # Sharpe ratio.
        with np.errstate(divide='ignore', invalid='ignore'):
            sharpe = annual_ret / annual_vol
            sharpe = np.where(annual_vol == 0, np.nan, sharpe)

        result[: 1 - window] = sharpe[::-1]
        return result

    @staticmethod
    def whole_position_sharp(price: DS, holding_day: int):
        """Holding-period Sharpe per (date, asset): reverses *price* before
        the rolling computation — presumably so each window looks forward in
        original time; TODO confirm — then clips to ±1000 and restores the
        original index/columns."""
        price_cal = price[::-1]
        sharpe = Process.rolling_turn_sharpe(price_cal.values, holding_day)
        sharpe = np.clip(sharpe, -1000, 1000)
        sharpe = pd.DataFrame(sharpe, index=price.index, columns=price.columns)
        return sharpe

    @staticmethod
    def split(data: DSN, need_whole: bool = True):
        """

        Information:
            Thin wrapper over Process.split_valid using its default
            thresholds.

        Returns:
            (train, valid, test), plus the whole sample when need_whole.
        """
        # split_valid's defaults (0.5, 0.3) define the split ratio.
        out = Process.split_valid(data, need_whole=need_whole)
        return out

    @staticmethod
    def useful_in_index(data: DSN, nan_or_zero: bool = True):
        """Blank out the inter-quartile (25%-75%) band of *data*, keeping
        only the extreme values; the middle is set to NaN (default) or 0.

        NOTE(review): mutates *data* in place, and np.quantile is not
        NaN-aware — NaNs in the input poison the quantiles; confirm inputs
        are NaN-free here.
        """
        data_quantile_no_useful = (data < np.quantile(data, 0.75, axis=0)) & (data > np.quantile(data, 0.25, axis=0))
        if nan_or_zero:
            data[data_quantile_no_useful] = np.nan
        else:
            data[data_quantile_no_useful] = 0
        return data

    @staticmethod
    def keep_longest_true_segment(mask: np.ndarray, windows: int, min_length: int = 10) -> np.ndarray:
        """Keep only True runs of at least *min_length*, extending each kept
        run *windows* steps to the left; everything else becomes False.

        NOTE(review): when a run starts before index *windows*,
        ``start - windows`` goes negative and Python slicing wraps around
        from the array tail — confirm that edge case.
        """
        mask = mask.astype(int)
        diff = np.diff(np.concatenate(([0], mask, [0])))
        start_idx = np.where(diff == 1)[0]
        end_idx = np.where(diff == -1)[0]

        lengths = end_idx - start_idx
        # Drop runs shorter than min_length.
        keep = lengths >= min_length
        starts_to_keep = start_idx[keep]
        ends_to_keep = end_idx[keep]
        # Build the final mask.
        new_mask = np.zeros_like(mask, dtype=bool)
        for start, end in zip(starts_to_keep, ends_to_keep):
            new_mask[start - windows:end] = True
        return new_mask
        # max_idx = np.argmax(lengths)
        # start, end = start_idx[max_idx], end_idx[max_idx]
        #
        # new_mask = np.zeros_like(mask, dtype=bool)
        # new_mask[start:end] = True
        # return new_mask

    # @staticmethod
    # def close_n_div_now_in_now(data: pd.Series, n: int):
    #     """
    #
    #     Args:
    #         data:
    #         n:
    #
    #     Returns:
    #         Notes
    #     """
    #     market_data: pd.Series = data
    #     index = market_data.index.get_indexer([pd.to_datetime(now)], 'nearest')[0]
    #     data: pd.DataFrame = market_data.loc[pd.to_datetime(last_day):, :]
    #     # 超出index最大值是没关系的
    #     data = data.iloc[:index + n + 2, :]
    #     data = data.shift(-n) / data
    #     data = data.iloc[:index, :]
    #     print(f'close_{n}_div_now_in_now')
    #     return data.round(4)


def rolling_norm_to_returns(data: Union[pd.DataFrame, pd.Series]):
    """252-bar rolling z-score of *data* shifted up by 1 so it reads like a
    growth factor (returns + 1).

    Values are first clipped to ±1e100 against overflow; the rolling std's
    leading NaN is back-filled and zero stds are replaced with the mean std.
    """
    clipped = np.clip(data, -1e100, 1e100)
    roll_mean = clipped.rolling(window=252, min_periods=1).mean()
    roll_std = clipped.rolling(window=252, min_periods=1).std()
    # The first bar's std is NaN (single observation): back-fill it.
    roll_std = roll_std.bfill()
    # Zero std would blow up the division; substitute the average std.
    roll_std = roll_std.replace(0, roll_std.mean(axis=0))
    normed = (clipped - roll_mean) / roll_std
    # Map into return-like space.
    return normed + 1


def remove_short_sequences(data: np.ndarray, min_length):
    """Flip boolean True-runs shorter than *min_length* to False.

    Uses scipy's ``label``/``find_objects`` to locate each contiguous True
    run; runs shorter than the threshold are inverted. The input array is
    left untouched (a copy is returned).
    """
    # Label each contiguous nonzero (True) run.
    labeled, _ = label(data)

    # Work on a copy so the caller's array is preserved.
    cleaned = data.copy()

    # Invert every run that is too short.
    for region in find_objects(labeled):
        run = region[0]
        if run.stop - run.start < min_length:
            cleaned[region] = not data[run.start]

    return cleaned


# Shared module-level instance; Process is stateless, so this is purely a
# convenience handle for callers that prefer `pr.method(...)`.
pr = Process()
