"""
    ================================================================================
                            ------------utf-8--------------
    ================================================================================
@Author: 
    rfdsg
@Create Time: 
    2024/9/24 - 23:53
@Description:

@Attention:
    
"""
import inspect
import os
import time
from copy import deepcopy
from functools import wraps
from typing import List, Union, Optional, cast, Tuple, Callable

import numpy as np
import pandas as pd
import torch
from joblib import Parallel, delayed
from tqdm import tqdm


class OperatorClass:
    """Base class for factor operators.

    Subclasses implement ``run`` (pandas implementation) and ``static``
    (NumPy implementation).  The decorator helpers merge caller kwargs into
    ``default_parameter`` before the call and post-process / label the result.
    The ``multi_*`` methods fan computations out over column groups and, for
    the ``*_from_gp`` variants, over joblib worker processes in batches of 5.

    NOTE (from the original author): when ``default_data_type`` has two or
    more entries, the input column/table must carry column names.
    """

    def __init__(self):
        # Default keyword arguments merged into every call; see _join_parameter.
        self.default_parameter = {}
        # Declares which input layouts the operator accepts.
        self.default_data_type = []
        # Free-form description returned by __str__.
        self.txt = ''

    def set_txt(self, txt: str):
        """Store the human-readable description returned by ``__str__``."""
        self.txt = txt

    @staticmethod
    def _replace_in_dict(original_dict: dict, replacement_dict: dict) -> dict:
        """Return a copy of ``original_dict`` whose values are overridden by
        ``replacement_dict`` for every shared key.

        Keys present only in ``replacement_dict`` are ignored.  Neither input
        is mutated: a deep copy is taken before any assignment.
        """
        original_dict = deepcopy(original_dict)
        # Only values are replaced, never keys, so iterating the keys view
        # directly is safe here (no key insertion/removal during iteration).
        for key in original_dict.keys():
            if key in replacement_dict:
                original_dict[key] = replacement_dict[key]
        return original_dict

    def _join_parameter(self, input_dict: dict) -> dict:
        """Merge caller-supplied kwargs into this operator's defaults.

        Bug fix: the original tested ``if not dict`` — the builtin type,
        which is always truthy — so the branch was dead; had it ever fired
        it returned ``0``, which would crash callers doing ``**param_dict``.
        An empty input now yields a copy of the defaults, exactly what
        ``_replace_in_dict(defaults, {})`` would have produced.
        """
        if not input_dict:
            return deepcopy(self.default_parameter)
        return self._replace_in_dict(self.default_parameter, input_dict)

    @staticmethod
    def decorate_type(func):
        """Decorator: run the operator, clip overflow, z-score the output over
        a rolling 252-bar window, shift by +1 (return-like form), and name the
        result ``<ClassName>_<param values>``."""

        @wraps(func)
        def wrapper(self, *args, **kwargs):
            param_dict = self._join_parameter(kwargs)
            result = func(self, *args, **param_dict)
            # Clamp to avoid inf/overflow propagating downstream.
            result: pd.DataFrame = np.clip(result, -1e100, 1e100)
            result_mean = result.rolling(window=252, min_periods=1).mean()
            result_std: pd.Series = result.rolling(window=252, min_periods=1).std()
            # First window has NaN std; back-fill before use.
            result_std.bfill(inplace=True)
            # A zero std would divide by zero; substitute the mean std.
            result_std[result_std == 0] = result_std.mean()
            result = (result - result_mean) / result_std
            result += 1  # map to a return-rate-like representation
            class_name = self.__class__.__name__
            param_name = '_' + '_'.join(str(value) for value in param_dict.values())
            result.name = class_name + param_name
            return result

        return wrapper

    @staticmethod
    def kwarg_decorate(func):
        """Decorator: run the operator, cast to float, clip overflow, and name
        the result ``<ClassName>_<param values>`` (no normalization)."""

        @wraps(func)
        def wrapper(self, *args, **kwargs):
            param_dict = self._join_parameter(kwargs)
            result = func(self, *args, **param_dict)
            result = result.astype(float)
            # Clamp to avoid inf/overflow propagating downstream.
            result: pd.DataFrame = np.clip(result, -1e100, 1e100)
            class_name = self.__class__.__name__
            param_name = '_' + '_'.join(str(value) for value in param_dict.values())
            result.name = class_name + param_name
            return result

        return wrapper

    @staticmethod
    def same_pd(func):
        """Decorator: run the operator unchanged except for naming the result
        ``<ClassName>_<param values>``; assumes a pandas return value."""

        @wraps(func)
        def wrapper(self, *args, **kwargs):
            param_dict = self._join_parameter(kwargs)
            result = func(self, *args, **param_dict)
            class_name = self.__class__.__name__
            param_name = '_' + '_'.join(str(value) for value in param_dict.values())
            result.name = class_name + param_name
            return result

        return wrapper

    @staticmethod
    def same(func):
        """Decorator: only merge kwargs into the defaults; pass the result
        through untouched."""

        @wraps(func)
        def wrapper(self, *args, **kwargs):
            param_dict = self._join_parameter(kwargs)
            result = func(self, *args, **param_dict)
            return result

        return wrapper

    @staticmethod
    def static(data: pd.DataFrame):
        """NumPy implementation hook; overridden by subclasses."""
        pass

    def run(self, data: pd.DataFrame, **kwargs):
        """Pandas implementation hook; overridden by subclasses."""
        pass

    def get_source(self, np_or_frame: bool = True):
        """Return the source code of this operator's implementation.

        Args:
            np_or_frame(bool): when True (default) return the NumPy
                implementation (``static``); otherwise the pandas one (``run``).

        Returns:
            str: the source text as seen by ``inspect.getsource``.
        """
        if np_or_frame:
            source_code = inspect.getsource(self.static)
        else:
            source_code = inspect.getsource(self.run)
        return source_code

    def __str__(self):
        return self.txt

    def multi_run(self, data: pd.DataFrame, **kwargs):
        """Apply ``run`` per top-level column group (MultiIndex columns) or
        per column (flat columns) and stitch the results back together."""
        if isinstance(data.columns, pd.MultiIndex):
            result = (data.T
                      .groupby(level=0)
                      .apply(lambda df: self.run(df.droplevel(0).T, **kwargs)).T
                      )
        else:
            result = data.T.apply(lambda df: self.run(df.T, **kwargs)).T
        return result

    @staticmethod
    def multi_run_from_gp(data: pd.DataFrame, fun_list: list[Callable]):
        """Generator: evaluate GP-style callables (objects with ``execute``)
        over ``data`` in parallel batches of 5, yielding results as each
        batch completes.  Timing statistics are printed at the end.
        """

        def apply_fun(fun, data_values: np.ndarray):
            # Hoisted out of the inner loop: one import per worker call.
            import gc
            print('apply_fun begin')
            if data_values.ndim == 3:
                # Per-slice evaluation along the third (group) axis.
                app_result = []
                for i_ in range(data_values.shape[2]):
                    try:
                        app_result.append(fun.execute(data_values[:, :, i_]))
                    except Exception as e:
                        print(f"Error in {str(fun)}: {str(e)}")
                        # Fall back to zeros so one failure does not block the batch.
                        app_result.append(np.zeros((data_values.shape[0], 1)))
                    gc.collect()  # manually release memory
                app_result = np.hstack(app_result)
                print('finish')
                return app_result

            elif data_values.ndim == 2:
                app_result = fun.execute(data_values)
                print('finish')
                return app_result
            else:
                raise ValueError('数据错误')

        if isinstance(data.columns, pd.MultiIndex):
            # Stack each top-level group into a (rows, cols, groups) array.
            data_column = data.columns.get_level_values(0).unique()
            data = np.stack([
                data.xs(name, level=0, axis=1).values
                for name in data_column
            ], axis=2)
        elif isinstance(data.columns, pd.Index):
            data = data.values
        else:
            raise ValueError('dataframe类型错误')
        print('data_process finish')

        total_start = time.perf_counter()  # start of total timing
        batch_times = []  # per-batch durations

        for i in tqdm(range(0, len(fun_list), 5), desc="Processing Batches"):
            batch_start = time.perf_counter()
            batch_funs = fun_list[i:i + 5]
            # os.cpu_count() can be None; fall back so the arithmetic is safe.
            batch_results = Parallel(n_jobs=max(1, (os.cpu_count() or 2) - 1),
                                     max_nbytes='10M',  # cap shared-memory size
                                     prefer="processes"  # force multiprocess mode
                                     )(delayed(apply_fun)(fun, data)
                                       for fun in batch_funs)
            batch_duration = time.perf_counter() - batch_start
            print(f"当前批次耗时: {batch_duration}s")
            batch_times.append(batch_duration)
            yield from batch_results
        total_duration = time.perf_counter() - total_start

        # Timing summary; guard against an empty fun_list (no batches ran).
        print(f"\n总耗时: {total_duration:.2f}s")
        if batch_times:
            print(f"每批次平均耗时: {sum(batch_times) / len(batch_times):.2f}s")
            print(f"最长批次耗时: {max(batch_times):.2f}s")

    @staticmethod
    def multi_2(data: pd.DataFrame, fun_list: list[Callable]):
        """Evaluate callables over ``data`` (DataFrame path, not stacked
        arrays) in a single parallel pass and return the list of results."""

        def apply_fun(fun, data_values):
            print('begin fun')
            if isinstance(data_values.columns, pd.MultiIndex):
                # One execute() per top-level column group, then re-assemble.
                result = (data_values.T
                          .groupby(level=0)
                          .apply(lambda df: fun.execute(df.droplevel(0).T.values)).T)
                result = pd.DataFrame(np.vstack(result), index=data_values.columns.get_level_values(0).unique(),
                                      columns=data_values.index)
                result = result.T
                print('end fun')
                return result
            else:
                # Bug fix: the original referenced the enclosing ``data``
                # instead of the parameter (same object at the call site,
                # but wrong under pickling to worker processes).
                return data_values.T.apply(lambda df: fun.execute(df.T.values)).T

        start_time = time.perf_counter()
        # Parallel execution over all callables at once.
        results = Parallel(n_jobs=15,
                           max_nbytes='10M',  # cap shared-memory size
                           prefer="processes"  # force multiprocess mode
                           )(delayed(apply_fun)(fun, data)
                             for fun in tqdm(fun_list, desc="Processing"))
        end_time = time.perf_counter()
        print(f"df算法: {end_time - start_time:.2f}s")
        return results

    @staticmethod
    def multi_run_from_gp2(data: pd.DataFrame, fun_list: list[Callable]):
        """Generator: like ``multi_run_from_gp`` but each callable receives the
        whole stacked 3-D array at once (requires MultiIndex columns)."""

        def apply_fun(fun, data_values: np.ndarray):
            import gc
            print('apply_fun begin')
            app_results = fun.execute(data_values)
            gc.collect()  # manually release memory
            print('finish')
            return app_results

        # Stack each top-level group into a (rows, cols, groups) array.
        data_column = data.columns.get_level_values(0).unique()
        data = np.stack([
            data.xs(name, level=0, axis=1).values
            for name in data_column
        ], axis=2)
        total_start = time.perf_counter()  # start of total timing
        batch_times = []  # per-batch durations

        for i in tqdm(range(0, len(fun_list), 5), desc="Processing Batches"):
            batch_start = time.perf_counter()
            batch_funs = fun_list[i:i + 5]
            # os.cpu_count() can be None; fall back so the arithmetic is safe.
            batch_results = Parallel(n_jobs=max(1, (os.cpu_count() or 2) - 1),
                                     max_nbytes='10M',  # cap shared-memory size
                                     return_as='generator',
                                     verbose=1,
                                     prefer="processes"  # force multiprocess mode
                                     )(delayed(apply_fun)(fun, data)
                                       for fun in batch_funs)
            batch_duration = time.perf_counter() - batch_start
            print(f"当前批次耗时: {batch_duration}s")
            batch_times.append(batch_duration)
            yield from batch_results
        total_duration = time.perf_counter() - total_start

        # Timing summary; guard against an empty fun_list (no batches ran).
        print(f"\n总耗时: {total_duration:.2f}s")
        if batch_times:
            print(f"每批次平均耗时: {sum(batch_times) / len(batch_times):.2f}s")
            print(f"最长批次耗时: {max(batch_times):.2f}s")

