"""
计算加速器
=========

提供Numba JIT编译和其他计算加速功能。
"""

import numpy as np
from typing import Callable, Any, Dict
import warnings

# Optional numba import: when numba is missing, install no-op stand-ins for
# `jit` and `prange` so the rest of the module keeps working in pure Python.
try:
    from numba import jit, prange
    NUMBA_AVAILABLE = True
except ImportError:
    NUMBA_AVAILABLE = False
    warnings.warn("Numba not available. Install numba for better performance: pip install numba")

    def jit(*args, **kwargs):
        """No-op replacement for ``numba.jit``.

        Supports both the bare form (``@jit``) and the parameterized form
        (``@jit(nopython=True, ...)``); the decorated function is returned
        unchanged either way.
        """
        # Bare usage: `@jit` calls this with the function itself as the
        # single positional argument — return it untouched.
        if len(args) == 1 and callable(args[0]) and not kwargs:
            return args[0]

        def decorator(func):
            return func
        return decorator

    def prange(*args):
        """No-op replacement for ``numba.prange``: a plain serial range.

        Accepts the same ``(stop)`` / ``(start, stop[, step])`` forms as
        ``numba.prange``.
        """
        return range(*args)


class NumbaAccelerator:
    """Numba-based computation accelerator.

    Bundles JIT compilation with a per-name cache of compiled functions,
    memory-layout optimization for array inputs, and small introspection
    helpers for the numba runtime.
    """

    def __init__(self, use_numba: bool = True):
        """Initialize the accelerator.

        Parameters:
        -----------
        use_numba : bool
            Whether to use Numba acceleration (only honored when the
            numba package is actually importable).
        """
        self.use_numba = use_numba and NUMBA_AVAILABLE
        # Cache of compiled functions, keyed by the function's __name__.
        self.compiled_functions = {}

        if not NUMBA_AVAILABLE and use_numba:
            warnings.warn("Numba requested but not available. Using pure Python.")

    def compile_factor(self, func: Callable, **jit_kwargs) -> Callable:
        """JIT-compile a factor function, caching the result by name.

        Parameters:
        -----------
        func : Callable
            The function to compile.
        **jit_kwargs
            Extra options forwarded to ``numba.jit`` (override the
            defaults ``nopython=True, parallel=True, cache=True``).

        Returns:
        --------
        Callable
            The compiled function, or ``func`` unchanged when numba is
            disabled or compilation fails (a warning is emitted).
        """
        if not self.use_numba:
            return func

        key = func.__name__
        cached = self.compiled_functions.get(key)
        if cached is not None:
            # NOTE(review): the cache is keyed by __name__ only, so a
            # different function with the same name, or different
            # jit_kwargs on a later call, will get the first compilation.
            return cached

        options = {
            'nopython': True,
            'parallel': True,
            'cache': True
        }
        options.update(jit_kwargs)

        try:
            compiled = jit(**options)(func)
        except Exception as e:
            warnings.warn(f"Failed to compile {key} with Numba: {str(e)}")
            return func

        self.compiled_functions[key] = compiled
        return compiled

    def optimize_memory_layout(self, data: np.ndarray) -> np.ndarray:
        """Ensure row-major (C-contiguous) memory layout.

        Parameters:
        -----------
        data : np.ndarray
            Input array.

        Returns:
        --------
        np.ndarray
            ``data`` itself when already C-contiguous, otherwise a
            C-contiguous copy.
        """
        if data.flags['C_CONTIGUOUS']:
            return data
        return np.ascontiguousarray(data)

    def parallel_compute(self, func: Callable, data: np.ndarray,
                        *args, **kwargs) -> np.ndarray:
        """Run ``func`` over ``data`` with layout and JIT optimizations.

        Parameters:
        -----------
        func : Callable
            The computation function.
        data : np.ndarray
            Input array (first positional argument of ``func``).
        *args, **kwargs
            Remaining arguments forwarded to ``func``.

        Returns:
        --------
        np.ndarray
            The result of the (possibly compiled) function call.
        """
        contiguous = self.optimize_memory_layout(data)
        runner = self.compile_factor(func)
        return runner(contiguous, *args, **kwargs)

    def cache_compiled_functions(self) -> Dict[str, Any]:
        """Summarize the compiled-function cache.

        Returns:
        --------
        Dict[str, Any]
            Counts, cached function names, and numba status flags.
        """
        return {
            'compiled_count': len(self.compiled_functions),
            'function_names': list(self.compiled_functions.keys()),
            'numba_available': NUMBA_AVAILABLE,
            'numba_enabled': self.use_numba
        }

    def clear_cache(self):
        """Drop all cached compiled functions."""
        self.compiled_functions.clear()

    @staticmethod
    def is_numba_available() -> bool:
        """Return True when the numba package could be imported."""
        return NUMBA_AVAILABLE

    @staticmethod
    def get_numba_info() -> Dict[str, Any]:
        """Report numba availability, version, and threading layer."""
        if not NUMBA_AVAILABLE:
            return {
                'available': False,
                'version': None,
                'threading_layer': None
            }
        import numba
        return {
            'available': True,
            'version': numba.__version__,
            'threading_layer': getattr(numba.config, 'THREADING_LAYER', 'unknown')
        }


# Pre-compiled commonly used kernels
@jit(nopython=True, parallel=True, cache=True)
def parallel_rolling_sum(data: np.ndarray, window: int) -> np.ndarray:
    """Rolling sum over the trailing `window` columns, parallel over rows.

    `data` is (n_stocks, n_days); the first window-1 columns of the output
    are NaN because no full window exists there yet.
    """
    n_stocks, n_days = data.shape
    out = np.empty((n_stocks, n_days))
    out[:, :window - 1] = np.nan

    # Rows (stocks) are independent, so prange parallelizes them safely.
    for stock in prange(n_stocks):
        for day in range(window - 1, n_days):
            out[stock, day] = np.sum(data[stock, day - window + 1:day + 1])

    return out


@jit(nopython=True, parallel=True, cache=True)
def parallel_rolling_mean(data: np.ndarray, window: int) -> np.ndarray:
    """Rolling mean over the trailing `window` columns, parallel over rows.

    `data` is (n_stocks, n_days); the first window-1 columns of the output
    are NaN because no full window exists there yet.
    """
    n_stocks, n_days = data.shape
    out = np.empty((n_stocks, n_days))
    out[:, :window - 1] = np.nan

    # Rows (stocks) are independent, so prange parallelizes them safely.
    for stock in prange(n_stocks):
        for day in range(window - 1, n_days):
            out[stock, day] = np.mean(data[stock, day - window + 1:day + 1])

    return out


@jit(nopython=True, parallel=True, cache=True)
def parallel_cross_sectional_rank(data: np.ndarray) -> np.ndarray:
    """Cross-sectional percentile rank per day, parallel over days.

    For each column (day) of `data` (n_stocks, n_days), each valid entry
    gets the fraction of valid entries <= itself (ties counted on the
    "less or equal" side). NaN entries stay NaN, and a day with fewer
    than two valid observations is entirely NaN.
    """
    n_stocks, n_days = data.shape
    out = np.empty((n_stocks, n_days))

    # Columns (days) are independent, so prange parallelizes them safely.
    for day in prange(n_days):
        column = data[:, day]
        valid = ~np.isnan(column)

        if np.sum(valid) > 1:
            valid_values = column[valid]
            n_valid = len(valid_values)
            for stock in range(n_stocks):
                if valid[stock]:
                    # Percentile rank: share of valid values <= this value.
                    out[stock, day] = np.sum(valid_values <= column[stock]) / n_valid
                else:
                    out[stock, day] = np.nan
        else:
            out[:, day] = np.nan

    return out