"""性能优化模块
提供高性能的矩阵运算实现和内存管理
"""
import numpy as np
from scipy import sparse
from typing import Union, Optional, Tuple, List
import warnings
import threading
import os
from concurrent.futures import ThreadPoolExecutor

# Optional numba dependency: when numba is missing, install no-op stand-ins
# so that @jit-decorated code still runs as plain (uncompiled) Python.
try:
    from numba import jit, prange
    NUMBA_AVAILABLE = True
except ImportError:
    NUMBA_AVAILABLE = False

    def jit(*args, **kwargs):
        """No-op replacement for ``numba.jit``.

        Supports both the bare ``@jit`` form (a single callable positional
        argument, no kwargs) and the parameterized ``@jit(...)`` form.
        """
        if len(args) == 1 and callable(args[0]) and not kwargs:
            # Bare @jit usage: return the function unchanged.
            return args[0]

        def decorator(func):
            return func
        return decorator

    def prange(*args):
        """No-op replacement for ``numba.prange`` (a plain ``range``)."""
        # range() accepts no keyword arguments, so none are forwarded.
        return range(*args)

class PerformanceOptimizer:
    """High-performance matrix operations plus a shared worker thread pool.

    Uses Numba JIT acceleration when available (module-level
    ``NUMBA_AVAILABLE`` flag); otherwise falls back to plain NumPy.
    """

    def __init__(self, max_workers: int = None):
        """Initialize the optimizer.

        Args:
            max_workers: thread-pool size; defaults to min(4, CPU count).
        """
        # os.cpu_count() may return None; fall back to 1 so min() cannot
        # fail with a TypeError.
        self.max_workers = max_workers or min(4, os.cpu_count() or 1)
        self.thread_pool = ThreadPoolExecutor(max_workers=self.max_workers)

    @staticmethod
    def fast_matrix_multiply(A: np.ndarray, B: np.ndarray) -> np.ndarray:
        """Matrix multiplication, Numba-accelerated when available.

        Falls back to the BLAS-backed ``@`` operator without Numba.
        """
        if NUMBA_AVAILABLE:
            return PerformanceOptimizer._numba_matmul(A, B)
        return A @ B

    @staticmethod
    @jit(nopython=True, parallel=True)
    def _numba_matmul(A: np.ndarray, B: np.ndarray) -> np.ndarray:
        """Triple-loop matmul compiled by Numba; outer loop runs in parallel."""
        m, k = A.shape
        k2, n = B.shape

        if k != k2:
            raise ValueError("Matrix dimensions incompatible")

        C = np.zeros((m, n), dtype=A.dtype)

        # Only the outermost prange is parallelized by Numba; the inner
        # loops are explicit serial ranges (the previous nested prange on
        # j was silently treated as a plain range anyway).
        for i in prange(m):
            for j in range(n):
                for l in range(k):
                    C[i, j] += A[i, l] * B[l, j]

        return C

    @staticmethod
    def memory_efficient_operations(matrices: List[np.ndarray],
                                  operation: str = 'chain_multiply') -> np.ndarray:
        """Dispatch a memory-efficient matrix operation.

        Args:
            matrices: matrices to operate on.
            operation: currently only 'chain_multiply' is supported.

        Raises:
            ValueError: on an unknown operation name.
        """
        if operation == 'chain_multiply':
            return PerformanceOptimizer._chain_multiply_optimized(matrices)
        raise ValueError(f"Unknown operation: {operation}")

    @staticmethod
    def _chain_multiply_optimized(matrices: List[np.ndarray]) -> np.ndarray:
        """Multiply a chain of matrices in the cost-optimal order.

        Uses the classic O(n^3) matrix-chain dynamic program over
        parenthesizations, then multiplies recursively along the recorded
        split points.

        Raises:
            ValueError: if the list is empty or adjacent matrix dimensions
                do not match.
        """
        if len(matrices) == 0:
            raise ValueError("Empty matrix list")
        if len(matrices) == 1:
            return matrices[0].copy()

        n = len(matrices)

        # Validate the chain up front so the DP cost model is meaningful
        # (previously a mismatch only surfaced inside the final '@').
        for idx in range(n - 1):
            if matrices[idx].shape[1] != matrices[idx + 1].shape[0]:
                raise ValueError(
                    f"Chain dimension mismatch between matrix {idx} and {idx + 1}"
                )

        # dims[i] and dims[i+1] are the row/column counts of matrices[i].
        dims = [mat.shape[0] for mat in matrices] + [matrices[-1].shape[1]]

        # dp[i][j]: minimal scalar multiplications for matrices i..j;
        # split[i][j]: the k that achieves that minimum.
        dp = [[0] * n for _ in range(n)]
        split = [[0] * n for _ in range(n)]

        for length in range(2, n + 1):
            for i in range(n - length + 1):
                j = i + length - 1
                dp[i][j] = float('inf')

                for k in range(i, j):
                    cost = dp[i][k] + dp[k + 1][j] + dims[i] * dims[k + 1] * dims[j + 1]
                    if cost < dp[i][j]:
                        dp[i][j] = cost
                        split[i][j] = k

        def multiply_range(start: int, end: int) -> np.ndarray:
            # Recursively multiply following the optimal split points.
            if start == end:
                return matrices[start]
            k = split[start][end]
            return multiply_range(start, k) @ multiply_range(k + 1, end)

        return multiply_range(0, n - 1)

    @staticmethod
    def sparse_matrix_operations(A: sparse.spmatrix, B: sparse.spmatrix,
                               operation: str) -> sparse.spmatrix:
        """Dispatch a basic sparse-matrix operation.

        Args:
            operation: one of 'multiply', 'add', 'subtract'.

        Raises:
            ValueError: on an unknown operation name.
        """
        if operation == 'multiply':
            return A @ B
        elif operation == 'add':
            return A + B
        elif operation == 'subtract':
            return A - B
        raise ValueError(f"Unknown sparse operation: {operation}")

    @staticmethod
    def block_matrix_operations(blocks: List[List[np.ndarray]],
                              operation: str = 'assemble') -> np.ndarray:
        """Block-matrix operations.

        Args:
            blocks: for 'assemble', a 2-D grid of blocks (``np.block``
                layout); for 'multiply', a pair ``(A_blocks, B_blocks)``
                of 2x2 block grids.
            operation: 'assemble' or 'multiply'.

        Raises:
            ValueError: on an unknown operation or a wrong block layout.
        """
        if operation == 'assemble':
            return np.block(blocks)
        elif operation == 'multiply':
            # Bug fix: _block_multiply takes two operands; the previous
            # code passed a single argument and always raised TypeError.
            if len(blocks) != 2:
                raise ValueError("'multiply' expects a pair (A_blocks, B_blocks)")
            A_blocks, B_blocks = blocks
            return PerformanceOptimizer._block_multiply(A_blocks, B_blocks)
        raise ValueError(f"Unknown block operation: {operation}")

    @staticmethod
    def _block_multiply(A_blocks: List[List[np.ndarray]],
                       B_blocks: List[List[np.ndarray]]) -> np.ndarray:
        """Multiply two 2x2 block matrices and assemble the dense result.

        Simplified implementation: only 2x2 block partitions are supported.
        """
        # Validate both operands (previously only A was checked, and only
        # its first row).
        for blk in (A_blocks, B_blocks):
            if len(blk) != 2 or len(blk[0]) != 2 or len(blk[1]) != 2:
                raise ValueError("Only 2x2 block matrices supported")

        A11, A12 = A_blocks[0]
        A21, A22 = A_blocks[1]
        B11, B12 = B_blocks[0]
        B21, B22 = B_blocks[1]

        # Standard 2x2 block-product formulas.
        C11 = A11 @ B11 + A12 @ B21
        C12 = A11 @ B12 + A12 @ B22
        C21 = A21 @ B11 + A22 @ B21
        C22 = A21 @ B12 + A22 @ B22

        return np.block([[C11, C12], [C21, C22]])

    def high_performance_matmul(self, A: np.ndarray, B: np.ndarray) -> np.ndarray:
        """Matrix multiply, choosing the implementation by problem size."""
        # Rough work estimate m*k*n; large problems take the Numba path.
        if A.shape[0] * A.shape[1] * B.shape[1] > 1000000:
            return PerformanceOptimizer.fast_matrix_multiply(A, B)
        return A @ B

    def parallel_matrix_operations(self, matrices: List[np.ndarray],
                                 operation_func, *args, **kwargs) -> List[np.ndarray]:
        """Apply ``operation_func`` to each matrix on the shared thread pool.

        Results are returned in input order; an exception raised by a
        worker is re-raised when its result is collected.
        """
        futures = [
            self.thread_pool.submit(operation_func, matrix, *args, **kwargs)
            for matrix in matrices
        ]
        return [future.result() for future in futures]

    def __del__(self):
        """Shut down the worker thread pool on garbage collection."""
        # hasattr guard: __init__ may have failed before creating the pool.
        if hasattr(self, 'thread_pool'):
            self.thread_pool.shutdown(wait=True)

class MemoryManager:
    """Memory estimation and optimization helpers for large matrices."""

    @staticmethod
    def estimate_memory_usage(matrix_shape: Tuple[int, ...],
                            dtype: np.dtype = np.float64) -> float:
        """Estimate the memory footprint of a matrix in megabytes.

        Args:
            matrix_shape: shape of the matrix.
            dtype: element dtype (default float64).

        Returns:
            Estimated size in MB.
        """
        elements = np.prod(matrix_shape)
        bytes_per_element = np.dtype(dtype).itemsize
        total_bytes = elements * bytes_per_element
        return total_bytes / (1024 * 1024)  # bytes -> MB

    @staticmethod
    def check_memory_availability(required_mb: float) -> bool:
        """Check whether enough free memory is available.

        Returns True (optimistically) with a warning when psutil is not
        installed and the check cannot be performed.
        """
        try:
            import psutil
            available_mb = psutil.virtual_memory().available / (1024 * 1024)
            return available_mb > required_mb * 1.2  # keep a 20% margin
        except ImportError:
            warnings.warn("psutil not available, cannot check memory")
            return True

    @staticmethod
    def optimize_dtype(matrix: np.ndarray, preserve_precision: bool = True) -> np.ndarray:
        """Downcast a matrix to a smaller dtype when it loses no information.

        Args:
            matrix: input array.
            preserve_precision: when True (default), the matrix is returned
                unchanged; when False, float64 -> float32 and
                complex128 -> complex64 conversions are attempted and kept
                only if the values survive the round-trip (np.allclose).
        """
        if not preserve_precision:
            if matrix.dtype == np.float64:
                if np.allclose(matrix, matrix.astype(np.float32)):
                    return matrix.astype(np.float32)
            elif matrix.dtype == np.complex128:
                if np.allclose(matrix, matrix.astype(np.complex64)):
                    return matrix.astype(np.complex64)

        return matrix

    @staticmethod
    def create_memory_mapped_matrix(shape: Tuple[int, ...],
                                  dtype: np.dtype = np.float64,
                                  filename: Optional[str] = None) -> np.ndarray:
        """Create a file-backed, memory-mapped matrix.

        Args:
            shape: matrix shape.
            dtype: element dtype.
            filename: backing-file path. When None, a temporary file is
                created (bug fix: np.memmap raises on a None path, so the
                documented default previously always failed).

        Returns:
            A writable ``np.memmap`` array of the requested shape.
        """
        if filename is None:
            import tempfile
            # delete=False so the backing file outlives this call; the
            # caller owns the mapping (and the file) from here on.
            tmp = tempfile.NamedTemporaryFile(suffix='.dat', delete=False)
            filename = tmp.name
            tmp.close()
        return np.memmap(filename, dtype=dtype, mode='w+', shape=shape)

# Module-level singleton instances shared across the application.
# Importing this module creates a ThreadPoolExecutor as a side effect
# (inside PerformanceOptimizer.__init__).
performance_optimizer = PerformanceOptimizer()
memory_manager = MemoryManager()