"""大矩阵处理模块
专门处理大规模矩阵运算，支持分块处理、内存优化和并行计算
"""
import numpy as np
from scipy import sparse
from scipy.linalg import svd, qr, lu
from typing import Tuple, Dict, Any, List, Optional, Union
import warnings
import gc
from concurrent.futures import ThreadPoolExecutor, as_completed
import threading
import os
from performance_optimization import PerformanceOptimizer, MemoryManager

class LargeMatrixHandler:
    """Handler for large-scale matrix operations.

    Provides memory-aware routines: block-wise multiplication, direct /
    randomized / incremental SVD, and memory-efficient low-rank
    reconstruction.  Each operation first checks an estimated memory
    budget and falls back to a block-wise or randomized algorithm when a
    direct computation would exceed it.
    """

    def __init__(self, max_memory_mb: float = 1024, block_size: int = 512):
        """
        Initialize the large matrix handler.

        :param max_memory_mb: maximum allowed memory usage (MB)
        :param block_size: edge length of blocks used by block-wise routines
        """
        self.max_memory_mb = max_memory_mb
        self.block_size = block_size
        self.memory_manager = MemoryManager()
        self.performance_optimizer = PerformanceOptimizer()

    def estimate_operation_memory(self, matrix_shape: Tuple[int, int],
                                  operation: str = 'svd') -> float:
        """Estimate the memory (MB) required by *operation* on a matrix
        of shape ``matrix_shape``.

        NOTE(review): the SVD estimate budgets full (m, m) and (n, n)
        factors even though `_direct_svd` uses ``full_matrices=False``;
        this is a deliberately conservative upper bound.
        """
        m, n = matrix_shape
        base_memory = self.memory_manager.estimate_memory_usage(matrix_shape)

        if operation == 'svd':
            # SVD additionally materializes U, the singular values and Vh.
            u_memory = self.memory_manager.estimate_memory_usage((m, m))
            vh_memory = self.memory_manager.estimate_memory_usage((n, n))
            s_memory = self.memory_manager.estimate_memory_usage((min(m, n),))
            return base_memory + u_memory + vh_memory + s_memory
        if operation == 'multiply':
            # Simplified estimate: operand plus a result of comparable size.
            return base_memory * 2
        # Default estimate for any other operation.
        return base_memory * 1.5

    def can_process_directly(self, matrix_shape: Tuple[int, int],
                             operation: str = 'svd') -> bool:
        """Return True when the estimated memory fits the configured budget."""
        required_memory = self.estimate_operation_memory(matrix_shape, operation)
        return required_memory <= self.max_memory_mb

    def block_svd_decomposition(self, matrix: np.ndarray,
                                k: Optional[int] = None) -> Dict[str, Any]:
        """SVD that automatically selects a strategy by memory budget.

        Uses an exact decomposition when it fits in memory, otherwise a
        randomized approximation keeping at most ``k`` components.
        """
        if self.can_process_directly(matrix.shape, 'svd'):
            return self._direct_svd(matrix, k)
        # Too large for a direct factorization: use randomized SVD.
        return self._randomized_svd(matrix, k)

    def _direct_svd(self, matrix: np.ndarray, k: Optional[int] = None) -> Dict[str, Any]:
        """Exact thin SVD, optionally truncated to the top ``k`` components."""
        try:
            U, s, Vh = svd(matrix, full_matrices=False)

            if k is not None and k < len(s):
                # Keep only the k dominant components.
                U = U[:, :k]
                s = s[:k]
                Vh = Vh[:k, :]

            return {
                "U": U.tolist(),
                "singular_values": s.tolist(),
                "Vh": Vh.tolist(),
                "method": "direct"
            }
        except Exception as e:
            raise ValueError(f"直接SVD分解失败: {str(e)}")

    def _randomized_svd(self, matrix: np.ndarray, k: Optional[int] = None) -> Dict[str, Any]:
        """Randomized truncated SVD (sklearn implementation when available).

        Falls back to the in-house implementation when scikit-learn is
        not installed.
        """
        try:
            from sklearn.utils.extmath import randomized_svd

            m, n = matrix.shape
            if k is None:
                k = min(100, min(m, n) - 1)  # default: keep up to 100 components

            # BUG FIX: clamp to at least one component; min(m, n) == 1
            # previously produced k == 0 here.
            k = max(1, min(k, min(m, n) - 1))

            U, s, Vh = randomized_svd(matrix, n_components=k, random_state=42)

            return {
                "U": U.tolist(),
                "singular_values": s.tolist(),
                "Vh": Vh.tolist(),
                "method": "randomized",
                "n_components": k
            }
        except ImportError:
            # sklearn unavailable: use the in-house randomized SVD.
            return self._custom_randomized_svd(matrix, k)
        except Exception as e:
            raise ValueError(f"随机化SVD分解失败: {str(e)}")

    def _custom_randomized_svd(self, matrix: np.ndarray, k: Optional[int] = None) -> Dict[str, Any]:
        """In-house randomized SVD (random range finder + small exact SVD)."""
        m, n = matrix.shape
        if k is None:
            k = min(100, min(m, n) - 1)

        # Clamp to a valid component count (at least 1 — see _randomized_svd).
        k = max(1, min(k, min(m, n) - 1))

        # Random test matrix with oversampling, seeded for reproducibility
        # (consistent with the sklearn path's random_state=42).  The
        # oversampled width is clamped so it never exceeds n.
        rng = np.random.default_rng(42)
        omega = rng.standard_normal((n, min(k + 10, n)))
        Y = matrix @ omega

        # Orthonormal basis of the sampled range.
        Q, _ = qr(Y, mode='economic')

        # Project onto the subspace and factor the small matrix exactly.
        B = Q.T @ matrix
        U_tilde, s, Vh = svd(B, full_matrices=False)

        # Lift the left factor back to the original space.
        U = Q @ U_tilde

        # Truncate to k components.
        if len(s) > k:
            U = U[:, :k]
            s = s[:k]
            Vh = Vh[:k, :]

        return {
            "U": U.tolist(),
            "singular_values": s.tolist(),
            "Vh": Vh.tolist(),
            "method": "custom_randomized",
            "n_components": k
        }

    def block_matrix_multiply(self, A: np.ndarray, B: np.ndarray) -> np.ndarray:
        """Matrix product A @ B, computed block-wise when the result would
        exceed the memory budget.

        :raises ValueError: if the inner dimensions do not match
        """
        m, k1 = A.shape
        k2, n = B.shape

        if k1 != k2:
            raise ValueError(f"矩阵维度不匹配: {A.shape} × {B.shape}")

        # Small result: multiply directly.
        result_memory = self.memory_manager.estimate_memory_usage((m, n))
        if result_memory <= self.max_memory_mb:
            return A @ B

        result = np.zeros((m, n), dtype=A.dtype)

        # Block edge lengths, never larger than the actual dimensions.
        block_m = min(self.block_size, m)
        block_n = min(self.block_size, n)
        block_k = min(self.block_size, k1)

        for i in range(0, m, block_m):
            for j in range(0, n, block_n):
                # Accumulate the (i, j) output tile over the inner dimension.
                # Edge tiles are smaller; numpy slicing clamps automatically.
                tile = np.zeros((min(block_m, m - i), min(block_n, n - j)), dtype=A.dtype)

                for kk in range(0, k1, block_k):
                    A_block = A[i:i + block_m, kk:kk + block_k]
                    B_block = B[kk:kk + block_k, j:j + block_n]
                    tile += A_block @ B_block

                result[i:i + block_m, j:j + block_n] = tile

        return result

    def incremental_svd_update(self, U: np.ndarray, s: np.ndarray, Vh: np.ndarray,
                               new_data: np.ndarray) -> Dict[str, Any]:
        """Update a thin SVD when new columns are appended.

        Given A ≈ U @ diag(s) @ Vh and new columns ``new_data`` (same row
        count as U), returns factors such that
        U_new @ diag(s_new) @ Vh_new ≈ [A, new_data].
        """
        try:
            # Project the new columns onto the current left subspace.
            projected = U.T @ new_data

            # Orthogonal residual (part of new_data outside span(U)).
            residual = new_data - U @ projected
            Q_r, R_r = qr(residual, mode='economic')

            # Small core matrix K = [[S, P], [0, R]]; its SVD rotates the
            # augmented bases into the updated factors.
            S_diag = np.diag(s)
            extended_matrix = np.block([[S_diag, projected],
                                        [np.zeros((Q_r.shape[1], S_diag.shape[1])), R_r]])

            U_ext, s_new, Vh_ext = svd(extended_matrix, full_matrices=False)

            # Updated left factor over the augmented basis [U, Q_r].
            U_new = np.hstack([U, Q_r]) @ U_ext

            # BUG FIX: the right factor must embed the old Vh AND an identity
            # block for the newly appended columns ([[Vh, 0], [0, I_p]]); the
            # previous code used zeros only, so the reconstruction dropped
            # new_data entirely and had the wrong column count.
            p = new_data.shape[1]
            r = Vh.shape[0]
            right_basis = np.block([
                [Vh, np.zeros((r, p))],
                [np.zeros((p, Vh.shape[1])), np.eye(p)],
            ])
            Vh_new = Vh_ext @ right_basis

            return {
                "U": U_new.tolist(),
                "singular_values": s_new.tolist(),
                "Vh": Vh_new.tolist(),
                "method": "incremental"
            }
        except Exception as e:
            raise ValueError(f"增量SVD更新失败: {str(e)}")

    def memory_efficient_reconstruction(self, U: np.ndarray, s: np.ndarray,
                                        Vh: np.ndarray, k: int) -> np.ndarray:
        """Reconstruct U[:, :k] @ diag(s[:k]) @ Vh[:k, :] in row blocks to
        limit peak memory."""
        if k > len(s):
            k = len(s)

        m, n = U.shape[0], Vh.shape[1]
        result = np.zeros((m, n), dtype=U.dtype)

        block_size = min(self.block_size, m)

        # Hoist the loop-invariant right factor: scale the rows of Vh by
        # the singular values once instead of per block.
        scaled_vh = s[:k, None] * Vh[:k, :]

        for i in range(0, m, block_size):
            end_i = min(i + block_size, m)
            result[i:end_i, :] = U[i:end_i, :k] @ scaled_vh

        return result

    def adaptive_precision_svd(self, matrix: np.ndarray,
                               target_precision: float = 0.95) -> Dict[str, Any]:
        """Truncated SVD keeping the fewest components whose cumulative
        squared-singular-value energy reaches ``target_precision``."""
        try:
            # Full (or randomized, for large inputs) decomposition first.
            if self.can_process_directly(matrix.shape, 'svd'):
                U, s, Vh = svd(matrix, full_matrices=False)
            else:
                result = self._randomized_svd(matrix, k=min(100, min(matrix.shape)))
                U = np.array(result["U"])
                s = np.array(result["singular_values"])
                Vh = np.array(result["Vh"])

            # Cumulative energy ratio of the spectrum.
            energy = s ** 2
            total_energy = float(np.sum(energy))

            if total_energy == 0.0:
                # BUG FIX: an all-zero matrix previously divided by zero.
                # Keep a single (zero) component; trivially exact.
                k = 1
                achieved = 1.0
            else:
                energy_ratio = np.cumsum(energy) / total_energy
                mask = energy_ratio >= target_precision
                # BUG FIX: np.argmax on an all-False mask returns 0, which
                # silently truncated to k=1 when the target is unreachable;
                # fall back to keeping every available component instead.
                k = int(np.argmax(mask)) + 1 if mask.any() else len(s)
                k = max(1, min(k, len(s)))
                achieved = float(energy_ratio[k - 1])

            return {
                "U": U[:, :k].tolist(),
                "singular_values": s[:k].tolist(),
                "Vh": Vh[:k, :].tolist(),
                "method": "adaptive_precision",
                "n_components": k,
                "precision_achieved": achieved,
                "compression_ratio": float(k / min(matrix.shape))
            }
        except Exception as e:
            raise ValueError(f"自适应精度SVD失败: {str(e)}")

    def cleanup_memory(self):
        """Force a garbage-collection pass to release freed matrices."""
        gc.collect()