"""高阶张量模块
包含三阶及以上张量运算、张量分解等功能
"""
import numpy as np
from typing import Union, Tuple, List, Optional
from scipy.linalg import svd, qr
import warnings

class TensorOperations:
    """Static helpers for higher-order (order >= 3) tensor arithmetic.

    Includes mode-n unfolding/folding, mode (tensor-times-matrix) products,
    Khatri-Rao / Hadamard / outer products, Tucker decomposition (HOOI),
    HOSVD, CP decomposition (ALS) and CP-based tensor completion.
    All unfoldings use NumPy's C (row-major) memory order consistently.
    """

    @staticmethod
    def tensor_unfolding(tensor: np.ndarray, mode: int) -> np.ndarray:
        """
        Mode-n unfolding (matricization) of a tensor.
        :param tensor: input tensor (order 3 or higher; order 2 also works)
        :param mode: mode (axis) to unfold along
        :return: matrix of shape (tensor.shape[mode], prod of remaining dims)
        :raises ValueError: if mode is out of range
        """
        if mode >= len(tensor.shape) or mode < 0:
            raise ValueError(f"Mode {mode} is invalid for tensor with {len(tensor.shape)} dimensions")
        # Bring the unfolding axis to the front, then flatten the remaining
        # axes in C (row-major) order.
        return np.reshape(np.moveaxis(tensor, mode, 0),
                         (tensor.shape[mode], -1))

    @staticmethod
    def tensor_folding(matrix: np.ndarray, mode: int, shape: Tuple[int, ...]) -> np.ndarray:
        """
        Mode-n folding: exact inverse of :meth:`tensor_unfolding`.
        :param matrix: unfolded matrix, shape (shape[mode], prod of other dims)
        :param mode: mode that was unfolded
        :param shape: target tensor shape
        :return: folded tensor with the given shape
        :raises ValueError: if mode is out of range
        """
        if mode >= len(shape) or mode < 0:
            raise ValueError(f"Mode {mode} is invalid for shape {shape}")

        # Rebuild the axis-to-front layout produced by tensor_unfolding,
        # then move that axis back to its original position.
        # (A previously computed-but-unused inverse permutation was removed.)
        new_shape = [shape[mode]] + [shape[i] for i in range(len(shape)) if i != mode]
        tensor = np.reshape(matrix, new_shape)
        return np.moveaxis(tensor, 0, mode)

    @staticmethod
    def tensor_contraction(tensor1: np.ndarray, tensor2: np.ndarray,
                          axes1: List[int], axes2: List[int]) -> np.ndarray:
        """
        Tensor contraction over the given axis pairs.
        :param tensor1: first tensor
        :param tensor2: second tensor
        :param axes1: axes of the first tensor to contract
        :param axes2: axes of the second tensor to contract
        :return: contracted tensor
        """
        return np.tensordot(tensor1, tensor2, axes=(axes1, axes2))

    @staticmethod
    def tensor_product(tensor1: np.ndarray, tensor2: np.ndarray) -> np.ndarray:
        """
        Tensor (outer) product.
        :param tensor1: first tensor
        :param tensor2: second tensor
        :return: outer product with shape tensor1.shape + tensor2.shape
        """
        return np.outer(tensor1.flatten(), tensor2.flatten()).reshape(
            tensor1.shape + tensor2.shape)

    @staticmethod
    def khatri_rao_product(matrices: List[np.ndarray]) -> np.ndarray:
        """
        Khatri-Rao (column-wise Kronecker) product of a list of matrices.
        :param matrices: list of matrices, all with the same number of columns
        :return: matrix of shape (prod of row counts, n_columns)
        :raises ValueError: if the list is empty or column counts differ
        """
        if not matrices:
            raise ValueError("Matrix list cannot be empty")

        result = matrices[0]
        for matrix in matrices[1:]:
            if result.shape[1] != matrix.shape[1]:
                raise ValueError("All matrices must have the same number of columns")

            rows1, cols = result.shape
            rows2 = matrix.shape[0]

            # Column j of the product is kron(result[:, j], matrix[:, j]).
            new_result = np.zeros((rows1 * rows2, cols))
            for j in range(cols):
                new_result[:, j] = np.kron(result[:, j], matrix[:, j])

            result = new_result

        return result

    @staticmethod
    def hadamard_product(tensor1: np.ndarray, tensor2: np.ndarray) -> np.ndarray:
        """
        Hadamard (element-wise) product of two same-shape tensors.
        :param tensor1: first tensor
        :param tensor2: second tensor
        :return: element-wise product
        :raises ValueError: if shapes differ
        """
        if tensor1.shape != tensor2.shape:
            raise ValueError("Tensors must have the same shape for Hadamard product")
        return tensor1 * tensor2

    @staticmethod
    def tucker_decomposition(tensor: np.ndarray, ranks: List[int],
                            max_iter: int = 100, tol: float = 1e-6) -> Tuple[np.ndarray, List[np.ndarray], float]:
        """
        Tucker decomposition via higher-order orthogonal iteration (HOOI).
        :param tensor: input tensor
        :param ranks: target multilinear rank per mode
        :param max_iter: maximum number of ALS sweeps (must be >= 1)
        :param tol: convergence tolerance on the change of relative error
        :return: (core tensor, list of factor matrices, relative reconstruction error)
        :raises ValueError: if len(ranks) != tensor order or max_iter < 1
        """
        dims = tensor.shape
        n_modes = len(dims)

        if len(ranks) != n_modes:
            raise ValueError("Number of ranks must equal number of modes")
        # Guard: with max_iter < 1 the original code hit a NameError on `core`.
        if max_iter < 1:
            raise ValueError("max_iter must be at least 1")

        # HOSVD initialization: leading left singular vectors of each unfolding.
        factors = []
        for mode in range(n_modes):
            unfolded = TensorOperations.tensor_unfolding(tensor, mode)
            U, _, _ = svd(unfolded, full_matrices=False)
            factors.append(U[:, :ranks[mode]])

        norm_t = np.linalg.norm(tensor)  # loop invariant, hoisted
        prev_error = float('inf')

        for _ in range(max_iter):
            for mode in range(n_modes):
                # Project onto all other factor subspaces (mode_product does
                # not mutate its input, so no defensive copy is needed).
                temp_tensor = tensor
                for i in range(n_modes):
                    if i != mode:
                        temp_tensor = TensorOperations.mode_product(temp_tensor, factors[i].T, i)

                # Dominant subspace of the projected unfolding.
                unfolded = TensorOperations.tensor_unfolding(temp_tensor, mode)
                U, _, _ = svd(unfolded, full_matrices=False)
                factors[mode] = U[:, :ranks[mode]]

            # Core tensor: project the data onto every factor subspace.
            core = tensor
            for mode in range(n_modes):
                core = TensorOperations.mode_product(core, factors[mode].T, mode)

            # Relative reconstruction error for the convergence test.
            reconstructed = TensorOperations.tucker_reconstruct(core, factors)
            error = np.linalg.norm(tensor - reconstructed) / norm_t

            if abs(prev_error - error) < tol:
                break
            prev_error = error

        return core, factors, error

    @staticmethod
    def tucker_reconstruct(core: np.ndarray, factors: List[np.ndarray]) -> np.ndarray:
        """
        Reconstruct a tensor from its Tucker decomposition.
        :param core: core tensor
        :param factors: list of factor matrices, one per mode
        :return: reconstructed tensor
        """
        result = core
        for mode, factor in enumerate(factors):
            result = TensorOperations.mode_product(result, factor, mode)
        return result

    @staticmethod
    def mode_product(tensor: np.ndarray, matrix: np.ndarray, mode: int) -> np.ndarray:
        """
        Mode-n (tensor-times-matrix) product.
        :param tensor: input tensor
        :param matrix: matrix of shape (new_dim, tensor.shape[mode])
        :param mode: mode along which to multiply
        :return: tensor with shape[mode] replaced by matrix.shape[0]
        :raises ValueError: if mode is out of range or shapes are incompatible
        """
        if mode >= len(tensor.shape) or mode < 0:
            raise ValueError(f"Mode {mode} is invalid for tensor with {len(tensor.shape)} dimensions")
        # Explicit shape check gives a clearer error than the matmul failure.
        if matrix.shape[1] != tensor.shape[mode]:
            raise ValueError(
                f"Matrix with {matrix.shape[1]} columns cannot multiply mode "
                f"{mode} of size {tensor.shape[mode]}")

        # Unfold, multiply, fold back with the updated mode size.
        unfolded = TensorOperations.tensor_unfolding(tensor, mode)
        result_unfolded = matrix @ unfolded

        new_shape = list(tensor.shape)
        new_shape[mode] = matrix.shape[0]
        return TensorOperations.tensor_folding(result_unfolded, mode, tuple(new_shape))

    @staticmethod
    def tensor_svd(tensor: np.ndarray, mode: int) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
        """
        SVD of the mode-n unfolding of a tensor.
        :param tensor: input tensor
        :param mode: mode to unfold along
        :return: (U, S, Vt) of the thin SVD
        """
        unfolded = TensorOperations.tensor_unfolding(tensor, mode)
        return svd(unfolded, full_matrices=False)

    @staticmethod
    def tensor_qr(tensor: np.ndarray, mode: int) -> Tuple[np.ndarray, np.ndarray]:
        """
        QR decomposition of the mode-n unfolding of a tensor.
        :param tensor: input tensor
        :param mode: mode to unfold along
        :return: (Q, R) from the economic QR
        """
        unfolded = TensorOperations.tensor_unfolding(tensor, mode)
        return qr(unfolded, mode='economic')

    @staticmethod
    def tensor_norm(tensor: np.ndarray, ord: Union[str, int] = 'fro') -> float:
        """
        Norm of a tensor, computed on its flattened vector of entries.
        :param tensor: input tensor
        :param ord: 'fro' (default), 1, 2, np.inf, or anything
                    np.linalg.norm accepts for a vector
        :return: norm value
        """
        if ord == 'fro':
            return np.linalg.norm(tensor.flatten())
        elif ord == 1:
            return np.sum(np.abs(tensor))
        elif ord == 2:
            # For a flattened tensor this coincides with the Frobenius norm.
            return np.linalg.norm(tensor.flatten(), ord=2)
        elif ord == np.inf:
            return np.max(np.abs(tensor))
        else:
            return np.linalg.norm(tensor.flatten(), ord=ord)

    @staticmethod
    def tensor_rank(tensor: np.ndarray, tol: float = 1e-12) -> List[int]:
        """
        Multilinear rank of a tensor: rank of each mode-n unfolding.
        :param tensor: input tensor
        :param tol: singular values above this threshold count toward the rank
        :return: list of per-mode ranks (plain Python ints)
        """
        ranks = []
        for mode in range(len(tensor.shape)):
            unfolded = TensorOperations.tensor_unfolding(tensor, mode)
            _, s, _ = svd(unfolded, full_matrices=False)
            # Cast so callers get List[int], not numpy integers.
            ranks.append(int(np.sum(s > tol)))
        return ranks

    @staticmethod
    def higher_order_svd(tensor: np.ndarray) -> Tuple[np.ndarray, List[np.ndarray]]:
        """
        Higher-order SVD (HOSVD).
        :param tensor: input tensor
        :return: (core tensor, list of orthonormal factor matrices)
        """
        n_modes = len(tensor.shape)
        factors = []

        # Left singular vectors of every mode-n unfolding.
        for mode in range(n_modes):
            unfolded = TensorOperations.tensor_unfolding(tensor, mode)
            U, _, _ = svd(unfolded, full_matrices=False)
            factors.append(U)

        # Core: project the tensor onto every factor basis.
        core = tensor
        for mode in range(n_modes):
            core = TensorOperations.mode_product(core, factors[mode].T, mode)

        return core, factors

    @staticmethod
    def tensor_completion(tensor: np.ndarray, mask: np.ndarray, rank: int,
                         max_iter: int = 100, tol: float = 1e-6) -> np.ndarray:
        """
        Tensor completion based on iterated CP decomposition.
        :param tensor: incomplete tensor (missing entries may hold any value)
        :param mask: observation mask (1 = observed, 0 = missing)
        :param rank: CP rank used for the low-rank model
        :param max_iter: maximum number of outer iterations
        :param tol: convergence tolerance on the relative change
        :return: completed tensor
        """
        completed = tensor.copy()

        for _ in range(max_iter):
            # Fit a CP model to the current estimate (few inner sweeps only).
            factors, _ = TensorOperations.cp_decomposition(completed, rank, max_iter=10)

            reconstructed = TensorOperations.cp_reconstruct(factors)

            # Keep observed entries, impute the missing ones from the model.
            prev_completed = completed.copy()
            completed = mask * tensor + (1 - mask) * reconstructed

            # Stop when the iterate stabilizes.
            if np.linalg.norm(completed - prev_completed) / np.linalg.norm(completed) < tol:
                break

        return completed

    @staticmethod
    def cp_decomposition(tensor: np.ndarray, rank: int,
                        max_iter: int = 100, tol: float = 1e-6) -> Tuple[List[np.ndarray], float]:
        """
        CP decomposition (CANDECOMP/PARAFAC) via alternating least squares.
        Factor matrices are initialized with np.random.rand, so results depend
        on NumPy's global random state.
        :param tensor: input tensor
        :param rank: CP rank
        :param max_iter: maximum number of ALS sweeps (must be >= 1)
        :param tol: convergence tolerance on the change of relative error
        :return: (list of factor matrices, relative reconstruction error)
        :raises ValueError: if max_iter < 1
        """
        dims = tensor.shape
        n_modes = len(dims)

        # Guard: with max_iter < 1 the original code hit a NameError on `error`.
        if max_iter < 1:
            raise ValueError("max_iter must be at least 1")

        # Random non-negative initialization of the factor matrices.
        factors = [np.random.rand(d, rank) for d in dims]

        norm_t = np.linalg.norm(tensor)  # loop invariant, hoisted
        prev_error = float('inf')

        for _ in range(max_iter):
            for mode in range(n_modes):
                # Khatri-Rao product of the other factors in ASCENDING mode
                # order: with the C-order (row-major) unfolding used by
                # tensor_unfolding, X_(n) = A_n (A_0 ⊙ ... skip n ... ⊙ A_{N-1})^T.
                # (The previous reversed order matched a column-major unfolding
                # convention and made the least-squares system inconsistent.)
                other_factors = [factors[i] for i in range(n_modes) if i != mode]
                kr = TensorOperations.khatri_rao_product(other_factors)

                unfolded = TensorOperations.tensor_unfolding(tensor, mode)

                # Least-squares update: A_n = X_(n) (KR^T)^+.
                factors[mode] = unfolded @ np.linalg.pinv(kr.T)

                # Normalize all but the last factor; the last one absorbs the
                # component weights.
                if mode < n_modes - 1:
                    norms = np.linalg.norm(factors[mode], axis=0)
                    norms[norms == 0] = 1  # avoid division by zero
                    factors[mode] = factors[mode] / norms

            # Relative reconstruction error for the convergence test.
            reconstructed = TensorOperations.cp_reconstruct(factors)
            error = np.linalg.norm(tensor - reconstructed) / norm_t

            if abs(prev_error - error) < tol:
                break
            prev_error = error

        return factors, error

    @staticmethod
    def cp_reconstruct(factors: List[np.ndarray]) -> np.ndarray:
        """
        Reconstruct a tensor from CP factor matrices.
        :param factors: list of factor matrices, each of shape (dim_i, rank)
        :return: reconstructed tensor of shape (dim_0, ..., dim_{N-1})
        :raises ValueError: if the factor list is empty
        """
        if not factors:
            raise ValueError("Factors list cannot be empty")

        rank = factors[0].shape[1]
        shape = tuple(f.shape[0] for f in factors)
        n_modes = len(factors)

        tensor = np.zeros(shape)

        # Sum of rank-one terms: each term is the outer product of one column
        # taken from every factor matrix.
        for r in range(rank):
            component = factors[0][:, r].copy()
            for mode in range(1, n_modes):
                component = np.outer(component.flatten(), factors[mode][:, r])
            tensor += component.reshape(shape)

        return tensor