# uamcf/methods/factorization.py
import numpy as np
from typing import Dict, Any, Optional, List, Tuple, Union

from .base import CompressionMethod


class Factorization(CompressionMethod):
    """Tensor-factorization compression method.

    Decomposes layer weight tensors to reduce parameter count: truncated SVD
    for 2D (or reshaped 4D) weights, CP/Tucker for 4D convolution weights.
    This class is framework-agnostic — it only computes factors / decomposition
    parameters and attaches them to the layer (``layer.factorized_weights``);
    the actual layer replacement is performed by framework-specific adapters.
    """

    def __init__(self, config: Optional[Dict] = None):
        """
        Initialize the tensor-factorization method.

        Args:
            config: Factorization settings:
                - rank_ratio: fraction of the full rank to keep (float in (0, 1], default: 0.5)
                - method: decomposition algorithm ('svd', 'cp', 'tucker', default: 'svd')
                - min_rank: lower bound on the retained rank (int, default: 4)
                - fine_tune: whether to fine-tune after factorization (bool, default: True)
        """
        super().__init__(config)
        self.rank_ratio = self.config.get("rank_ratio", 0.5)
        self.method = self.config.get("method", "svd")
        self.min_rank = self.config.get("min_rank", 4)
        self.fine_tune = self.config.get("fine_tune", True)

        self.logger.info(f"Initialized tensor factorization with {self.method} method and rank_ratio={self.rank_ratio}")

    def _select_rank(self, full_rank: int) -> int:
        """
        Choose the retained rank for a dimension of size ``full_rank``.

        Applies ``rank_ratio``, floors at ``min_rank``, then clamps to
        ``full_rank`` — ``min_rank`` may exceed small dimensions (e.g. an
        RGB input with in_channels=3), and a rank above the dimension would
        be invalid. Always returns at least 1.
        """
        rank = max(self.min_rank, int(full_rank * self.rank_ratio))
        return max(1, min(rank, full_rank))

    def compress(self, layer: Any) -> Any:
        """
        Factorize the layer's weight tensor.

        Args:
            layer: The layer to factorize.

        Returns:
            The layer with decomposition results attached (or unchanged on
            error / when it has no weights).
        """
        self.logger.info(f"Applying {self.method} factorization with rank_ratio={self.rank_ratio}")

        try:
            # Framework-agnostic implementation; the actual layer surgery is
            # handled by framework-specific adapters.
            if hasattr(layer, "weight") and layer.weight is not None:
                # Keep the original weights for later evaluation.
                self.original_weight = layer.weight.clone() if hasattr(layer.weight, "clone") else layer.weight

                # Dispatch on the configured decomposition method.
                if self.method == "svd":
                    layer = self._svd_decomposition(layer)
                elif self.method == "cp":
                    layer = self._cp_decomposition(layer)
                elif self.method == "tucker":
                    layer = self._tucker_decomposition(layer)
                else:
                    self.logger.warning(f"Unknown factorization method: {self.method}, using SVD")
                    layer = self._svd_decomposition(layer)

                self.logger.info("Factorization applied successfully")
            else:
                self.logger.warning("Layer has no weights to factorize")

            return layer

        except Exception as e:
            self.logger.error(f"Factorization failed: {str(e)}")
            # Best-effort: return the original layer on failure.
            return layer

    def _svd_decomposition(self, layer: Any) -> Any:
        """
        Truncated SVD factorization.

        Applies to fully-connected (Linear/Dense) layers, or conv layers
        whose 4D weights are reshaped to 2D first. Stores factors A and B
        such that A @ B approximates the (flattened) weight matrix.
        """
        try:
            weights = layer.weight

            # Conv layers carry 4D weights; anything else is treated as 2D.
            is_conv = len(weights.shape) == 4

            if is_conv:
                # Conv weight layout: [out_channels, in_channels, kernel_h, kernel_w].
                out_c, in_c, k_h, k_w = weights.shape

                # Flatten everything but the output channels for the SVD.
                weights_2d = weights.reshape(out_c, in_c * k_h * k_w)
            else:
                # Fully-connected weight layout: [out_features, in_features].
                weights_2d = weights

            U, S, Vh = np.linalg.svd(weights_2d, full_matrices=False)

            # Retained rank: ratio of the full rank, floored at min_rank,
            # clamped to the full rank.
            full_rank = min(weights_2d.shape)
            rank = self._select_rank(full_rank)

            # Truncate the SVD to the retained rank.
            U_r = U[:, :rank]
            S_r = S[:rank]
            Vh_r = Vh[:rank, :]

            # Split the singular values symmetrically between both factors.
            A = U_r @ np.diag(np.sqrt(S_r))
            B = np.diag(np.sqrt(S_r)) @ Vh_r

            if is_conv:
                # The adapter replaces the conv with two convs; only record
                # the factors here (B reshaped back to conv layout).
                layer.factorized_weights = {
                    "method": "svd",
                    "A": A,
                    "B": B.reshape(rank, in_c, k_h, k_w),
                    "original_shape": weights.shape,
                    "rank": rank
                }
            else:
                # For fully-connected layers the adapter swaps in the two
                # factor matrices directly.
                layer.factorized_weights = {
                    "method": "svd",
                    "A": A,
                    "B": B,
                    "original_shape": weights.shape,
                    "rank": rank
                }

            return layer

        except Exception as e:
            self.logger.error(f"SVD decomposition failed: {str(e)}")
            return layer

    def _cp_decomposition(self, layer: Any) -> Any:
        """
        CP (canonical polyadic) decomposition of 4D conv weights.

        CP decomposition requires a dedicated tensor library (e.g. tensorly);
        here only the decomposition parameters are recorded and the adapter
        performs the actual factorization.
        """
        try:
            weights = layer.weight

            # CP decomposition only makes sense for 4D conv weights.
            if len(weights.shape) != 4:
                self.logger.warning("CP decomposition is typically applied to 4D conv weights")
                return layer

            # Conv weight layout: [out_channels, in_channels, kernel_h, kernel_w].
            out_c, in_c, k_h, k_w = weights.shape

            # CP rank = number of rank-1 components; clamp to the matrix
            # rank so min_rank cannot push it past the tensor dimensions.
            full_rank = min(out_c, in_c * k_h * k_w)
            rank = self._select_rank(full_rank)

            # Record parameters only; the adapter executes the decomposition.
            layer.factorized_weights = {
                "method": "cp",
                "original_shape": weights.shape,
                "rank": rank
            }

            # Flag the layer for adapter-side processing.
            layer.needs_adapter_factorization = True

            return layer

        except Exception as e:
            self.logger.error(f"CP decomposition failed: {str(e)}")
            return layer

    def _tucker_decomposition(self, layer: Any) -> Any:
        """
        Tucker decomposition of 4D conv weights.

        Tucker decomposition requires a dedicated tensor library; here only
        the per-mode ranks are recorded and the adapter performs the actual
        factorization.
        """
        try:
            weights = layer.weight

            # Tucker decomposition only makes sense for 4D conv weights.
            if len(weights.shape) != 4:
                self.logger.warning("Tucker decomposition is typically applied to 4D conv weights")
                return layer

            # Conv weight layout: [out_channels, in_channels, kernel_h, kernel_w].
            out_c, in_c, k_h, k_w = weights.shape

            # Per-mode ranks, each clamped to its dimension (min_rank may
            # exceed small channel counts such as in_channels=3).
            out_rank = self._select_rank(out_c)
            in_rank = self._select_rank(in_c)

            # Record parameters only; the adapter executes the decomposition.
            layer.factorized_weights = {
                "method": "tucker",
                "original_shape": weights.shape,
                "ranks": [out_rank, in_rank, k_h, k_w]
            }

            # Flag the layer for adapter-side processing.
            layer.needs_adapter_factorization = True

            return layer

        except Exception as e:
            self.logger.error(f"Tucker decomposition failed: {str(e)}")
            return layer

    def evaluate(self, original: Any, compressed: Any) -> Dict:
        """
        Evaluate the layer before and after factorization.

        Args:
            original: The original layer.
            compressed: The factorized layer.

        Returns:
            Metrics dict; may include approximation error (SVD only) and the
            parameter-reduction factor. Empty on error or when the layer has
            no factorization results attached.
        """
        metrics = {}

        try:
            if hasattr(compressed, "factorized_weights"):
                factorized_info = compressed.factorized_weights
                method = factorized_info.get("method", "unknown")

                metrics["factorization_method"] = method
                metrics["rank"] = factorized_info.get("rank", 0)
                # Tucker stores a per-mode rank list instead of a scalar rank.
                if "ranks" in factorized_info:
                    metrics["ranks"] = factorized_info["ranks"]

                # For SVD the factors are available, so the approximation
                # error can be computed directly.
                if method == "svd" and "A" in factorized_info and "B" in factorized_info:
                    A = factorized_info["A"]
                    B = factorized_info["B"]

                    # Reconstruct the weight tensor from the factors.
                    if len(original.weight.shape) == 4 and len(B.shape) == 4:
                        # Conv layer: flatten B, multiply, reshape back.
                        out_c, in_c, k_h, k_w = original.weight.shape
                        B_flat = B.reshape(B.shape[0], -1)
                        reconstructed = A @ B_flat
                        reconstructed = reconstructed.reshape(out_c, in_c, k_h, k_w)
                    else:
                        # Fully-connected layer.
                        reconstructed = A @ B

                    # Mean absolute error, plus a scale-invariant relative error.
                    error = np.mean(np.abs(original.weight - reconstructed))
                    rel_error = error / (np.mean(np.abs(original.weight)) + 1e-8)

                    metrics["absolute_error"] = float(error)
                    metrics["relative_error"] = float(rel_error)

                # Parameter-count reduction factor (original / factorized).
                if "original_shape" in factorized_info:
                    orig_shape = factorized_info["original_shape"]
                    orig_params = np.prod(orig_shape)

                    if method == "svd" and "rank" in factorized_info:
                        rank = factorized_info["rank"]
                        if len(orig_shape) == 4:
                            # Conv: A(out_c x rank) + B(rank x in_c x k_h x k_w).
                            out_c, in_c, k_h, k_w = orig_shape
                            new_params = out_c * rank + rank * in_c * k_h * k_w
                        else:
                            # FC: A(out_features x rank) + B(rank x in_features).
                            out_f, in_f = orig_shape
                            new_params = out_f * rank + rank * in_f

                        # Guard against rank 0 (possible with min_rank=0 configs).
                        if new_params > 0:
                            metrics["param_reduction"] = float(orig_params / new_params)

        except Exception as e:
            self.logger.error(f"Evaluation failed: {str(e)}")

        return metrics

    def can_apply(self, layer_type: str) -> bool:
        """Return True if tensor factorization applies to this layer type."""
        # Factorization targets convolutional and fully-connected layers.
        applicable_types = [
            "Conv1d", "Conv2d", "Conv3d",
            "Linear", "Dense"
        ]
        return layer_type in applicable_types

    @classmethod
    def get_default_params(cls) -> Dict:
        """Return the default factorization parameters."""
        return {
            "rank_ratio": 0.5,
            "method": "svd",
            "min_rank": 4,
            "fine_tune": True
        }

    def get_expected_metrics(self, layer_info: Dict) -> Dict:
        """
        Estimate the expected performance impact of factorization.

        Args:
            layer_info: Description of the layer (uses the 'type' key).

        Returns:
            Expected ratios for accuracy, size, latency, and memory.
        """
        # Heuristic: lower rank ratio → higher compression but larger
        # accuracy loss.

        # Expected accuracy retention by rank-ratio band.
        if self.rank_ratio >= 0.7:
            accuracy_ratio = 0.98  # high rank ratio: small accuracy loss
        elif self.rank_ratio >= 0.5:
            accuracy_ratio = 0.95  # medium rank ratio
        elif self.rank_ratio >= 0.3:
            accuracy_ratio = 0.9  # low rank ratio
        else:
            accuracy_ratio = 0.85  # very low rank ratio

        # Rough size estimate from the rank ratio; conv layers typically
        # compress better under factorization than FC layers.
        layer_type = layer_info.get("type", "Unknown")

        if layer_type in ["Conv2d", "Conv3d"]:
            size_ratio = self.rank_ratio * 0.6
        else:
            size_ratio = self.rank_ratio * 0.8

        # Latency impact roughly tracks the parameter reduction.
        latency_ratio = self.rank_ratio * 0.7

        return {
            "accuracy_ratio": accuracy_ratio,
            "size_ratio": size_ratio,
            "latency_ratio": latency_ratio,
            "memory_ratio": size_ratio
        }