# uamcf/methods/low_rank.py
import torch
import torch.nn as nn
import numpy as np
from typing import Dict, Any, Optional, List, Tuple, Union

from .base import CompressionMethod


class LowRankDecomposition(CompressionMethod):
    """Low-rank decomposition compression.

    Replaces the weight matrix W (out x in) of a Linear or 1x1 Conv2d layer
    with the product of two smaller factors realized as two stacked layers,
    reducing the parameter count from out*in to rank*(out + in).
    """

    def __init__(self, config: Optional[Dict] = None):
        """
        Initialize the low-rank decomposition method.

        Args:
            config: configuration parameters, including:
                - rank_ratio: ratio in (0, 1) of the rank to keep relative to
                  the original rank
                - min_rank: lower bound ensuring the decomposed rank is not
                  too small
                - decomposition: factorization method, 'svd' or 'cp'
        """
        super().__init__(config)
        self.rank_ratio = self.config.get("rank_ratio", 0.25)
        self.min_rank = self.config.get("min_rank", 4)
        self.decomposition = self.config.get("decomposition", "svd")

        self.logger.info(f"Initialized low-rank decomposition with rank_ratio={self.rank_ratio}, "
                         f"decomposition={self.decomposition}")

    def _target_rank(self, original_rank: int) -> int:
        """Clamp rank_ratio * original_rank into [min_rank, original_rank]."""
        target = max(self.min_rank, int(original_rank * self.rank_ratio))
        return min(target, original_rank)

    def _svd_factors(self, weights_2d: torch.Tensor,
                     target_rank: int) -> Tuple[torch.Tensor, torch.Tensor]:
        """Return (first, second) factors of a truncated SVD of weights_2d.

        weights_2d ≈ second @ first, with the singular values split evenly
        (via sqrt) between the two factors.
        """
        # NOTE: torch.linalg.svd replaces the deprecated torch.svd; it
        # returns V already transposed (Vh), so no extra .t() is needed.
        U, S, Vh = torch.linalg.svd(weights_2d, full_matrices=False)
        sqrt_S = torch.diag(S[:target_rank].sqrt())
        first = sqrt_S @ Vh[:target_rank, :]      # shape (rank, in)
        second = U[:, :target_rank] @ sqrt_S      # shape (out, rank)
        return first, second

    def compress(self, layer: Any) -> Any:
        """
        Apply low-rank decomposition to a layer's weights.

        Args:
            layer: the layer to compress

        Returns:
            The decomposed layer sequence, or the original layer unchanged
            when its type is unsupported or decomposition fails.
        """
        self.logger.info(f"Applying low-rank decomposition to layer")

        try:
            if isinstance(layer, nn.Linear):
                return self._decompose_linear(layer)
            # BUG FIX: require BOTH kernel dimensions to be 1 (previously
            # only kernel_size[0] was checked, wrongly accepting e.g. (1, 3)
            # kernels) and groups == 1, since the factorization assumes a
            # dense channel-mixing matrix.
            elif (isinstance(layer, nn.Conv2d)
                  and layer.kernel_size == (1, 1)
                  and layer.groups == 1):
                return self._decompose_1x1_conv(layer)
            else:
                self.logger.warning(f"Layer type {layer.__class__.__name__} not supported for low-rank decomposition")
                return layer

        except Exception as e:
            # Best-effort: fall back to the uncompressed layer on any failure.
            self.logger.error(f"Low-rank decomposition failed: {str(e)}")
            import traceback
            self.logger.error(traceback.format_exc())
            return layer

    def _decompose_linear(self, layer: nn.Linear) -> nn.Sequential:
        """Decompose a fully-connected layer into two smaller ones."""
        weights = layer.weight.data
        bias = layer.bias

        # Determine the target rank from the original matrix's max rank.
        original_shape = weights.shape
        original_rank = min(original_shape)
        target_rank = self._target_rank(original_rank)

        # Two stacked layers: (in -> rank) without bias, (rank -> out) with
        # the original bias if present.
        fc1 = nn.Linear(original_shape[1], target_rank, bias=False)
        fc2 = nn.Linear(target_rank, original_shape[0], bias=bias is not None)

        if self.decomposition == "svd":
            # Truncated SVD: keep the top-k singular triplets.
            first, second = self._svd_factors(weights, target_rank)
            fc1.weight.data = first
            fc2.weight.data = second
        else:  # "cp" or any other value: simplified alternating factorization
            nn.init.kaiming_normal_(fc1.weight)
            nn.init.kaiming_normal_(fc2.weight)

            # Refine the random factors against the original weights with a
            # few alternating gradient-style steps on ||W - B C||^2:
            # dC = B^T E (rank x in), dB = E C^T (out x rank).
            for _ in range(10):
                approx = fc2.weight.data @ fc1.weight.data
                error = weights - approx
                # BUG FIX: the previous update transposed (B^T E) before
                # assigning it to fc1.weight, yielding an (in, rank) tensor
                # for a (rank, in) parameter and breaking the next iteration.
                fc1.weight.data = fc1.weight.data + 0.1 * (fc2.weight.data.t() @ error)
                fc2.weight.data = fc2.weight.data + 0.1 * (error @ fc1.weight.data.t())

        if bias is not None:
            fc2.bias.data = bias.data

        decomposed = nn.Sequential(fc1, fc2)

        # Attach metadata consumed by evaluate().
        decomposed.original_layer_type = layer.__class__.__name__
        decomposed.decomposition_method = self.decomposition
        decomposed.original_shape = original_shape
        decomposed.compressed_rank = target_rank
        decomposed.original_rank = original_rank

        return decomposed

    def _decompose_1x1_conv(self, layer: nn.Conv2d) -> nn.Sequential:
        """Decompose a 1x1 convolution (equivalent to a per-pixel Linear)."""
        in_channels = layer.in_channels
        out_channels = layer.out_channels
        stride = layer.stride
        padding = layer.padding
        bias = layer.bias

        # 1x1 conv weights have shape (out, in, 1, 1); flatten to a matrix.
        # BUG FIX: a plain squeeze() also dropped channel dimensions of size
        # 1 (e.g. in_channels == 1), breaking the subsequent reshapes.
        weights_2d = layer.weight.data.view(out_channels, in_channels)

        original_rank = min(out_channels, in_channels)
        target_rank = self._target_rank(original_rank)

        # conv1 has no padding/stride/bias, so zero-padding and striding can
        # safely be applied by conv2 without changing the overall function.
        conv1 = nn.Conv2d(in_channels, target_rank, kernel_size=1, stride=1,
                          padding=0, bias=False)
        conv2 = nn.Conv2d(target_rank, out_channels, kernel_size=1, stride=stride,
                          padding=padding, bias=bias is not None)

        if self.decomposition == "svd":
            first, second = self._svd_factors(weights_2d, target_rank)
            conv1.weight.data = first.view(target_rank, in_channels, 1, 1)
            conv2.weight.data = second.view(out_channels, target_rank, 1, 1)
        else:
            nn.init.kaiming_normal_(conv1.weight)
            nn.init.kaiming_normal_(conv2.weight)

            # Same alternating refinement as in the linear case, operating on
            # the flattened 2D views of the conv weights.
            for _ in range(10):
                w1 = conv1.weight.data.view(target_rank, in_channels)
                w2 = conv2.weight.data.view(out_channels, target_rank)
                error = weights_2d - w2 @ w1
                w1 = w1 + 0.1 * (w2.t() @ error)
                # As in the original code, the second factor's update uses the
                # freshly updated first factor.
                w2 = w2 + 0.1 * (error @ w1.t())
                conv1.weight.data = w1.view(target_rank, in_channels, 1, 1)
                conv2.weight.data = w2.view(out_channels, target_rank, 1, 1)

        if bias is not None:
            conv2.bias.data = bias.data

        decomposed = nn.Sequential(conv1, conv2)

        # Attach metadata consumed by evaluate().
        decomposed.original_layer_type = layer.__class__.__name__
        decomposed.decomposition_method = self.decomposition
        decomposed.original_shape = layer.weight.shape
        decomposed.compressed_rank = target_rank
        decomposed.original_rank = original_rank

        return decomposed

    @staticmethod
    def _add_error_metrics(metrics: Dict, original_weight: torch.Tensor,
                           reconstructed: torch.Tensor) -> None:
        """Store mean absolute and relative reconstruction errors in-place."""
        abs_err = torch.mean(torch.abs(original_weight - reconstructed)).item()
        # Epsilon guards against an all-zero original weight.
        rel_err = abs_err / (torch.mean(torch.abs(original_weight)).item() + 1e-8)
        metrics["absolute_error"] = float(abs_err)
        metrics["relative_error"] = float(rel_err)

    def evaluate(self, original: Any, compressed: Any) -> Dict:
        """
        Evaluate performance before and after low-rank decomposition.

        Args:
            original: the original layer
            compressed: the decomposed layer sequence

        Returns:
            Dict of evaluation metrics (empty on failure).
        """
        metrics = {}

        try:
            # Only sequences produced by this method carry the metadata.
            if isinstance(compressed, nn.Sequential) and hasattr(compressed, 'original_shape'):
                metrics["decomposition_method"] = compressed.decomposition_method
                metrics["original_rank"] = compressed.original_rank
                metrics["compressed_rank"] = compressed.compressed_rank
                metrics["rank_ratio"] = compressed.compressed_rank / compressed.original_rank

                # Parameter counts before/after.
                original_params = np.prod(compressed.original_shape)
                if hasattr(original, 'bias') and original.bias is not None:
                    original_params += original.bias.numel()

                compressed_params = sum(p.numel() for p in compressed.parameters())

                metrics["original_parameters"] = int(original_params)
                metrics["compressed_parameters"] = int(compressed_params)
                metrics["compression_ratio"] = float(original_params / compressed_params)

                # Reconstruction error of the weight approximation.
                if isinstance(original, nn.Linear) and len(compressed) == 2:
                    # Rebuild the weight matrix: W ≈ W2 @ W1.
                    reconstructed = compressed[1].weight.data @ compressed[0].weight.data
                    self._add_error_metrics(metrics, original.weight.data, reconstructed)

                elif isinstance(original, nn.Conv2d) and len(compressed) == 2:
                    if original.kernel_size[0] == 1 and original.kernel_size[1] == 1:
                        # BUG FIX: the first factor must NOT be transposed:
                        # W ≈ (out, rank) @ (rank, in). The old code computed
                        # (out, rank) @ (in, rank), which only type-checked
                        # when rank == in_channels.
                        reconstructed = torch.matmul(
                            compressed[1].weight.data.view(compressed[1].weight.size(0), -1),
                            compressed[0].weight.data.view(compressed[0].weight.size(0), -1)
                        ).view_as(original.weight.data)
                        self._add_error_metrics(metrics, original.weight.data, reconstructed)

        except Exception as e:
            self.logger.error(f"Evaluation failed: {str(e)}")
            import traceback
            self.logger.error(traceback.format_exc())

        return metrics

    def can_apply(self, layer_type: str) -> bool:
        """Return True if this method supports the given layer type name."""
        applicable_types = ["Linear", "Conv2d"]
        return layer_type in applicable_types

    @classmethod
    def get_default_params(cls) -> Dict:
        """Return the default parameters for this compression method."""
        return {
            "rank_ratio": 0.25,
            "min_rank": 4,
            "decomposition": "svd"
        }

    def get_expected_metrics(self, layer_info: Dict) -> Dict:
        """Estimate the expected impact of applying this compression method."""
        # Low-rank decomposition typically retains roughly 90-95% accuracy
        # while substantially shrinking parameter-heavy layers.
        accuracy_impact = 0.95

        # The parameter reduction depends on the rank ratio and layer shape.
        if 'input_shape' in layer_info and 'output_shape' in layer_info:
            in_size = layer_info['input_shape'][0] if isinstance(layer_info['input_shape'], list) else layer_info[
                'input_shape']
            out_size = layer_info['output_shape'][0] if isinstance(layer_info['output_shape'], list) else layer_info[
                'output_shape']

            original_params = in_size * out_size
            # BUG FIX: the factorized pair holds rank * (in + out) parameters
            # with rank ≈ rank_ratio * min(in, out); the previous estimate
            # used rank_ratio * (in + out), i.e. an effective rank below 1.
            est_rank = max(self.min_rank, int(self.rank_ratio * min(in_size, out_size)))
            compressed_params = est_rank * (in_size + out_size)

            if original_params > 0:
                size_ratio = compressed_params / original_params
            else:
                # Degenerate shape info: fall back to the default estimate.
                size_ratio = 0.5
            # Keep the estimate inside a plausible band.
            size_ratio = max(0.1, min(0.9, size_ratio))
        else:
            # Without shape info, assume a 50% reduction.
            size_ratio = 0.5

        # The two stacked layers usually add a little latency.
        latency_ratio = 1.1

        return {
            "accuracy_ratio": accuracy_impact,
            "size_ratio": size_ratio,
            "latency_ratio": latency_ratio,
            "memory_ratio": size_ratio
        }