# uamcf/methods/weight_sharing.py
import torch
import torch.nn as nn
import numpy as np
from typing import Dict, Any, Optional, List, Tuple, Union
from sklearn.cluster import KMeans

from .base import CompressionMethod


class WeightSharing(CompressionMethod):
    """Weight-sharing compression.

    Replaces a layer's individual weight values with a small codebook of
    shared values, either via k-means clustering ('cluster') or by snapping
    each weight to the nearest point of an evenly spaced grid ('hash').
    """

    def __init__(self, config: Optional[Dict] = None):
        """
        Initialize the weight-sharing method.

        Args:
            config: Configuration options:
                - n_clusters: number of shared weight values (codebook size)
                - method: 'cluster' to derive the codebook with k-means,
                  'hash' to use an evenly spaced value grid
                - preserve_sparsity: if True, zero-valued weights stay zero
                  and do not participate in sharing
        """
        super().__init__(config)
        self.n_clusters = self.config.get("n_clusters", 32)
        self.method = self.config.get("method", "cluster")
        self.preserve_sparsity = self.config.get("preserve_sparsity", True)

        self.logger.info(f"Initialized weight sharing with {self.n_clusters} shared weights, "
                         f"method={self.method}")

        # Per-layer codebooks and index assignments.
        # NOTE(review): both dicts are keyed by the layer *class* name in
        # compress(), so two layers of the same type overwrite each other's
        # entries — consider keying by a unique layer name if callers need
        # per-instance mappings.
        self.weight_mappings = {}
        self.shared_weights = {}

    def compress(self, layer: Any) -> Any:
        """
        Apply weight sharing to a single layer.

        Args:
            layer: layer to compress (must expose a ``weight`` tensor)

        Returns:
            A deep copy of the layer whose weights are replaced by shared
            codebook values, or the original layer unchanged when the layer
            has no weights, the method is unknown, or compression fails.
        """
        self.logger.info("Applying weight sharing to layer")

        try:
            # Local import: only needed on the success path below.
            import copy

            # Layers without a weight tensor cannot be compressed.
            if not hasattr(layer, 'weight') or layer.weight is None:
                self.logger.warning("Layer without weights cannot be shared")
                return layer

            weight = layer.weight.data
            original_shape = weight.shape
            flat_weight = weight.view(-1)

            if self.preserve_sparsity:
                # Only non-zero entries participate; zeros are preserved.
                non_zero_mask = flat_weight != 0
                # view(-1) (not squeeze) so a single match stays 1-D and can
                # still be used for fancy indexing below.
                non_zero_indices = torch.nonzero(non_zero_mask).view(-1)

                # Nothing to share if every weight is zero.
                if non_zero_indices.numel() == 0:
                    return layer

                non_zero_weights = flat_weight[non_zero_indices]
            else:
                # All elements participate in sharing.
                non_zero_weights = flat_weight
                # Keep the index tensor on the same device as the weights.
                non_zero_indices = torch.arange(flat_weight.numel(),
                                                device=flat_weight.device)

            layer_name = layer.__class__.__name__

            if self.method == "cluster":
                # k-means cannot have more clusters than samples.
                kmeans = KMeans(n_clusters=min(self.n_clusters, non_zero_weights.numel()),
                                random_state=0, n_init=10)

                # sklearn expects a 2-D (n_samples, n_features) array.
                weights_np = non_zero_weights.cpu().numpy().reshape(-1, 1)
                kmeans.fit(weights_np)

                # reshape(-1) keeps the codebook 1-D even when n_clusters == 1
                # (squeeze would yield a 0-d array that cannot be indexed).
                centroids = kmeans.cluster_centers_.reshape(-1)
                assignments = kmeans.labels_

                # Record codebook and per-weight index assignments.
                self.weight_mappings[layer_name] = assignments
                codebook = torch.tensor(centroids, device=weight.device,
                                        dtype=weight.dtype)
                self.shared_weights[layer_name] = codebook

                # Vectorized scatter of each weight's centroid (replaces a
                # per-element Python loop).
                new_weight = torch.zeros_like(flat_weight)
                assign_t = torch.as_tensor(assignments, device=weight.device,
                                           dtype=torch.long)
                new_weight[non_zero_indices] = codebook[assign_t]

            elif self.method == "hash":
                # Evenly spaced codebook spanning the observed weight range.
                min_val = non_zero_weights.min().item()
                max_val = non_zero_weights.max().item()
                shared_values = torch.linspace(min_val, max_val, self.n_clusters,
                                               device=weight.device, dtype=weight.dtype)

                # Vectorized nearest-codebook-entry lookup via an (n, k)
                # distance matrix (replaces a per-element Python loop).
                nearest = torch.argmin(
                    torch.abs(non_zero_weights.unsqueeze(1) - shared_values.unsqueeze(0)),
                    dim=1)

                new_weight = torch.zeros_like(flat_weight)
                new_weight[non_zero_indices] = shared_values[nearest]

                # Record assignments too, for consistency with 'cluster'.
                self.weight_mappings[layer_name] = nearest.cpu().numpy()
                self.shared_weights[layer_name] = shared_values

            else:
                self.logger.error(f"Unknown weight sharing method: {self.method}")
                return layer

            # Restore the original weight shape.
            new_weight = new_weight.view(original_shape)

            # deepcopy preserves the layer's full configuration and buffers.
            # (The previous code re-constructed via __init__.__defaults__,
            # which omits required constructor arguments — e.g. Linear's
            # in/out features — and raised for virtually every real layer,
            # silently returning the uncompressed layer via the except.)
            shared_layer = copy.deepcopy(layer)
            shared_layer.weight.data = new_weight

            # Metadata for downstream inspection.
            shared_layer._weight_shared = True
            shared_layer._n_clusters = self.n_clusters
            shared_layer._sharing_method = self.method

            return shared_layer

        except Exception as e:
            # Best-effort: log and fall back to the uncompressed layer.
            self.logger.error(f"Weight sharing failed: {str(e)}")
            import traceback
            self.logger.error(traceback.format_exc())
            return layer

    def evaluate(self, original: Any, compressed: Any) -> Dict:
        """
        Evaluate the effect of weight sharing.

        Args:
            original: the original layer
            compressed: the weight-shared layer

        Returns:
            Metrics dict: unique weight counts/ratio, estimated storage
            sizes and compression ratio, absolute/relative weight error,
            plus the sharing configuration. Empty on failure.
        """
        metrics = {}

        try:
            orig_weight = original.weight.data
            comp_weight = compressed.weight.data

            # Count distinct weight values before/after sharing.
            unique_orig = torch.unique(orig_weight).numel()
            unique_comp = torch.unique(comp_weight).numel()

            metrics["original_unique_weights"] = int(unique_orig)
            metrics["compressed_unique_weights"] = int(unique_comp)
            metrics["unique_weights_ratio"] = float(unique_comp / unique_orig)

            # Estimated storage: original stores every value; compressed
            # stores the codebook plus one index per weight.
            orig_size = orig_weight.numel() * orig_weight.element_size()

            # Index width depends on the configured codebook size.
            # NOTE(review): uses self.n_clusters even if fewer effective
            # clusters were produced — a slight overestimate in that case.
            bits_per_index = int(np.ceil(np.log2(self.n_clusters)))
            bytes_per_index = max(1, int(np.ceil(bits_per_index / 8)))

            comp_size = (self.n_clusters * comp_weight.element_size() +
                         comp_weight.numel() * bytes_per_index)

            metrics["original_size_bytes"] = int(orig_size)
            metrics["compressed_size_bytes"] = int(comp_size)
            metrics["compression_ratio"] = float(orig_size / comp_size) if comp_size > 0 else 1.0

            # Mean absolute and relative reconstruction error.
            abs_err = torch.mean(torch.abs(orig_weight - comp_weight)).item()
            rel_err = abs_err / (torch.mean(torch.abs(orig_weight)).item() + 1e-8)

            metrics["absolute_error"] = float(abs_err)
            metrics["relative_error"] = float(rel_err)

            # Sharing configuration.
            metrics["n_clusters"] = self.n_clusters
            metrics["sharing_method"] = self.method

        except Exception as e:
            self.logger.error(f"Evaluation failed: {str(e)}")
            import traceback
            self.logger.error(traceback.format_exc())

        return metrics

    def can_apply(self, layer_type: str) -> bool:
        """Return True if weight sharing applies to this layer type."""
        applicable_types = ["Linear", "Conv2d", "Conv1d"]
        return layer_type in applicable_types

    @classmethod
    def get_default_params(cls) -> Dict:
        """Return the default configuration for weight sharing."""
        return {
            "n_clusters": 32,
            "method": "cluster",
            "preserve_sparsity": True
        }

    def get_expected_metrics(self, layer_info: Dict) -> Dict:
        """Estimate the expected impact of applying weight sharing.

        Args:
            layer_info: layer description; if it contains 'parameters'
                (weight count), a theoretical size ratio is computed.

        Returns:
            Dict of expected accuracy / size / latency / memory ratios.
        """
        # Weight sharing usually retains high accuracy; the compression
        # ratio depends on codebook size vs. parameter count.
        if 'parameters' in layer_info:
            params = layer_info['parameters']

            # Theoretical compressed size: codebook + per-weight index.
            bits_per_index = int(np.ceil(np.log2(self.n_clusters)))
            bytes_per_index = max(1, int(np.ceil(bits_per_index / 8)))

            orig_size = params * 4  # assume FP32 weights
            comp_size = (self.n_clusters * 4 + params * bytes_per_index)

            size_ratio = comp_size / orig_size
            # Clamp to a plausible range.
            size_ratio = max(0.05, min(0.9, size_ratio))
        else:
            # Default assumption: size reduced to 30%.
            size_ratio = 0.3

        return {
            "accuracy_ratio": 0.97,  # retained accuracy
            "size_ratio": size_ratio,  # size after / size before
            "latency_ratio": 1.0,  # latency usually unaffected
            "memory_ratio": size_ratio  # memory after / memory before
        }