# uamcf/methods/parameter_sharing.py
import copy
from typing import Dict, Any, Optional, List, Tuple, Union

import numpy as np
import torch
import torch.nn as nn

from .base import CompressionMethod


class ParameterSharing(CompressionMethod):
    """Compression method that ties weights across compatible layers."""

    def __init__(self, config: Optional[Dict] = None):
        """Initialize the parameter-sharing method.

        Args:
            config: Optional configuration dict supporting:
                - share_strategy: one of 'sequential', 'similar', 'group'.
                - similarity_threshold: cosine-similarity cutoff used by the
                  'similar' strategy.
                - group_layers: explicit layer-name groups for the 'group'
                  strategy.
                - share_ratio: fraction of each weight tensor to share.
                - share_bias: whether bias vectors are shared as well.
        """
        super().__init__(config)

        cfg = self.config
        self.share_strategy = cfg.get("share_strategy", "similar")
        self.similarity_threshold = cfg.get("similarity_threshold", 0.8)
        self.group_layers = cfg.get("group_layers", [])
        self.share_ratio = cfg.get("share_ratio", 0.5)
        self.share_bias = cfg.get("share_bias", False)

        self.logger.info(f"Initialized parameter sharing with strategy={self.share_strategy}")

        # layer name -> {master, share_ratio, shared_params[, shared_bias]}
        self.parameter_mapping = {}
        # "{name_i}_{name_j}" -> cosine similarity between the two weights
        self.layer_similarities = {}

    def compress(self, model: nn.Module) -> nn.Module:
        """Apply parameter sharing to the whole model.

        Args:
            model: The model to compress.

        Returns:
            The compressed model, or the original model unchanged when there
            are not enough sharable layers, the strategy is unknown, or
            compression raises.
        """
        self.logger.info(f"Applying parameter sharing to model")

        # Strategy name -> bound handler; unknown names fall through to None.
        strategy_handlers = {
            "sequential": self._apply_sequential_sharing,
            "similar": self._apply_similarity_sharing,
            "group": self._apply_group_sharing,
        }

        try:
            sharable_layers = self._get_sharable_layers(model)

            # Sharing needs at least two candidate layers to pair up.
            if len(sharable_layers) <= 1:
                self.logger.warning("Not enough sharable layers found")
                return model

            handler = strategy_handlers.get(self.share_strategy)
            if handler is None:
                self.logger.error(f"Unknown sharing strategy: {self.share_strategy}")
                return model

            compressed_model = handler(model, sharable_layers)

            # Tag the result so downstream tooling can detect sharing.
            compressed_model._parameter_shared = True
            compressed_model._share_strategy = self.share_strategy
            compressed_model._share_ratio = self.share_ratio

            return compressed_model

        except Exception as e:
            # Best-effort design: log and hand back the untouched model.
            self.logger.error(f"Parameter sharing failed: {str(e)}")
            import traceback
            self.logger.error(traceback.format_exc())
            return model

    def _get_sharable_layers(self, model: nn.Module) -> List[Tuple[str, nn.Module]]:
        """Collect every leaf layer whose parameters can be shared.

        Only ``nn.Linear``/``nn.Conv2d`` leaves that carry a present weight
        tensor of rank >= 2 qualify.

        Args:
            model: The model to scan.

        Returns:
            (qualified name, module) pairs in ``named_modules`` order.
        """
        def _is_sharable(module: nn.Module) -> bool:
            # Containers are skipped: only leaves own the weights directly.
            if list(module.children()):
                return False
            if getattr(module, 'weight', None) is None:
                return False
            return isinstance(module, (nn.Linear, nn.Conv2d)) and len(module.weight.shape) >= 2

        return [(name, module) for name, module in model.named_modules() if _is_sharable(module)]

    def _apply_sequential_sharing(self, model: nn.Module, layers: List[Tuple[str, nn.Module]]) -> nn.Module:
        """Apply the sequential sharing strategy.

        Walks the sharable layers in model order, grouping consecutive runs
        whose weight (and bias) shapes are compatible, then shares parameters
        inside each group of two or more layers.

        Args:
            model: The model to compress.
            layers: (name, module) pairs of sharable layers, in model order.

        Returns:
            A copy of ``model`` with parameters shared in place.
        """
        # Fix: the original `type(model)()` only worked for models whose
        # __init__ takes no arguments; deepcopy handles any model and already
        # carries the weights, so no load_state_dict round-trip is needed.
        shared_model = copy.deepcopy(model)

        groups: List[List[Tuple[str, nn.Module]]] = []
        current_group: List[Tuple[str, nn.Module]] = []

        for name, layer in layers:
            if not current_group:
                current_group.append((name, layer))
                continue

            # A layer joins the run only if it matches the run's first layer:
            # identical weight shape, and bias either absent on one side or
            # shape-identical on both.
            first_layer = current_group[0][1]
            if (first_layer.weight.shape == layer.weight.shape and
                    (not hasattr(first_layer, 'bias') or not hasattr(layer, 'bias') or
                     first_layer.bias is None or layer.bias is None or
                     first_layer.bias.shape == layer.bias.shape)):
                current_group.append((name, layer))
            else:
                # Incompatible: close the current run (singletons cannot share)
                # and start a new one at this layer.
                if len(current_group) > 1:
                    groups.append(current_group)
                current_group = [(name, layer)]

        # Close the trailing run.
        if len(current_group) > 1:
            groups.append(current_group)

        return self._share_parameters_in_groups(shared_model, groups)

    def _apply_similarity_sharing(self, model: nn.Module, layers: List[Tuple[str, nn.Module]]) -> nn.Module:
        """Group layers by weight cosine similarity and share within groups.

        Args:
            model: The model to compress.
            layers: (name, module) pairs of sharable layers.

        Returns:
            A copy of ``model`` with parameters shared in place.
        """
        # Fix: the original `type(model)()` only worked for models whose
        # __init__ takes no arguments; deepcopy handles any model.
        shared_model = copy.deepcopy(model)

        # Fill self.layer_similarities with pairwise cosine similarities,
        # keyed "{name_i}_{name_j}".
        self._compute_layer_similarities(layers)

        groups: List[List[Tuple[str, nn.Module]]] = []
        used_layers = set()

        for i, (name_i, layer_i) in enumerate(layers):
            if name_i in used_layers:
                continue

            current_group = [(name_i, layer_i)]
            used_layers.add(name_i)

            # Greedily absorb later, still-unused layers that are both
            # shape-compatible and similar enough.
            for name_j, layer_j in layers[i + 1:]:
                if name_j in used_layers:
                    continue

                # Weight shapes must match exactly.
                if layer_i.weight.shape != layer_j.weight.shape:
                    continue

                # Bias presence and shape must agree (absent on both is fine).
                if (hasattr(layer_i, 'bias') and hasattr(layer_j, 'bias') and
                        ((layer_i.bias is None) != (layer_j.bias is None) or
                         (layer_i.bias is not None and layer_j.bias is not None and
                          layer_i.bias.shape != layer_j.bias.shape))):
                    continue

                similarity = self.layer_similarities.get(f"{name_i}_{name_j}")
                if similarity is not None and similarity >= self.similarity_threshold:
                    current_group.append((name_j, layer_j))
                    used_layers.add(name_j)

            # Singletons cannot share anything.
            if len(current_group) > 1:
                groups.append(current_group)

        return self._share_parameters_in_groups(shared_model, groups)

    def _apply_group_sharing(self, model: nn.Module, layers: List[Tuple[str, nn.Module]]) -> nn.Module:
        """Share parameters within user-specified groups of layer names.

        Args:
            model: The model to compress.
            layers: (name, module) pairs of sharable layers.

        Returns:
            A copy of ``model`` with parameters shared in place; unchanged
            copy when ``group_layers`` is empty.
        """
        # Fix: the original `type(model)()` only worked for models whose
        # __init__ takes no arguments; deepcopy handles any model.
        shared_model = copy.deepcopy(model)

        if not self.group_layers:
            self.logger.warning("No layer groups specified for group sharing strategy")
            return shared_model

        name_to_layer = dict(layers)

        groups = []
        for group_names in self.group_layers:
            group = []
            anchor = None  # first resolved layer; defines the required shapes

            for name in group_names:
                layer = name_to_layer.get(name)
                if layer is None:
                    # Name not among sharable layers; silently skipped, as before.
                    continue

                if anchor is None:
                    anchor = layer
                    group.append((name, layer))
                elif (anchor.weight.shape == layer.weight.shape and
                      (not hasattr(anchor, 'bias') or not hasattr(layer, 'bias') or
                       anchor.bias is None or layer.bias is None or
                       anchor.bias.shape == layer.bias.shape)):
                    group.append((name, layer))
                else:
                    self.logger.warning(f"Layer {name} is not compatible with other layers in group")

            if len(group) > 1:
                groups.append(group)
            else:
                self.logger.warning(f"Group {group_names} has less than 2 compatible layers")

        return self._share_parameters_in_groups(shared_model, groups)

    def _compute_layer_similarities(self, layers: List[Tuple[str, nn.Module]]) -> None:
        """Compute pairwise cosine similarity between layer weight tensors.

        Results are stored symmetrically in ``self.layer_similarities`` under
        both "{name_i}_{name_j}" and "{name_j}_{name_i}" keys. Pairs with
        mismatched weight shapes are skipped.

        Args:
            layers: (name, module) pairs of sharable layers.
        """
        # Fix: reset the cache so reusing this instance on another model
        # cannot mix in similarities from a previous run.
        self.layer_similarities = {}

        # Similarity is pure bookkeeping; skip autograd graph construction.
        with torch.no_grad():
            for i, (name_i, layer_i) in enumerate(layers):
                for name_j, layer_j in layers[i + 1:]:
                    if layer_i.weight.shape != layer_j.weight.shape:
                        continue

                    # Fix: reshape(-1) also handles non-contiguous weights,
                    # where view(-1) raises a RuntimeError.
                    weight_i = layer_i.weight.reshape(-1)
                    weight_j = layer_j.weight.reshape(-1)

                    norm_i = torch.norm(weight_i)
                    norm_j = torch.norm(weight_j)

                    if norm_i > 0 and norm_j > 0:
                        cosine = torch.dot(weight_i, weight_j) / (norm_i * norm_j)
                        # Sign is irrelevant: opposite directions share equally well.
                        similarity = abs(cosine.item())
                    else:
                        similarity = 0.0

                    self.layer_similarities[f"{name_i}_{name_j}"] = similarity
                    self.layer_similarities[f"{name_j}_{name_i}"] = similarity

    def _share_parameters_in_groups(self, model: nn.Module, groups: List[List[Tuple[str, nn.Module]]]) -> nn.Module:
        """Share parameters inside each group, using the first layer as master.

        For ``share_ratio < 1`` a random boolean mask selects which weight
        entries are copied from the master; the rest keep the layer's own
        values. Records every sharing decision in ``self.parameter_mapping``.

        Args:
            model: The model copy to modify in place.
            groups: Groups of (name, module) pairs; the first entry of each
                group is the master whose values are propagated.

        Returns:
            The same ``model`` object, modified in place.
        """
        for group_idx, group in enumerate(groups):
            # Nothing to share in empty or singleton groups.
            if len(group) <= 1:
                continue

            self.logger.info(f"Sharing parameters in group {group_idx} with {len(group)} layers")

            master_name, master_layer = group[0]
            master_module = self._get_module_by_name(model, master_name)
            if master_module is None:
                # Fix: guard the None return of _get_module_by_name instead of
                # crashing on master_module.weight below.
                self.logger.warning(f"Master layer {master_name} not found in model")
                continue

            # Boolean mask over the weight tensor: True = take master's value.
            if self.share_ratio < 1.0:
                weight_shape = master_layer.weight.shape
                total_params = int(np.prod(weight_shape))
                shared_params = int(total_params * self.share_ratio)

                flat_indices = np.random.choice(total_params, shared_params, replace=False)
                mask = torch.zeros(total_params, dtype=torch.bool, device=master_layer.weight.device)
                mask[flat_indices] = True
                mask = mask.view(weight_shape)
            else:
                mask = torch.ones_like(master_layer.weight, dtype=torch.bool)

            for layer_name, _ in group[1:]:
                layer_module = self._get_module_by_name(model, layer_name)
                if layer_module is None:
                    self.logger.warning(f"Layer {layer_name} not found in model")
                    continue

                # Masked entries take the master's values; the rest keep the
                # layer's own weights (clone-then-overwrite collapsed into one
                # torch.where call).
                layer_module.weight = torch.nn.Parameter(
                    torch.where(mask, master_module.weight.data, layer_module.weight.data)
                )

                self.parameter_mapping[layer_name] = {
                    "master": master_name,
                    "share_ratio": self.share_ratio,
                    "shared_params": int(torch.sum(mask).item())
                }

                # Fix: the original dereferenced master_module.bias.data without
                # a None check, although group compatibility explicitly allows a
                # bias-less master paired with biased layers.
                if self.share_bias and getattr(layer_module, 'bias', None) is not None:
                    if getattr(master_module, 'bias', None) is not None:
                        layer_module.bias = torch.nn.Parameter(master_module.bias.data.clone())
                        self.parameter_mapping[layer_name]["shared_bias"] = True
                    else:
                        self.logger.warning(
                            f"Cannot share bias for {layer_name}: master {master_name} has no bias"
                        )

        return model

    def _get_module_by_name(self, model: nn.Module, name: str) -> Optional[nn.Module]:
        """Resolve a dotted module path (e.g. "encoder.fc1") to a submodule.

        Args:
            model: Root module to start from.
            name: Dotted attribute path; the empty string names the root.

        Returns:
            The resolved submodule, ``model`` itself for the empty name, or
            ``None`` when any path component is missing.
        """
        if name == '':
            return model

        current = model
        try:
            for part in name.split('.'):
                current = getattr(current, part)
        except AttributeError:
            # A missing component means the path is invalid for this model.
            return None

        return current

    def evaluate(self, original: Any, compressed: Any) -> Dict:
        """Evaluate the effect of parameter sharing.

        Derives the compressed parameter count from the original count minus
        the shared parameters recorded in ``self.parameter_mapping``.

        Args:
            original: The original model.
            compressed: The compressed model.

        Returns:
            Metric dict (parameter counts, compression ratio, sharing info);
            partial or empty when evaluation fails.
        """
        metrics: Dict[str, Any] = {}

        try:
            total_original = sum(p.numel() for p in original.parameters())

            # Every shared weight entry (and optionally master bias) counts as
            # a saved parameter.
            shared_total = 0
            for record in self.parameter_mapping.values():
                shared_total += record.get("shared_params", 0)

                if record.get("shared_bias", False):
                    if isinstance(original, nn.Module) and isinstance(compressed, nn.Module):
                        # Approximate the saved bias size by the master's bias.
                        for mod_name, mod in original.named_modules():
                            if mod_name == record["master"] and getattr(mod, 'bias', None) is not None:
                                shared_total += mod.bias.numel()
                                break

            remaining = total_original - shared_total

            metrics["original_parameters"] = int(total_original)
            metrics["compressed_parameters"] = int(remaining)
            metrics["shared_parameters"] = int(shared_total)
            metrics["compression_ratio"] = (
                float(total_original / remaining) if remaining > 0 else float('inf')
            )

            # Sharing configuration, for reporting.
            metrics["share_strategy"] = self.share_strategy
            metrics["share_ratio"] = self.share_ratio
            metrics["shared_layers_count"] = len(self.parameter_mapping)
            metrics["shared_bias"] = self.share_bias

        except Exception as e:
            self.logger.error(f"Evaluation failed: {str(e)}")
            import traceback
            self.logger.error(traceback.format_exc())

        return metrics

    def can_apply(self, layer_type: str) -> bool:
        """Report whether this method can target a single layer.

        Parameter sharing always operates on the whole model through
        ``compress``, so per-layer application is never supported.

        Args:
            layer_type: Ignored.

        Returns:
            Always ``False``.
        """
        return False

    @classmethod
    def get_default_params(cls) -> Dict:
        """Return the default configuration for this compression method."""
        defaults: Dict[str, Any] = {
            "share_strategy": "similar",
            "similarity_threshold": 0.8,
            "share_ratio": 0.5,
            "share_bias": False,
            "group_layers": [],
        }
        return defaults

    def get_expected_metrics(self, layer_info: Dict) -> Dict:
        """Estimate the expected impact of applying parameter sharing.

        Accuracy impact scales with how much is shared (how well it holds
        depends on the similarity of the shared layers); size/memory savings
        scale with the number of shared parameters.

        Args:
            layer_info: Layer description (currently unused by the estimate).

        Returns:
            Dict of expected accuracy/size/latency/memory ratios.
        """
        # Expected accuracy degrades with the share ratio, floored at 85%.
        expected_accuracy = max(0.85, 1.0 - (0.1 * self.share_ratio))

        # Rough model: sharing removes about share_ratio * 40% of the size.
        expected_size = 1.0 - (self.share_ratio * 0.4)

        return {
            "accuracy_ratio": expected_accuracy,
            "size_ratio": expected_size,
            "latency_ratio": 1.0,  # sharing does not change the compute graph
            "memory_ratio": expected_size,
        }