import operator
from typing import Optional

import numpy as np
from transformers import PreTrainedModel
from transformers.trainer_callback import TrainerCallback


class DynamicLayerActivationCallback(TrainerCallback):
    """Trainer callback implementing LISA-style dynamic layer activation.

    Every ``interval_steps`` optimizer steps (including step 0) it freezes
    all transformer layers and unfreezes ``n_layers`` of them.  Layer choice
    is adaptive: an EMA of per-layer gradient norms (collected via tensor
    hooks) ranks layers by importance, combined with epsilon-greedy random
    exploration and a small "stickiness" bonus that damps oscillation
    between successive selections.
    """

    def __init__(
        self,
        n_layers: int,
        interval_steps: int,
        model: PreTrainedModel,
        lisa_layers_attribute: Optional[str] = None,
    ):
        """
        Args:
            n_layers: number of layers kept trainable at any one time.
            interval_steps: switch the active layer set every this many
                global steps (must be > 0, or ``on_step_begin`` divides by zero).
            model: the model whose layers are toggled.
            lisa_layers_attribute: dotted attribute path, relative to this
                callback (e.g. ``"model.model.layers"``), used to reach the
                layer list when the model class is not in the built-in map.

        Raises:
            ValueError: if the model class is unknown and no
                ``lisa_layers_attribute`` was provided.
        """
        super().__init__()
        self.n_layers = n_layers
        self.interval_steps = interval_steps
        self.model = model

        # Known model classes -> dotted path (from self) to their layer list.
        class_to_layers_map = {
            "LlamaForCausalLM": "model.model.layers",
            "Qwen2ForCausalLM": "model.model.layers",
            "MistralForCausalLM": "model.model.layers",
            "MixtralForCausalLM": "model.model.layers",
            "GemmaForCausalLM": "model.model.layers",
            "GPT2LMHeadModel": "model.transformer.h",
            "HymbaForCausalLM": "model.model.layers",
        }
        model_class_name = self.model.__class__.__name__
        if model_class_name in class_to_layers_map:
            self.layers_attribute = class_to_layers_map[model_class_name]
        else:
            if lisa_layers_attribute is None:
                # raise instead of assert: asserts are stripped under `python -O`.
                raise ValueError("Please provide the attribute to access the layers of the model.")
            self.layers_attribute = lisa_layers_attribute
        # Number of transformer layers in the wrapped model.
        self.total_layers = len(self._get_layers())

        # Indices of the currently unfrozen layers; empty until first switch.
        self.active_layers_indices = []

    def _get_layers(self):
        """Resolve the dotted ``layers_attribute`` path on ``self``.

        ``operator.attrgetter`` follows dotted names, replacing the original
        ``eval("self." + ...)`` without executing arbitrary strings.
        """
        return operator.attrgetter(self.layers_attribute)(self)

    def freeze_all_layers(self):
        """Set ``requires_grad = False`` on every parameter of every layer."""
        for layer in self._get_layers():
            for param in layer.parameters():
                param.requires_grad = False

    def on_step_begin(self, args, state, control, **kwargs):
        # Check if it's time to switch active layers, including at step 0.
        if state.global_step % self.interval_steps == 0:
            self.switch_active_layers()

    def switch_active_layers(self):
        """Pick a new set of ``n_layers`` active layers and unfreeze them.

        First call lazily registers a backward hook on every layer parameter
        to maintain an EMA of that layer's gradient norm as an importance
        score.  Each switch then activates the highest-importance layers,
        with a little random exploration and a stickiness bonus for the
        previous selection.
        """
        # ---------- Lazy init: register hooks / importance cache on first call ----------
        if not hasattr(self, "_imp_initialized"):
            layers = self._get_layers()
            self._L = len(layers)
            # Per-layer importance score (EMA of parameter-gradient L2 norms).
            self._importance = np.zeros(self._L, dtype=np.float64)
            # EMA decay factor (closer to 1 = smoother).
            self._beta = 0.9
            # Exploration fraction (keep a few random layers to avoid local optima).
            self._epsilon = 0.1
            # Stickiness: small score bonus for the previously selected layers,
            # reducing rapid flip-flopping between selections.
            self._stickiness = 0.05
            self._last_active = set()
            self._hooks = []

            # Build a hook that folds a parameter's gradient norm into its
            # layer's EMA importance score.
            def make_param_hook(layer_idx: int):
                def _hook(grad):
                    if grad is None:
                        return
                    # L2 norm as magnitude; cast to float to avoid bf16 precision loss.
                    g2 = grad.detach().float().norm(2).item()
                    self._importance[layer_idx] = (
                        self._beta * self._importance[layer_idx]
                        + (1.0 - self._beta) * g2
                    )
                return _hook

            for i, layer in enumerate(layers):
                for p in layer.parameters():
                    # Tensor.register_hook raises RuntimeError on tensors that
                    # do not require grad, so guard against pre-frozen params.
                    # Params frozen *later* keep their hook, which fires again
                    # once the layer is unfrozen.
                    if p.requires_grad:
                        self._hooks.append(p.register_hook(make_param_hook(i)))

            self._imp_initialized = True
            # NOTE(review): only currently-active layers receive gradients, so
            # inactive layers' scores go stale rather than decaying; the
            # stickiness/exploration terms partially compensate — confirm this
            # bias is acceptable.

        # ---------- Perform the switch ----------
        self.freeze_all_layers()
        layers = self._get_layers()

        scores = self._importance.copy()

        # Cold start: no gradient statistics yet -> activate the topmost layers.
        if np.all(scores == 0):
            start = max(0, self._L - self.n_layers)
            chosen = list(range(start, self._L))
        else:
            # Stickiness bonus for the previously active set, scaled by the
            # current maximum score so it stays a *small* nudge.
            if len(self._last_active) > 0:
                idx = np.fromiter(self._last_active, dtype=int)
                bonus = self._stickiness * (scores.max() + 1e-8)
                scores[idx] += bonus

            k = min(self.n_layers, self._L)
            # Top-k layers by score (an unordered partition is sufficient).
            topk = np.argpartition(-scores, k - 1)[:k]

            # Epsilon-greedy: replace the weakest few of the top-k with random
            # layers drawn from outside the set, for exploration.
            num_random = max(1, int(np.ceil(self._epsilon * k)))
            if self._L > k and num_random > 0:
                pool = np.setdiff1d(np.arange(self._L), topk, assume_unique=False)
                if len(pool) > 0:
                    rand = np.random.choice(pool, size=min(num_random, len(pool)), replace=False)
                    # Locate the lowest-scoring slots inside top-k and swap in
                    # the random candidates.
                    weakest_in_topk = np.argpartition(scores[topk], num_random - 1)[:len(rand)]
                    topk[weakest_in_topk] = rand

            chosen = sorted(topk.tolist())

        self.active_layers_indices = chosen
        print(f"[Adaptive] Activating layers (by EMA grad-importance): {self.active_layers_indices}", flush=True)

        # Unfreeze only the chosen layers.
        for idx in self.active_layers_indices:
            for param in layers[idx].parameters():
                param.requires_grad = True

        # Remember this selection for the next stickiness bonus.
        self._last_active = set(self.active_layers_indices)