# === lib/prune.py ===

import torch
import torch.nn as nn
from torch.utils.data import DataLoader, TensorDataset
from tqdm import tqdm
from .layerwrapper import WrappedGPT
from .data import get_loaders

def run_pruning(args, model, tokenizer):
    """Route to the pruning implementation named by ``args.method``.

    Supported methods: ``'flap'``, ``'wanda_sp'``, ``'mag_sp'``.

    Raises:
        ValueError: if ``args.method`` is not one of the supported methods.
    """
    method = args.method
    if method == 'flap':
        prune_flap(args, model, tokenizer)
        return
    if method == 'wanda_sp':
        prune_wanda_sp(args, model, tokenizer)
        return
    if method == 'mag_sp':
        prune_magnitude_sp(args, model, tokenizer)
        return
    raise ValueError(f"Unsupported pruning method: {method}")



def prune_flap(args, model, tokenizer):
    """FLAP-style structured pruning driven by activation statistics and
    gradient sensitivity.

    Calibrates on wikitext2 samples, accumulating per-layer gradient
    sensitivity (mean absolute weight gradient) plus activation statistics
    via ``WrappedGPT``, then zeroes weight columns whose combined score falls
    below an adaptive mean+std threshold, tightened over several stages.

    Args:
        args: namespace with ``nsamples``, ``seed`` and ``pruning_ratio``.
        model: causal LM whose ``model.model`` contains the ``nn.Linear``
            layers to prune; modified in place.
        tokenizer: tokenizer passed through to the calibration data loader.
    """
    device = next(model.parameters()).device
    # Maximum sequence length the model accepts.
    seqlen = model.config.max_position_embeddings

    samples = get_loaders('wikitext2', args.nsamples, args.seed, seqlen, tokenizer)
    # Stack the sample list into one [nsamples, seqlen] tensor.
    data = torch.cat(samples, dim=0).to(device)
    loader = DataLoader(TensorDataset(data), batch_size=min(4, args.nsamples))

    # Collect and wrap every nn.Linear inside the transformer stack.
    modules = [m for m in model.model.modules() if isinstance(m, nn.Linear)]
    wrappers = [WrappedGPT(m) for m in modules]
    grad_sens_list = [0.0] * len(modules)  # per-layer gradient-sensitivity accumulators
    total_samples = 0  # number of samples actually processed (for normalization)

    # Train mode so backward() produces gradients.
    model.train()
    for batch in tqdm(loader, desc='FLAP Calibration'):
        input_ids = batch[0].to(device)  # [batch_size, seqlen]

        # Process each sample individually to bound activation memory.
        for i in range(input_ids.size(0)):
            single_input = input_ids[i:i + 1]  # [1, seqlen]
            outputs = model(single_input, output_hidden_states=True)

            # Pseudo loss (mean logit) solely to obtain gradients.
            loss = outputs.logits.float().mean()
            loss.backward()

            # Accumulate the mean absolute weight gradient per layer.
            for idx, module in enumerate(modules):
                if module.weight.grad is not None:
                    grad_sens_list[idx] += module.weight.grad.abs().mean().item()
            model.zero_grad()  # avoid gradient accumulation across samples

            # NOTE(review): wrappers (one per nn.Linear) are zipped against
            # per-transformer-layer hidden states; zip silently truncates
            # when the counts differ — confirm the intended pairing.
            for wrap, hidden in zip(wrappers, outputs.hidden_states[1:]):
                wrap.add_batch(hidden)
            total_samples += 1

    # BUGFIX: normalize by the total number of calibration samples. The
    # original divided by the size of the last batch only (and raised a
    # NameError when the loader was empty).
    if total_samples > 0:
        grad_sens_list = [s / total_samples for s in grad_sens_list]
    model.eval()  # back to eval mode for the pruning passes

    stages = 3      # spread the target pruning ratio over 3 stages
    alpha = 0.2     # weight of the gradient-sensitivity term in the score
    k = 0.2         # std-dev multiplier for the adaptive threshold

    for stage in range(1, stages + 1):
        stage_ratio = args.pruning_ratio * stage / stages  # cumulative ratio this stage
        for idx, (module, wrap) in enumerate(zip(modules, wrappers)):
            # Combined importance: activation stats scaled by gradient sensitivity.
            score = wrap.stats * (1 + alpha * grad_sens_list[idx])
            mean, std = score.mean(), score.std()
            # Threshold relaxes as the cumulative pruning ratio grows.
            thresh = mean + k * std * (1 - stage_ratio)
            mask = score > thresh

            # Snap the mask to attention-head granularity when the input
            # dimension divides evenly: keep a whole head if any of its
            # channels survive.
            dim = module.weight.size(1)
            num_heads = getattr(model.config, 'num_attention_heads', None)
            if num_heads and dim % num_heads == 0:
                head_dim = dim // num_heads
                mask = mask.view(num_heads, head_dim).any(dim=1).repeat_interleave(head_dim)

            # Zero weight columns (and bias entries) of pruned channels.
            module.weight.data.mul_(mask.float().unsqueeze(0))
            if module.bias is not None:
                module.bias.data.mul_(mask.float())

    # Release any buffers held by the calibration wrappers.
    for wrap in wrappers:
        wrap.free()



def prune_wanda_sp(args, model, tokenizer, device=None):
    """Wanda-style structured pruning: score each input channel by
    |weight| * aggregated |activation| and zero the lowest-scoring channels.

    Args:
        args: namespace with ``nsamples``, ``seed`` and ``pruning_ratio``
            (fraction of input channels to remove per layer).
        model: causal LM; the ``nn.Linear`` layers under ``model.model``
            are pruned in place.
        tokenizer: tokenizer for the calibration data loader.
        device: device for calibration tensors; defaults to the model's device.
    """
    if device is None:
        device = next(model.parameters()).device
    seqlen = model.config.max_position_embeddings

    # Calibration data: [nsamples, seqlen] token ids.
    samples = get_loaders('wikitext2', args.nsamples, args.seed, seqlen, tokenizer)
    data = torch.cat(samples, dim=0).to(device)
    loader = DataLoader(TensorDataset(data), batch_size=min(32, args.nsamples))

    # All Linear layers, each with a per-input-channel |activation| accumulator.
    modules = [m for m in model.model.modules() if isinstance(m, nn.Linear)]
    activations = {id(m): torch.zeros(m.in_features, device=device) for m in modules}

    # Eval mode (disables dropout); no gradients needed for scoring.
    model.eval()
    with torch.no_grad():
        for batch in tqdm(loader, desc='Wanda Calibration'):
            input_ids = batch[0].to(device)
            outputs = model(input_ids, output_hidden_states=True)
            hidden_states = outputs.hidden_states[1:]

            # NOTE(review): Linear modules are zipped against per-layer
            # hidden states; zip silently truncates when the counts differ —
            # confirm the intended pairing.
            for mod, h in zip(modules, hidden_states):
                h = h.view(-1, h.size(-1))  # (batch, seq, dim) -> (batch*seq, dim)
                # Only accumulate when the hidden size matches this layer's input.
                if activations[id(mod)].numel() == h.size(1):
                    activations[id(mod)] += h.abs().sum(dim=0)

    for mod in modules:
        # Per-input-channel score: mean over output rows of |W * activation|.
        score = (mod.weight.data * activations[id(mod)].unsqueeze(0)).abs().mean(dim=0)

        # BUGFIX: the keep count must be based on the number of scored
        # channels (in_features); the original used out_features. Also,
        # the original kthvalue(score, keep) + `>=` selection was inverted —
        # it kept only ~pruning_ratio of the channels. Select the TOP-k
        # scores explicitly instead.
        n = score.numel()
        keep = max(1, int(n * (1 - args.pruning_ratio)))
        if keep >= n:
            continue  # nothing to prune in this layer

        mask = torch.zeros_like(score, dtype=torch.bool)
        mask[torch.topk(score, keep).indices] = True

        # Zero the pruned input-channel columns.
        mod.weight.data.mul_(mask.float().unsqueeze(0))
        # Bias is per OUTPUT channel; only scale it when the shapes happen
        # to line up (square layer) — the original crashed otherwise.
        if mod.bias is not None and mod.bias.numel() == mask.numel():
            mod.bias.data.mul_(mask.float())


def prune_magnitude_sp(args, model, tokenizer, device=None):
    """Magnitude-based structured pruning: per layer, zero the input-channel
    columns with the smallest mean |weight|.

    Args:
        args: namespace with ``pruning_ratio`` (fraction of input channels
            to remove per layer).
        model: model whose ``model.model`` contains the ``nn.Linear`` layers
            to prune; modified in place.
        tokenizer: unused; kept for a uniform pruning-method signature.
        device: resolved to the model's device when ``None``; kept for
            signature parity with the other pruning methods.
    """
    if device is None:
        device = next(model.parameters()).device

    print("[INFO] Running magnitude-based pruning...")

    modules = [m for m in model.model.modules() if isinstance(m, torch.nn.Linear)]

    for idx, mod in enumerate(modules):
        # Per-input-channel importance: mean |weight| over output rows.
        score = mod.weight.data.abs().mean(dim=0)  # shape: [in_features]

        # BUGFIX: the keep count must match the score length (in_features);
        # the original used out_features. Also, the original
        # kthvalue(score, keep) + `>=` selection was inverted — it kept the
        # wrong fraction of channels. Select the TOP-k scores explicitly.
        n = score.numel()
        keep = max(1, int(n * (1 - args.pruning_ratio)))
        if keep >= n:
            print(f"[WARN] Layer {idx} skipped (keep={keep} of {n} channels, nothing to prune)")
            continue

        mask = torch.zeros_like(score, dtype=torch.bool)
        mask[torch.topk(score, keep).indices] = True

        mod.weight.data.mul_(mask.float().unsqueeze(0))
        # Bias is per OUTPUT channel; only scale it when the shapes happen
        # to line up (square layer) — the original crashed otherwise.
        if mod.bias is not None and mod.bias.numel() == mask.numel():
            mod.bias.data.mul_(mask.float())

    print("[INFO] Magnitude pruning complete.")
