{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "4737b1ef",
   "metadata": {},
   "source": [
    "# APGCC 模型剪枝实验笔记本\n",
    "\n",
    "本笔记本演示在APGCC（占位名）或任意PyTorch模型上应用多种剪枝策略：\n",
    "- 全局幅度非结构化剪枝（Magnitude）\n",
    "- SNIP 梯度重要性剪枝\n",
    "- PyTorch nn.utils.prune 的L1非结构化剪枝与固化\n",
    "- 简单“结构化样式”神经元级剪枝示例（按输出通道L1范数）\n",
    "- 稀疏度统计与基线/剪后评估、保存模型\n",
    "\n",
    "如你的项目中已有实际 `apgcc` 模型，请在“加载或定义APGCC模型”单元中替换为真实导入。"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "75ba9dec",
   "metadata": {},
   "source": [
    "## 1. 环境与库导入"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e8779efd",
   "metadata": {},
   "outputs": [],
   "source": [
    "import os, time, random\n",
    "import numpy as np\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "from torch.utils.data import Dataset, DataLoader\n",
    "\n",
    "seed = int(os.getenv('SEED', '42'))\n",
    "random.seed(seed); np.random.seed(seed); torch.manual_seed(seed)\n",
    "\n",
    "device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n",
    "print('Using device:', device)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "26c78908",
   "metadata": {},
   "source": [
    "## 2. 加载或定义 APGCC 模型（占位）"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "29584c69",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 若你的项目中已有 apgcc 模型，请替换为真实导入：\n",
    "# from apgcc import APGCCModel\n",
    "# model = APGCCModel(...).to(device)\n",
    "\n",
    "# 这里提供一个可运行的占位模型：简单的“图”前向 = A @ X -> MLP\n",
    "class APGCCPlaceholder(nn.Module):\n",
    "    def __init__(self, in_dim=32, hidden=64, out_dim=2):\n",
    "        super().__init__()\n",
    "        self.fc1 = nn.Linear(in_dim, hidden)\n",
    "        self.act = nn.ReLU()\n",
    "        self.fc2 = nn.Linear(hidden, out_dim)\n",
    "    def forward(self, x, adj):\n",
    "        # x: [N, F], adj: [N, N]\n",
    "        x = torch.matmul(adj, x)  # 简化的聚合\n",
    "        x = self.act(self.fc1(x))\n",
    "        x = self.fc2(x)\n",
    "        return x\n",
    "\n",
    "model = APGCCPlaceholder().to(device)\n",
    "print(model)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "0f19c7b0",
   "metadata": {},
   "source": [
    "## 3. 构造合成图数据集"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "c2935f88",
   "metadata": {},
   "outputs": [],
   "source": [
    "class SyntheticGraphDataset(Dataset):\n",
    "    def __init__(self, num_nodes=256, in_dim=32, num_classes=2):\n",
    "        self.num_nodes = num_nodes\n",
    "        self.in_dim = in_dim\n",
    "        self.num_classes = num_classes\n",
    "        # 节点特征\n",
    "        self.x = torch.randn(num_nodes, in_dim)\n",
    "        # 标签：根据某些特征阈值构造\n",
    "        self.labels = (self.x[:, 0] + self.x[:, 1] > 0).long() % num_classes\n",
    "        # 构造随机邻接（无向）\n",
    "        adj = torch.zeros(num_nodes, num_nodes)\n",
    "        for i in range(num_nodes):\n",
    "            for j in range(i+1, num_nodes):\n",
    "                if torch.rand(1).item() < 0.02:  # 较稀疏\n",
    "                    adj[i, j] = 1\n",
    "                    adj[j, i] = 1\n",
    "        # 加自环 + 归一化（简单）\n",
    "        adj += torch.eye(num_nodes)\n",
    "        deg = adj.sum(-1, keepdim=True)\n",
    "        self.adj = adj / deg\n",
    "    def __len__(self):\n",
    "        return self.num_nodes\n",
    "    def __getitem__(self, idx):\n",
    "        return {\n",
    "            'x': self.x[idx],\n",
    "            'adj': self.adj,  # 所有节点共享图结构\n",
    "            'label': self.labels[idx]\n",
    "        }\n",
    "\n",
    "train_ds = SyntheticGraphDataset(num_nodes=256)\n",
    "val_ds = SyntheticGraphDataset(num_nodes=256)\n",
    "train_loader = DataLoader(train_ds, batch_size=32, shuffle=True)\n",
    "val_loader = DataLoader(val_ds, batch_size=64)\n",
    "print('Dataset ready:', len(train_ds), len(val_ds))"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "1f400155",
   "metadata": {},
   "source": [
    "## 4. 评估函数与基线测量"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "fa1f9d1b",
   "metadata": {},
   "outputs": [],
   "source": [
    "@torch.no_grad()\n",
    "def evaluate(dataloader, model):\n",
    "    model.eval()\n",
    "    total, correct, losses = 0, 0, []\n",
    "    criterion = nn.CrossEntropyLoss()\n",
    "    start = time.time()\n",
    "    for batch in dataloader:\n",
    "        x = batch['x'].to(device)\n",
    "        adj = batch['adj'].to(device)\n",
    "        y = batch['label'].to(device)\n",
    "        logits = model(x, adj)\n",
    "        loss = criterion(logits, y)\n",
    "        preds = logits.argmax(-1)\n",
    "        correct += (preds == y).sum().item()\n",
    "        total += y.size(0)\n",
    "        losses.append(loss.item())\n",
    "    elapsed = time.time() - start\n",
    "    return {\n",
    "        'acc': correct / max(1, total),\n",
    "        'loss': float(np.mean(losses)) if losses else 0.0,\n",
    "        'time_sec': elapsed,\n",
    "        'throughput': total / max(1e-6, elapsed)\n",
    "    }\n",
    "\n",
    "def count_parameters(model):\n",
    "    return sum(p.numel() for p in model.parameters())\n",
    "\n",
    "base_metrics = evaluate(val_loader, model)\n",
    "print('Baseline:', base_metrics)\n",
    "print('Params:', count_parameters(model))"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "d9601ccc",
   "metadata": {},
   "source": [
    "## 5. 稀疏度统计函数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "06a2c4ad",
   "metadata": {},
   "outputs": [],
   "source": [
    "def count_nonzero_and_sparsity(model: nn.Module):\n",
    "    total_params, total_nonzero = 0, 0\n",
    "    per_param = []\n",
    "    for name, p in model.named_parameters():\n",
    "        if p is None:\n",
    "            continue\n",
    "        numel = p.numel()\n",
    "        nz = int(torch.count_nonzero(p).item())\n",
    "        total_params += numel\n",
    "        total_nonzero += nz\n",
    "        per_param.append({\n",
    "            'name': name,\n",
    "            'numel': int(numel),\n",
    "            'nonzero': nz,\n",
    "            'sparsity': float(1 - nz / max(1, numel))\n",
    "        })\n",
    "    overall = {\n",
    "        'total_params': int(total_params),\n",
    "        'total_nonzero': int(total_nonzero),\n",
    "        'overall_sparsity': float(1 - total_nonzero / max(1, total_params))\n",
    "    }\n",
    "    return overall, per_param\n",
     "\n",
     "# A dense (unpruned) model should report ~0 overall sparsity here.\n",
     "print('Baseline sparsity:', count_nonzero_and_sparsity(model)[0])"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "d3d86997",
   "metadata": {},
   "source": [
    "## 6. 幅度全局非结构化剪枝 (10%)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "11ad5ba6",
   "metadata": {},
   "outputs": [],
   "source": [
    "def magnitude_global_prune_(model: nn.Module, amount: float = 0.10, modules_filter=(nn.Linear,)):\n",
    "    params = []\n",
    "    for m in model.modules():\n",
    "        if isinstance(m, modules_filter) and hasattr(m, 'weight'):\n",
    "            params.append(m.weight.data.view(-1))\n",
    "    if not params:\n",
    "        print('No Linear weights to prune')\n",
    "        return 0\n",
    "    weights = torch.cat(params)\n",
    "    k = int(amount * weights.numel())\n",
    "    if k <= 0:\n",
    "        return 0\n",
    "    thr = weights.abs().kthvalue(k).values.item()\n",
    "    pruned = 0\n",
    "    for m in model.modules():\n",
    "        if isinstance(m, modules_filter) and hasattr(m, 'weight'):\n",
    "            w = m.weight.data\n",
    "            mask = (w.abs() >= thr)\n",
    "            pruned += int((~mask).sum().item())\n",
    "            w.mul_(mask)\n",
    "    return pruned\n",
    "\n",
    "print('Magnitude prune 10%...')\n",
    "pruned = magnitude_global_prune_(model, amount=0.10)\n",
    "print('Pruned params:', pruned)\n",
    "print('Metrics:', evaluate(val_loader, model))\n",
    "print('Sparsity:', count_nonzero_and_sparsity(model)[0])"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "c8c006df",
   "metadata": {},
   "source": [
    "## 7. SNIP 梯度重要性剪枝 (5%)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e1bc80de",
   "metadata": {},
   "outputs": [],
   "source": [
    "def snip_prune_(model: nn.Module, dataloader: DataLoader, amount: float = 0.05, modules_filter=(nn.Linear,)):\n",
    "    model.train()\n",
    "    for p in model.parameters():\n",
    "        if p.grad is not None:\n",
    "            p.grad = None\n",
    "    batch = next(iter(dataloader))\n",
    "    x = batch['x'].to(device)\n",
    "    adj = batch['adj'].to(device)\n",
    "    y = batch['label'].to(device)\n",
    "    criterion = nn.CrossEntropyLoss()\n",
    "    logits = model(x, adj)\n",
    "    loss = criterion(logits, y)\n",
    "    loss.backward()\n",
    "\n",
    "    scores = []\n",
    "    params_refs = []\n",
    "    for m in model.modules():\n",
    "        if isinstance(m, modules_filter) and hasattr(m, 'weight') and m.weight.grad is not None:\n",
    "            w = m.weight\n",
    "            s = (w.grad * w).abs().detach().view(-1)\n",
    "            scores.append(s)\n",
    "            params_refs.append(w)\n",
    "    if not scores:\n",
    "        print('SNIP: no scores collected')\n",
    "        return 0\n",
    "    all_scores = torch.cat(scores)\n",
    "    k = int(amount * all_scores.numel())\n",
    "    if k <= 0:\n",
    "        return 0\n",
    "    thr = all_scores.kthvalue(k).values.item()\n",
    "    pruned = 0\n",
    "    for w, s in zip(params_refs, scores):\n",
    "        s_full = s.view_as(w)\n",
    "        mask = (s_full >= thr)\n",
    "        pruned += int((~mask).sum().item())\n",
    "        with torch.no_grad():\n",
    "            w.mul_(mask)\n",
    "    return pruned\n",
    "\n",
    "print('SNIP prune 5%...')\n",
    "snip_cnt = snip_prune_(model, train_loader, amount=0.05)\n",
    "print('SNIP pruned params:', snip_cnt)\n",
    "print('Metrics:', evaluate(val_loader, model))\n",
    "print('Sparsity:', count_nonzero_and_sparsity(model)[0])"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "8c57414b",
   "metadata": {},
   "source": [
    "## 8. 使用 nn.utils.prune L1 非结构化剪枝 (5%) 并固化"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e4de65da",
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch.nn.utils.prune as prune\n",
    "\n",
    "def apply_l1_unstructured(model: nn.Module, amount: float = 0.05, modules_filter=(nn.Linear,)):\n",
    "    params = []\n",
    "    for m in model.modules():\n",
    "        if isinstance(m, modules_filter) and hasattr(m, 'weight'):\n",
    "            params.append((m, 'weight'))\n",
    "    if not params:\n",
    "        print('No Linear params to prune')\n",
    "        return\n",
    "    prune.global_unstructured(params, pruning_method=prune.L1Unstructured, amount=amount)\n",
    "    print('Applied global L1 prune')\n",
    "\n",
    "apply_l1_unstructured(model, amount=0.05)\n",
    "# 固化权重（移除重参数化）\n",
    "for m in model.modules():\n",
    "    if isinstance(m, nn.Linear) and hasattr(m, 'weight_orig'):\n",
    "        prune.remove(m, 'weight')\n",
    "\n",
    "print('Metrics:', evaluate(val_loader, model))\n",
    "print('Sparsity:', count_nonzero_and_sparsity(model)[0])"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "4f9ee53c",
   "metadata": {},
   "source": [
    "## 9. “结构化样式”神经元级剪枝（按输出通道L1范数）"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "c492fe04",
   "metadata": {},
   "outputs": [],
   "source": [
    "def prune_linear_out_neurons_(linear: nn.Linear, amount: float = 0.1):\n",
    "    # 选择输出通道（行）L1最小的若干作为“剪除目标”，直接置零\n",
    "    W = linear.weight.data  # [out, in]\n",
    "    out_l1 = W.abs().sum(dim=1)\n",
    "    k = int(amount * W.size(0))\n",
    "    if k <= 0:\n",
    "        return 0\n",
    "    thr = out_l1.kthvalue(k).values.item()\n",
    "    mask_rows = (out_l1 >= thr).float().unsqueeze(1)  # [out,1]\n",
    "    # 置零对应输出神经元的权重与偏置\n",
    "    W.mul_(mask_rows)\n",
    "    if linear.bias is not None:\n",
    "        b = linear.bias.data\n",
    "        b.mul_(mask_rows.squeeze(1))\n",
    "    return int((mask_rows == 0).sum().item())\n",
    "\n",
    "# 演示对占位模型第二层fc2进行 10% 神经元剪枝\n",
    "removed = prune_linear_out_neurons_(model.fc2, amount=0.10)\n",
    "print('Pruned output neurons:', removed)\n",
    "print('Metrics:', evaluate(val_loader, model))\n",
    "print('Sparsity:', count_nonzero_and_sparsity(model)[0])"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "818a28f4",
   "metadata": {},
   "source": [
    "## 10. （可选）剪枝后简短微调循环"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "abebbcae",
   "metadata": {},
   "outputs": [],
   "source": [
    "def fine_tune(model, dataloader, epochs=2, lr=1e-3):\n",
    "    model.train()\n",
    "    opt = torch.optim.Adam([p for p in model.parameters() if p.requires_grad], lr=lr)\n",
    "    criterion = nn.CrossEntropyLoss()\n",
    "    for ep in range(epochs):\n",
    "        tot_loss = 0\n",
    "        for batch in dataloader:\n",
    "            x = batch['x'].to(device)\n",
    "            adj = batch['adj'].to(device)\n",
    "            y = batch['label'].to(device)\n",
    "            opt.zero_grad()\n",
    "            logits = model(x, adj)\n",
    "            loss = criterion(logits, y)\n",
    "            loss.backward()\n",
    "            opt.step()\n",
    "            tot_loss += loss.item()\n",
    "        print(f'Epoch {ep+1}: loss={tot_loss/len(dataloader):.4f}')\n",
    "    return model\n",
    "\n",
    "print('微调2个epoch...')\n",
    "fine_tune(model, train_loader, epochs=2, lr=1e-3)\n",
    "print('Fine-tune metrics:', evaluate(val_loader, model))"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "60762bc9",
   "metadata": {},
   "source": [
    "## 11. 保存剪枝后模型与后续建议"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "5c11df6b",
   "metadata": {},
   "outputs": [],
   "source": [
    "save_dir = 'apgcc_pruned'\n",
    "os.makedirs(save_dir, exist_ok=True)\n",
    "path = os.path.join(save_dir, 'model.bin')\n",
    "torch.save(model.state_dict(), path)\n",
    "print('Saved model to', path)\n",
    "print('Final metrics:', evaluate(val_loader, model))\n",
    "print('Final sparsity:', count_nonzero_and_sparsity(model)[0])"
   ]
  }
 ],
 "metadata": {
  "language_info": {
   "name": "python"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
