{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "d38179e2-8371-4de4-9959-896b9b2b1e2f",
   "metadata": {
    "collapsed": true,
    "jupyter": {
     "outputs_hidden": true
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Looking in indexes: https://pypi.tuna.tsinghua.edu.cn/simple\n",
      "Collecting scikit-learn\n",
      "  Downloading https://pypi.tuna.tsinghua.edu.cn/packages/26/92/85ec172418f39474c1cd0221d611345d4f433fc4ee2fc68e01f524ccc4e4/scikit_learn-1.7.1-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl (9.7 MB)\n",
      "\u001b[2K     \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m9.7/9.7 MB\u001b[0m \u001b[31m74.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m:00:01\u001b[0m00:01\u001b[0m\n",
      "\u001b[?25hRequirement already satisfied: numpy>=1.22.0 in /environment/miniconda3/lib/python3.11/site-packages (from scikit-learn) (1.26.4)\n",
      "Collecting scipy>=1.8.0 (from scikit-learn)\n",
      "  Downloading https://pypi.tuna.tsinghua.edu.cn/packages/bb/ba/63f402e74875486b87ec6506a4f93f6d8a0d94d10467280f3d9d7837ce3a/scipy-1.16.1-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl (35.4 MB)\n",
      "\u001b[2K     \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m35.4/35.4 MB\u001b[0m \u001b[31m50.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m00:01\u001b[0m00:01\u001b[0m\n",
      "\u001b[?25hCollecting joblib>=1.2.0 (from scikit-learn)\n",
      "  Downloading https://pypi.tuna.tsinghua.edu.cn/packages/7d/4f/1195bbac8e0c2acc5f740661631d8d750dc38d4a32b23ee5df3cde6f4e0d/joblib-1.5.1-py3-none-any.whl (307 kB)\n",
      "\u001b[2K     \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m307.7/307.7 kB\u001b[0m \u001b[31m20.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
      "\u001b[?25hCollecting threadpoolctl>=3.1.0 (from scikit-learn)\n",
      "  Downloading https://pypi.tuna.tsinghua.edu.cn/packages/32/d5/f9a850d79b0851d1d4ef6456097579a9005b31fea68726a4ae5f2d82ddd9/threadpoolctl-3.6.0-py3-none-any.whl (18 kB)\n",
      "Installing collected packages: threadpoolctl, scipy, joblib, scikit-learn\n",
      "Successfully installed joblib-1.5.1 scikit-learn-1.7.1 scipy-1.16.1 threadpoolctl-3.6.0\n"
     ]
    }
   ],
   "source": [
    "!pip install scikit-learn"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "357d29c2-11bf-4f5b-8df7-c348fd2d52b4",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Using device: cuda\n",
      "Downloading http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz\n",
      "Failed to download (trying next):\n",
      "HTTP Error 404: Not Found\n",
      "\n",
      "Downloading https://ossci-datasets.s3.amazonaws.com/mnist/train-images-idx3-ubyte.gz\n",
      "Downloading https://ossci-datasets.s3.amazonaws.com/mnist/train-images-idx3-ubyte.gz to ./data/MNIST/raw/train-images-idx3-ubyte.gz\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 9912422/9912422 [13:05<00:00, 12612.92it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Extracting ./data/MNIST/raw/train-images-idx3-ubyte.gz to ./data/MNIST/raw\n",
      "\n",
      "Downloading http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz\n",
      "Failed to download (trying next):\n",
      "HTTP Error 404: Not Found\n",
      "\n",
      "Downloading https://ossci-datasets.s3.amazonaws.com/mnist/train-labels-idx1-ubyte.gz\n",
      "Downloading https://ossci-datasets.s3.amazonaws.com/mnist/train-labels-idx1-ubyte.gz to ./data/MNIST/raw/train-labels-idx1-ubyte.gz\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 28881/28881 [00:01<00:00, 23569.28it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Extracting ./data/MNIST/raw/train-labels-idx1-ubyte.gz to ./data/MNIST/raw\n",
      "\n",
      "Downloading http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz\n",
      "Failed to download (trying next):\n",
      "HTTP Error 404: Not Found\n",
      "\n",
      "Downloading https://ossci-datasets.s3.amazonaws.com/mnist/t10k-images-idx3-ubyte.gz\n",
      "Downloading https://ossci-datasets.s3.amazonaws.com/mnist/t10k-images-idx3-ubyte.gz to ./data/MNIST/raw/t10k-images-idx3-ubyte.gz\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 1648877/1648877 [02:41<00:00, 10192.05it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Extracting ./data/MNIST/raw/t10k-images-idx3-ubyte.gz to ./data/MNIST/raw\n",
      "\n",
      "Downloading http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz\n",
      "Failed to download (trying next):\n",
      "HTTP Error 404: Not Found\n",
      "\n",
      "Downloading https://ossci-datasets.s3.amazonaws.com/mnist/t10k-labels-idx1-ubyte.gz\n",
      "Downloading https://ossci-datasets.s3.amazonaws.com/mnist/t10k-labels-idx1-ubyte.gz to ./data/MNIST/raw/t10k-labels-idx1-ubyte.gz\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 4542/4542 [00:00<00:00, 1474955.77it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Extracting ./data/MNIST/raw/t10k-labels-idx1-ubyte.gz to ./data/MNIST/raw\n",
      "\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "\n"
     ]
    }
   ],
   "source": [
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.optim as optim\n",
    "import torchvision\n",
    "import torchvision.transforms as transforms\n",
    "import numpy as np\n",
    "import matplotlib.pyplot as plt\n",
    "from tqdm import tqdm\n",
    "from sklearn.metrics.pairwise import cosine_similarity\n",
    "\n",
    "# 检查GPU可用性\n",
    "device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n",
    "print(f\"Using device: {device}\")\n",
    "\n",
    "# 设置随机种子以确保可重复性\n",
    "torch.manual_seed(42)\n",
    "np.random.seed(42)\n",
    "\n",
    "# 数据加载和预处理\n",
    "transform = transforms.Compose([\n",
    "    transforms.ToTensor(),\n",
    "    transforms.Normalize((0.1307,), (0.3081,))\n",
    "])\n",
    "\n",
    "train_set = torchvision.datasets.MNIST(root='./data', train=True, download=True, transform=transform)\n",
    "test_set = torchvision.datasets.MNIST(root='./data', train=False, download=True, transform=transform)\n",
    "\n",
    "train_loader = torch.utils.data.DataLoader(train_set, batch_size=128, shuffle=True)\n",
    "test_loader = torch.utils.data.DataLoader(test_set, batch_size=1000, shuffle=False)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 95,
   "id": "43dbcb51-ee2a-4262-9dd6-f89d18d41236",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 1/15: 100%|██████████| 469/469 [00:13<00:00, 34.10it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 1, Loss: 0.4024, Test Acc: 90.84%\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 2/15: 100%|██████████| 469/469 [00:13<00:00, 33.57it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 2, Loss: 0.1744, Test Acc: 93.28%\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 3/15: 100%|██████████| 469/469 [00:13<00:00, 34.64it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 3, Loss: 0.1293, Test Acc: 94.59%\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 4/15: 100%|██████████| 469/469 [00:13<00:00, 34.66it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 4, Loss: 0.1025, Test Acc: 92.76%\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 5/15: 100%|██████████| 469/469 [00:13<00:00, 34.99it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 5, Loss: 0.0847, Test Acc: 94.94%\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 6/15: 100%|██████████| 469/469 [00:13<00:00, 34.99it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 6, Loss: 0.0744, Test Acc: 94.51%\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 7/15: 100%|██████████| 469/469 [00:13<00:00, 34.13it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 7, Loss: 0.0641, Test Acc: 95.04%\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 8/15: 100%|██████████| 469/469 [00:13<00:00, 34.29it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 8, Loss: 0.0554, Test Acc: 94.88%\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 9/15: 100%|██████████| 469/469 [00:13<00:00, 33.74it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 9, Loss: 0.0484, Test Acc: 93.97%\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 10/15: 100%|██████████| 469/469 [00:14<00:00, 33.41it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 10, Loss: 0.0427, Test Acc: 95.00%\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 11/15: 100%|██████████| 469/469 [00:13<00:00, 34.60it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 11, Loss: 0.0391, Test Acc: 93.73%\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 12/15: 100%|██████████| 469/469 [00:14<00:00, 33.37it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 12, Loss: 0.0338, Test Acc: 94.15%\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 13/15: 100%|██████████| 469/469 [00:14<00:00, 33.47it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 13, Loss: 0.0316, Test Acc: 95.17%\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 14/15: 100%|██████████| 469/469 [00:13<00:00, 34.36it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 14, Loss: 0.0277, Test Acc: 94.88%\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 15/15: 100%|██████████| 469/469 [00:13<00:00, 34.66it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 15, Loss: 0.0260, Test Acc: 93.34%\n"
     ]
    }
   ],
   "source": [
    "# 定义MLP模型（使用float64精度）\n",
    "class MLP(nn.Module):\n",
    "    def __init__(self):\n",
    "        super(MLP, self).__init__()\n",
    "        self.fc1 = nn.Linear(28*28, 64)\n",
    "        self.fc2 = nn.Linear(64, 32)\n",
    "        self.fc3 = nn.Linear(32, 10)\n",
    "        self.relu = nn.ReLU()\n",
    "        self.double()  # 设置所有参数为float64\n",
    "\n",
    "    def forward(self, x):\n",
    "        x = x.view(-1, 28*28)\n",
    "        x = self.relu(self.fc1(x))\n",
    "        x = self.relu(self.fc2(x))\n",
    "        x = self.fc3(x)\n",
    "        return x\n",
    "\n",
    "# 实例化模型并移动到GPU\n",
    "model = MLP().to(device)\n",
    "\n",
    "# 训练函数\n",
    "def train_model(model, train_loader, test_loader, epochs=5):\n",
    "    criterion = nn.CrossEntropyLoss()\n",
    "    optimizer = optim.Adam(model.parameters(), lr=0.001)\n",
    "    \n",
    "    for epoch in range(epochs):\n",
    "        model.train()\n",
    "        running_loss = 0.0\n",
    "        for images, labels in tqdm(train_loader, desc=f'Epoch {epoch+1}/{epochs}'):\n",
    "            # 确保输入是double类型\n",
    "            images = images.double().to(device)\n",
    "            labels = labels.to(device)\n",
    "            \n",
    "            optimizer.zero_grad()\n",
    "            outputs = model(images)  # 现在类型匹配\n",
    "            loss = criterion(outputs, labels)\n",
    "            loss.backward()\n",
    "            optimizer.step()\n",
    "            \n",
    "            running_loss += loss.item()\n",
    "        \n",
    "        # 验证\n",
    "        model.eval()\n",
    "        correct = 0\n",
    "        total = 0\n",
    "        with torch.no_grad():\n",
    "            for images, labels in test_loader:\n",
    "                images = images.double().to(device)\n",
    "                labels = labels.to(device)\n",
    "                outputs = model(images)\n",
    "                _, predicted = torch.max(outputs.data, 1)\n",
    "                total += labels.size(0)\n",
    "                correct += (predicted == labels).sum().item()\n",
    "        \n",
    "        print(f'Epoch {epoch+1}, Loss: {running_loss/len(train_loader):.4f}, '\n",
    "              f'Test Acc: {100 * correct / total:.2f}%')\n",
    "\n",
    "# 训练模型\n",
    "train_model(model, train_loader, test_loader, epochs=15)\n",
    "\n",
    "# 保存模型\n",
    "torch.save(model.state_dict(), 'mlp_mnist.pth')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 47,
   "id": "7bdd97ed-5355-45d5-b017-516646b3b211",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 选择50个测试样本\n",
    "test_images, test_labels = next(iter(test_loader))\n",
    "test_images, test_labels = test_images[:50].to(device), test_labels[:50].to(device)\n",
    "\n",
    "# 获取原始输出\n",
    "model.eval()\n",
    "with torch.no_grad():\n",
    "    test_images = test_images.double()\n",
    "    original_output = model(test_images).cpu().numpy()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 45,
   "id": "dd2a1c26-b999-4180-965e-6050c19c3254",
   "metadata": {},
   "outputs": [],
   "source": [
    "def evaluate_importance(model, test_images, test_labels):\n",
    "    layers = [model.fc1, model.fc2, model.fc3]\n",
    "    results = {\n",
    "        'cosine': {1: [], 2: [], 3: []},\n",
    "        'diff_norm': {1: [], 2: [], 3: []},\n",
    "        'accuracy': {1: [], 2: [], 3: []}\n",
    "    }\n",
    "    \n",
    "    # 获取原始输出（在GPU上）\n",
    "    with torch.no_grad():\n",
    "        original_output = model(test_images)\n",
    "        original_preds = torch.argmax(original_output, dim=1)\n",
    "    \n",
    "    # 逐层评估\n",
    "    for layer_idx, layer in enumerate(layers, start=1):\n",
    "        print(f\"\\nEvaluating layer {layer_idx} importance...\")\n",
    "        weight = layer.weight.data.clone()\n",
    "        num_neurons_out, num_neurons_in = weight.shape\n",
    "        \n",
    "        # 初始化重要性矩阵\n",
    "        cos_importance = torch.zeros((num_neurons_out, num_neurons_in), \n",
    "                                     dtype=torch.float64, device=device)\n",
    "        norm_importance = torch.zeros((num_neurons_out, num_neurons_in), \n",
    "                                      dtype=torch.float64, device=device)\n",
    "        acc_importance = torch.zeros((num_neurons_out, num_neurons_in), \n",
    "                                     dtype=torch.float64, device=device)\n",
    "        \n",
    "        # 使用GPU并行计算差向量模长\n",
    "        def compute_diff_norm(orig, pruned):\n",
    "            diff = orig - pruned\n",
    "            return torch.norm(diff, p=2, dim=1).mean()\n",
    "        \n",
    "        # 使用GPU并行计算余弦相似度\n",
    "        def compute_cos_sim(orig, pruned):\n",
    "            orig_norm = torch.norm(orig, p=2, dim=1)\n",
    "            pruned_norm = torch.norm(pruned, p=2, dim=1)\n",
    "            dot_product = torch.sum(orig * pruned, dim=1)\n",
    "            return (dot_product / (orig_norm * pruned_norm)).mean()\n",
    "        \n",
    "        # 使用GPU并行计算正确率\n",
    "        def compute_accuracy(pruned_output):\n",
    "            pruned_preds = torch.argmax(pruned_output, dim=1)\n",
    "            return torch.where(bool_tensor, 1.0, 0.0).float().mean()\n",
    "        \n",
    "        # 逐连接评估（使用GPU加速）\n",
    "        for i in tqdm(range(num_neurons_out)):\n",
    "            # 批量处理同一输出神经元的连接\n",
    "            for j in range(num_neurons_in):\n",
    "                # 保存原始权重\n",
    "                original_weight = weight[i, j].item()\n",
    "                \n",
    "                # 剪枝：将权重置零\n",
    "                layer.weight.data[i, j] = 0\n",
    "                \n",
    "                # 计算剪枝后的输出\n",
    "                with torch.no_grad():\n",
    "                    pruned_output = model(test_images)\n",
    "                \n",
    "                # GPU计算指标\n",
    "                cos_importance[i, j] = -compute_cos_sim(original_output, pruned_output)\n",
    "                norm_importance[i, j] = compute_diff_norm(original_output, pruned_output)\n",
    "                acc_importance[i, j] = compute_accuracy(pruned_output)\n",
    "                \n",
    "                # 恢复权重\n",
    "                layer.weight.data[i, j] = original_weight\n",
    "        \n",
    "        # 将结果移到CPU并转换为numpy数组\n",
    "        results['cosine'][layer_idx] = cos_importance.cpu().numpy()\n",
    "        results['diff_norm'][layer_idx] = norm_importance.cpu().numpy()\n",
    "        results['accuracy'][layer_idx] = acc_importance.cpu().numpy()\n",
    "    \n",
    "    return results"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 48,
   "id": "1cb85fa5-f1fc-4f1a-a502-202e68f0cb3f",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "Evaluating layer 1 importance...\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 64/64 [00:36<00:00,  1.76it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "Evaluating layer 2 importance...\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 32/32 [00:01<00:00, 21.35it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "Evaluating layer 3 importance...\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 10/10 [00:00<00:00, 42.62it/s]\n"
     ]
    }
   ],
   "source": [
    "# 评估重要性\n",
    "importance_results = evaluate_importance(model, test_images, original_output)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 66,
   "id": "8ff55e3e-491e-4687-9765-44bfdb6bd85a",
   "metadata": {},
   "outputs": [],
   "source": [
    "from matplotlib.colors import LinearSegmentedColormap"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "9831a328-0a00-43c6-bd2d-6adfe77070c0",
   "metadata": {},
   "outputs": [],
   "source": [
    "def plot_combined_heatmaps(results):\n",
    "    # 定义三种重要性指标\n",
    "    metrics = ['cosine', 'diff_norm', 'accuracy']\n",
    "    titles = ['Cosine Similarity Importance', 'Difference Norm Importance', 'Accuracy Importance']\n",
    "    layer_names = ['Input to Hidden (784×64)', 'Hidden to Hidden (64×32)', 'Hidden to Output (32×10)']\n",
    "    \n",
    "    # 定义经典热成像配色（红蓝渐变）\n",
    "    colormap = plt.cm.coolwarm\n",
    "    #colormap = plt.cm.CMRmap\n",
    "    colors = [\n",
    "        (0, 0, 0.5),    # 深蓝\n",
    "        (0, 0, 1),      # 蓝色\n",
    "        (0, 1, 1),      # 青色\n",
    "        (0, 1, 0),      # 绿色\n",
    "        (1, 1, 0),      # 黄色\n",
    "        (1, 0, 0)       # 红色\n",
    "    ]\n",
    "    cmap_name = 'weight_heatmap'\n",
    "    colormap = LinearSegmentedColormap.from_list(cmap_name, colors)\n",
    "    \n",
    "    # 为每种重要性指标创建一张图\n",
    "    for metric_idx, metric in enumerate(metrics):\n",
    "        # 获取各层数据\n",
    "        layer1_data = results[metric][1]\n",
    "        layer2_data = results[metric][2]\n",
    "        layer3_data = results[metric][3]\n",
    "        \n",
    "        # 创建图形，竖向排列三个子图\n",
    "        fig = plt.figure(figsize=(10, 18))\n",
    "        gs = fig.add_gridspec(3, 2, width_ratios=[15, 1], height_ratios=[1, 1, 1])\n",
    "        \n",
    "        # 创建三个热图子图\n",
    "        ax1 = fig.add_subplot(gs[0, 0])\n",
    "        ax2 = fig.add_subplot(gs[1, 0])\n",
    "        ax3 = fig.add_subplot(gs[2, 0])\n",
    "        \n",
    "        # 创建共享的颜色条轴\n",
    "        cax = fig.add_subplot(gs[:, 1])\n",
    "        \n",
    "        # 绘制第一层热图\n",
    "        im1 = ax1.imshow(layer1_data, cmap=colormap)#, aspect=12.25\n",
    "        ax1.set_title(f'Layer 1: {layer_names[0]}', fontsize=12)\n",
    "        ax1.set_xlabel('Input Neurons (784)', fontsize=10)\n",
    "        ax1.set_ylabel('Output Neurons (512)', fontsize=10)\n",
    "        \n",
    "        # 绘制第二层热图\n",
    "        im2 = ax2.imshow(layer2_data, cmap=colormap)#, aspect=2\n",
    "        ax2.set_title(f'Layer 2: {layer_names[1]}', fontsize=12)\n",
    "        ax2.set_xlabel('Input Neurons (512)', fontsize=10)\n",
    "        ax2.set_ylabel('Output Neurons (256)', fontsize=10)\n",
    "        \n",
    "        # 绘制第三层热图\n",
    "        im3 = ax3.imshow(layer3_data, cmap=colormap)#, aspect=3.2\n",
    "        ax3.set_title(f'Layer 3: {layer_names[2]}', fontsize=12)\n",
    "        ax3.set_xlabel('Input Neurons (256)', fontsize=10)\n",
    "        ax3.set_ylabel('Output Neurons (10)', fontsize=10)\n",
    "        \n",
    "        # 添加主标题\n",
    "        fig.suptitle(titles[metric_idx], fontsize=16, y=0.95)\n",
    "        \n",
    "        # 添加共享颜色条\n",
    "        vmin = min(layer1_data.min(), layer2_data.min(), layer3_data.min())\n",
    "        vmax = max(layer1_data.max(), layer2_data.max(), layer3_data.max())\n",
    "        norm = plt.Normalize(vmin, vmax)\n",
    "        sm = plt.cm.ScalarMappable(cmap=colormap, norm=norm)\n",
    "        sm.set_array([])\n",
    "        \n",
    "        # 添加颜色条\n",
    "        cbar = fig.colorbar(sm, cax=cax)\n",
    "        cbar.set_label('Importance Value', fontsize=12)\n",
    "        \n",
    "        plt.tight_layout()\n",
    "        plt.subplots_adjust(right=0.85, hspace=0.3)\n",
    "        plt.savefig(f'{metric}_importance_all_layers.png', dpi=300, bbox_inches='tight')\n",
    "        plt.close()\n",
    "\n",
    "# 绘制组合热图\n",
    "plot_combined_heatmaps(importance_results)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "26f643e2-db2a-451d-9b9a-a31b5dd98621",
   "metadata": {},
   "source": [
    "## 剪枝"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 92,
   "id": "a4fac1ff-161d-4c29-961c-7b2546593a94",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Original model accuracy: 94.91%\n",
      "\n",
      "Performing magnitude-based pruning...\n",
      "\n",
      "Performing cosine similarity-based pruning...\n",
      "\n",
      "Performing difference norm-based pruning...\n",
      "\n",
      "Pruning Results:\n",
      "Original Accuracy: 94.91%\n",
      "Magnitude Pruning (10%): Accuracy = 41.22%, Sparsity = 95.00%\n",
      "Cosine Similarity Pruning (10%): Accuracy = 57.68%, Sparsity = 95.00%\n",
      "Difference Norm Pruning (10%): Accuracy = 46.95%, Sparsity = 95.00%\n"
     ]
    }
   ],
   "source": [
    "import torch\n",
    "import torch.nn.utils.prune as prune\n",
    "import numpy as np\n",
    "from tqdm import tqdm\n",
    "\n",
    "def global_pruning(model, pruning_method, amount=0.1, importance_results=None):\n",
    "    \"\"\"\n",
    "    执行全局剪枝\n",
    "    :param model: 待剪枝模型\n",
    "    :param pruning_method: 剪枝方法 ('magnitude', 'cosine', 'diff_norm')\n",
    "    :param amount: 剪枝比例\n",
    "    :param importance_results: 重要性评估结果\n",
    "    \"\"\"\n",
    "    # 获取所有可剪枝参数\n",
    "    parameters_to_prune = []\n",
    "    for name, module in model.named_modules():\n",
    "        if isinstance(module, nn.Linear):\n",
    "            parameters_to_prune.append((module, 'weight'))\n",
    "    \n",
    "    # 根据剪枝方法选择重要性指标\n",
    "    if pruning_method == 'magnitude':\n",
    "        # 绝对值剪枝\n",
    "        prune.global_unstructured(\n",
    "            parameters_to_prune,\n",
    "            pruning_method=prune.L1Unstructured,\n",
    "            amount=amount\n",
    "        )\n",
    "    else:\n",
    "        # 基于重要性的剪枝\n",
    "        if importance_results is None:\n",
    "            raise ValueError(\"Importance results required for this pruning method\")\n",
    "        \n",
    "        # 收集所有连接的重要性分数\n",
    "        all_importances = []\n",
    "        for i, (module, _) in enumerate(parameters_to_prune):\n",
    "            layer_idx = i + 1\n",
    "            if pruning_method == 'cosine':\n",
    "                imp = importance_results['cosine'][layer_idx]\n",
    "            elif pruning_method == 'diff_norm':\n",
    "                imp = importance_results['diff_norm'][layer_idx]\n",
    "            else:\n",
    "                raise ValueError(f\"Unknown pruning method: {pruning_method}\")\n",
    "            \n",
    "            # 展平重要性矩阵\n",
    "            all_importances.append(imp.flatten())\n",
    "        \n",
    "        # 合并所有重要性分数\n",
    "        global_importances = np.concatenate(all_importances)\n",
    "        \n",
    "        # 计算全局阈值（保留前1-amount的重要连接）\n",
    "        threshold = np.percentile(global_importances, amount * 100)\n",
    "        \n",
    "        # 应用剪枝\n",
    "        for i, (module, param_name) in enumerate(parameters_to_prune):\n",
    "            layer_idx = i + 1\n",
    "            if pruning_method == 'cosine':\n",
    "                imp = importance_results['cosine'][layer_idx]\n",
    "            else:\n",
    "                imp = importance_results['diff_norm'][layer_idx]\n",
    "            \n",
    "            # 创建掩码（重要性低于阈值的连接被剪枝）\n",
    "            mask = torch.tensor(imp > threshold, dtype=torch.bool).to(device)\n",
    "            \n",
    "            # 应用自定义剪枝\n",
    "            prune.custom_from_mask(module, name=param_name, mask=mask)\n",
    "    \n",
    "    # 移除剪枝重参数化（使剪枝永久生效）\n",
    "    for module, param_name in parameters_to_prune:\n",
    "        prune.remove(module, param_name)\n",
    "\n",
    "def evaluate_model(model, test_loader):\n",
    "    \"\"\"评估模型在测试集上的准确率\"\"\"\n",
    "    model.eval()\n",
    "    correct = 0\n",
    "    total = 0\n",
    "    with torch.no_grad():\n",
    "        for images, labels in test_loader:\n",
    "            images = images.double().to(device)\n",
    "            labels = labels.to(device)\n",
    "            outputs = model(images)\n",
    "            _, predicted = torch.max(outputs.data, 1)\n",
    "            total += labels.size(0)\n",
    "            correct += (predicted == labels).sum().item()\n",
    "    return correct / total\n",
    "\n",
    "# 加载原始模型（确保使用训练好的模型）\n",
    "original_model = MLP().to(device)\n",
    "original_model.load_state_dict(torch.load('mlp_mnist.pth'))\n",
    "original_model.double()\n",
    "\n",
    "# 评估原始模型性能\n",
    "original_acc = evaluate_model(original_model, test_loader)\n",
    "print(f\"Original model accuracy: {original_acc*100:.2f}%\")\n",
    "\n",
    "# 创建剪枝模型副本\n",
    "magnitude_model = MLP().to(device)\n",
    "magnitude_model.load_state_dict(original_model.state_dict())\n",
    "magnitude_model.double()\n",
    "\n",
    "cosine_model = MLP().to(device)\n",
    "cosine_model.load_state_dict(original_model.state_dict())\n",
    "cosine_model.double()\n",
    "\n",
    "diff_norm_model = MLP().to(device)\n",
    "diff_norm_model.load_state_dict(original_model.state_dict())\n",
    "diff_norm_model.double()\n",
    "\n",
    "# 执行不同方法的10%全局剪枝\n",
    "print(\"\\nPerforming magnitude-based pruning...\")\n",
    "global_pruning(magnitude_model, 'magnitude', amount=0.95)\n",
    "\n",
    "print(\"\\nPerforming cosine similarity-based pruning...\")\n",
    "global_pruning(cosine_model, 'cosine', amount=0.95, importance_results=importance_results)\n",
    "\n",
    "print(\"\\nPerforming difference norm-based pruning...\")\n",
    "global_pruning(diff_norm_model, 'diff_norm', amount=0.95, importance_results=importance_results)\n",
    "\n",
    "# 计算稀疏度\n",
    "def calculate_sparsity(model):\n",
    "    total_params = 0\n",
    "    zero_params = 0\n",
    "    for name, module in model.named_modules():\n",
    "        if isinstance(module, nn.Linear):\n",
    "            weight = module.weight.data\n",
    "            total_params += weight.numel()\n",
    "            zero_params += torch.sum(weight == 0).item()\n",
    "    return zero_params / total_params\n",
    "\n",
    "# 评估剪枝后模型性能\n",
    "magnitude_acc = evaluate_model(magnitude_model, test_loader)\n",
    "cosine_acc = evaluate_model(cosine_model, test_loader)\n",
    "diff_norm_acc = evaluate_model(diff_norm_model, test_loader)\n",
    "\n",
    "# 打印结果\n",
    "print(\"\\nPruning Results:\")\n",
    "print(f\"Original Accuracy: {original_acc*100:.2f}%\")\n",
    "print(f\"Magnitude Pruning (10%): Accuracy = {magnitude_acc*100:.2f}%, Sparsity = {calculate_sparsity(magnitude_model)*100:.2f}%\")\n",
    "print(f\"Cosine Similarity Pruning (10%): Accuracy = {cosine_acc*100:.2f}%, Sparsity = {calculate_sparsity(cosine_model)*100:.2f}%\")\n",
    "print(f\"Difference Norm Pruning (10%): Accuracy = {diff_norm_acc*100:.2f}%, Sparsity = {calculate_sparsity(diff_norm_model)*100:.2f}%\")\n",
    "\n",
    "# 保存剪枝后的模型\n",
    "torch.save(magnitude_model.state_dict(), 'magnitude_pruned_model.pth')\n",
    "torch.save(cosine_model.state_dict(), 'cosine_pruned_model.pth')\n",
    "torch.save(diff_norm_model.state_dict(), 'diff_norm_pruned_model.pth')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 84,
   "id": "30c942dc-7904-4fdf-886c-f0f6d90490d8",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "All pruning connectivity plots saved successfully!\n"
     ]
    }
   ],
   "source": [
    "import matplotlib.pyplot as plt\n",
    "import numpy as np\n",
    "from matplotlib.colors import ListedColormap\n",
    "\n",
    "def plot_pruned_connections(model, title, filename):\n",
    "    \"\"\"\n",
    "    Plot the connectivity pattern of a pruned network: one heatmap per layer,\n",
    "    black = connection kept (non-zero weight), white = connection pruned.\n",
    "    :param model: pruned model (expects attributes fc1, fc2, fc3)\n",
    "    :param title: figure title\n",
    "    :param filename: output file name\n",
    "    \"\"\"\n",
    "    layers = [model.fc1, model.fc2, model.fc3]\n",
    "    layer_names = ['Input to Hidden (784×64)', 'Hidden to Hidden (64×32)', 'Hidden to Output (32×10)']\n",
    "    \n",
    "    # Two-color map: white = pruned (0), black = active (1)\n",
    "    cmap = ListedColormap(['white', 'black'])#dodgerblue\n",
    "    \n",
    "    # Create the figure: three stacked panels, one per layer\n",
    "    fig = plt.figure(figsize=(12, 18))\n",
    "    gs = fig.add_gridspec(3, 1, height_ratios=[1, 1, 1])\n",
    "    \n",
    "    for i, layer in enumerate(layers):\n",
    "        ax = fig.add_subplot(gs[i])\n",
    "        \n",
    "        # Pull the weight matrix to the CPU as a numpy array\n",
    "        weights = layer.weight.data.cpu().numpy()\n",
    "        \n",
    "        # Connection-existence mask (non-zero weight = connection kept)\n",
    "        mask = (weights != 0).astype(int)\n",
    "        \n",
    "        # Draw the heatmap\n",
    "        im = ax.imshow(mask, cmap=cmap, vmin=0, vmax=1)#, aspect='auto'\n",
    "        \n",
    "        # Title and axis labels\n",
    "        ax.set_title(f'Layer {i+1}: {layer_names[i]}', fontsize=14)\n",
    "        ax.set_xlabel('Input Neurons', fontsize=12)\n",
    "        ax.set_ylabel('Output Neurons', fontsize=12)\n",
    "        \n",
    "        # Annotate per-layer sparsity (fraction of zeroed weights)\n",
    "        sparsity = 100 * (1 - np.mean(mask))\n",
    "        ax.text(0.95, 0.95, f'Sparsity: {sparsity:.1f}%', \n",
    "                transform=ax.transAxes, ha='right', va='top',\n",
    "                bbox=dict(boxstyle='round', facecolor='white', alpha=0.8))\n",
    "    \n",
    "    # Overall figure title\n",
    "    fig.suptitle(title, fontsize=18, y=0.95)\n",
    "    \n",
    "    # Colorbar legend (disabled; kept for reference)\n",
    "    '''fig.subplots_adjust(right=0.85)\n",
    "    cbar_ax = fig.add_axes([0.88, 0.15, 0.02, 0.7])\n",
    "    cbar = fig.colorbar(im, cax=cbar_ax)\n",
    "    cbar.set_ticks([0.25, 0.75])\n",
    "    cbar.set_ticklabels(['Pruned', 'Active'])\n",
    "    cbar.ax.tick_params(labelsize=12)'''\n",
    "    \n",
    "    plt.tight_layout()\n",
    "    plt.savefig(filename, dpi=300, bbox_inches='tight')\n",
    "    plt.close()\n",
    "\n",
    "# Helper to reload a pruned checkpoint\n",
    "def load_pruned_model(filename):\n",
    "    \"\"\"Load a saved state_dict from `filename` into a fresh float64 MLP.\"\"\"\n",
    "    model = MLP().to(device)\n",
    "    model.load_state_dict(torch.load(filename))\n",
    "    model.double()\n",
    "    return model\n",
    "\n",
    "# Load the three pruned models saved by the previous cell\n",
    "magnitude_model = load_pruned_model('magnitude_pruned_model.pth')\n",
    "cosine_model = load_pruned_model('cosine_pruned_model.pth')\n",
    "diff_norm_model = load_pruned_model('diff_norm_pruned_model.pth')\n",
    "\n",
    "# Plot connectivity for each of the three pruning methods\n",
    "plot_pruned_connections(magnitude_model, \n",
    "                        'Magnitude-Based Pruning (10%) Connectivity',\n",
    "                        'magnitude_pruning_connectivity.png')\n",
    "\n",
    "plot_pruned_connections(cosine_model, \n",
    "                        'Cosine Similarity-Based Pruning (10%) Connectivity',\n",
    "                        'cosine_pruning_connectivity.png')\n",
    "\n",
    "plot_pruned_connections(diff_norm_model, \n",
    "                        'Difference Norm-Based Pruning (10%) Connectivity',\n",
    "                        'diff_norm_pruning_connectivity.png')\n",
    "\n",
    "print(\"All pruning connectivity plots saved successfully!\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 87,
   "id": "04ee60f6-7f3b-4f69-8645-c477f9bf388a",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Analyzing neuron importance for layer 1...\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 64/64 [00:00<00:00, 1310.32it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Analyzing neuron importance for layer 2...\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 32/32 [00:00<00:00, 1281.13it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Analyzing neuron importance for layer 3...\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 10/10 [00:00<00:00, 1154.60it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Neuron importance plots saved successfully!\n"
     ]
    }
   ],
   "source": [
    "import matplotlib.pyplot as plt\n",
    "import numpy as np\n",
    "import networkx as nx\n",
    "from matplotlib.colors import LinearSegmentedColormap\n",
    "\n",
    "def neuron_importance_analysis(model, test_images, test_labels):\n",
    "    \"\"\"\n",
    "    Score every neuron's importance with three metrics by zeroing one neuron\n",
    "    at a time and comparing the model's outputs before and after.\n",
    "    :param model: original model (expects fc1-fc3; temporarily mutated in place, then restored)\n",
    "    :param test_images: batch of test images\n",
    "    :param test_labels: matching test labels\n",
    "    :return: dict metric -> {layer_idx: per-neuron importance array}\n",
    "    \"\"\"\n",
    "    model.eval()\n",
    "    layers = [model.fc1, model.fc2, model.fc3]\n",
    "    importance = {\n",
    "        'cosine': {1: [], 2: [], 3: []},\n",
    "        'diff_norm': {1: [], 2: [], 3: []},\n",
    "        'accuracy': {1: [], 2: [], 3: []}\n",
    "    }\n",
    "    \n",
    "    # Baseline outputs of the unmodified model\n",
    "    with torch.no_grad():\n",
    "        original_output = model(test_images)\n",
    "        original_preds = torch.argmax(original_output, dim=1)\n",
    "    \n",
    "    # Analyze neuron importance layer by layer\n",
    "    for layer_idx, layer in enumerate(layers, start=1):\n",
    "        print(f\"Analyzing neuron importance for layer {layer_idx}...\")\n",
    "        num_neurons = layer.weight.shape[0]\n",
    "        layer_importance = {\n",
    "            'cosine': np.zeros(num_neurons, dtype=np.float64),\n",
    "            'diff_norm': np.zeros(num_neurons, dtype=np.float64),\n",
    "            'accuracy': np.zeros(num_neurons, dtype=np.float64)\n",
    "        }\n",
    "        \n",
    "        # Back up this layer's original weights and bias\n",
    "        original_weight = layer.weight.data.clone()\n",
    "        original_bias = layer.bias.data.clone() if layer.bias is not None else None\n",
    "        \n",
    "        # Ablate one neuron at a time\n",
    "        for neuron_idx in tqdm(range(num_neurons)):\n",
    "            # Prune: zero out this neuron's weights (and bias)\n",
    "            layer.weight.data[neuron_idx] = 0\n",
    "            if layer.bias is not None:\n",
    "                layer.bias.data[neuron_idx] = 0\n",
    "            \n",
    "            # Forward pass with the neuron ablated\n",
    "            with torch.no_grad():\n",
    "                pruned_output = model(test_images)\n",
    "            \n",
    "            # Compute the metrics\n",
    "            # 1. Cosine similarity between original and ablated outputs\n",
    "            cos_sim = torch.mean(\n",
    "                torch.nn.functional.cosine_similarity(original_output, pruned_output, dim=1)\n",
    "            ).item()\n",
    "            layer_importance['cosine'][neuron_idx] = 1 - cos_sim  # larger = more important\n",
    "            \n",
    "            # 2. Mean L2 norm of the output difference vector\n",
    "            diff_norm = torch.mean(\n",
    "                torch.norm(original_output - pruned_output, p=2, dim=1)\n",
    "            ).item()\n",
    "            layer_importance['diff_norm'][neuron_idx] = diff_norm  # larger = more important\n",
    "            \n",
    "            # 3. Test accuracy after ablation\n",
    "            pruned_preds = torch.argmax(pruned_output, dim=1)\n",
    "            accuracy = torch.mean((pruned_preds == test_labels).float()).item()\n",
    "            layer_importance['accuracy'][neuron_idx] = accuracy  # NOTE(review): higher accuracy after ablation suggests LESS importance; the original comment claimed the opposite\n",
    "            \n",
    "            # Restore the original weights and bias\n",
    "            layer.weight.data[neuron_idx] = original_weight[neuron_idx]\n",
    "            if layer.bias is not None:\n",
    "                layer.bias.data[neuron_idx] = original_bias[neuron_idx]\n",
    "        \n",
    "        # Store this layer's results\n",
    "        importance['cosine'][layer_idx] = layer_importance['cosine']\n",
    "        importance['diff_norm'][layer_idx] = layer_importance['diff_norm']\n",
    "        importance['accuracy'][layer_idx] = layer_importance['accuracy']\n",
    "    \n",
    "    return importance\n",
    "\n",
    "def plot_neuron_importance(neuron_importance, metric, filename):\n",
    "    \"\"\"\n",
    "    Draw the network with each neuron colored by its normalized importance.\n",
    "    :param neuron_importance: dict metric -> {layer_idx: importance array}\n",
    "    :param metric: metric key ('cosine', 'diff_norm', 'accuracy')\n",
    "    :param filename: output file name\n",
    "    \"\"\"\n",
    "    # Custom blue-white-red heatmap colors\n",
    "    cmap = LinearSegmentedColormap.from_list('neuron_importance', \n",
    "                                            ['blue', 'white', 'red'])\n",
    "    \n",
    "    # Create the figure\n",
    "    fig, ax = plt.subplots(figsize=(15, 8))\n",
    "    \n",
    "    # Network architecture: input, two hidden layers, output\n",
    "    layer_sizes = [784, 64, 32, 10]\n",
    "    positions = {}\n",
    "    max_neurons = max(layer_sizes)\n",
    "    \n",
    "    # Compute (x, y) coordinates for every neuron in every layer\n",
    "    for i, size in enumerate(layer_sizes):\n",
    "        y_pos = np.linspace(0, max_neurons, size)\n",
    "        x_pos = i * 1.5 * np.ones(size)\n",
    "        positions.update({(i, j): (x_pos[j], y_pos[j]) for j in range(size)})\n",
    "    \n",
    "    # Build the graph of neurons\n",
    "    G = nx.Graph()\n",
    "    for i in range(len(layer_sizes)):\n",
    "        for j in range(layer_sizes[i]):\n",
    "            G.add_node((i, j))\n",
    "    \n",
    "    # Color the nodes by importance\n",
    "    node_colors = []\n",
    "    for i in range(len(layer_sizes)):\n",
    "        # Fetch this layer's importance scores\n",
    "        if i == 0:  # the input layer has no importance scores\n",
    "            importance = np.zeros(layer_sizes[i])\n",
    "        else:\n",
    "            importance = neuron_importance[metric][i]\n",
    "        \n",
    "        # Min-max normalize to [0, 1] (epsilon avoids division by zero)\n",
    "        norm_importance = (importance - np.min(importance)) / (np.max(importance) - np.min(importance) + 1e-8)\n",
    "        \n",
    "        for j in range(layer_sizes[i]):\n",
    "            # Add the node (already present above; re-adding is a no-op)\n",
    "            G.add_node((i, j))\n",
    "            \n",
    "            # Map importance to a color (neutral 0.5 for the input layer)\n",
    "            color_value = norm_importance[j] if i > 0 else 0.5\n",
    "            node_colors.append(cmap(color_value))\n",
    "    \n",
    "    # Draw the neurons\n",
    "    nx.draw_networkx_nodes(G, positions, node_size=50, \n",
    "                          node_color=node_colors, edgecolors='black', linewidths=0.5)\n",
    "    \n",
    "    # Layer labels\n",
    "    for i in range(len(layer_sizes)):\n",
    "        plt.text(i * 1.5, max_neurons + 0.5, f\"Layer {i+1}\\n({layer_sizes[i]} neurons)\", \n",
    "                 ha='center', fontsize=12, fontweight='bold')\n",
    "    \n",
    "    # Draw a sparse random subset of connections (illustrative only)\n",
    "    for i in range(len(layer_sizes) - 1):\n",
    "        # Randomly sample a small number of edges per layer pair\n",
    "        num_connections = min(100, layer_sizes[i] * layer_sizes[i+1] // 10)\n",
    "        for _ in range(num_connections):\n",
    "            src = np.random.randint(layer_sizes[i])\n",
    "            dst = np.random.randint(layer_sizes[i+1])\n",
    "            plt.plot([positions[(i, src)][0], positions[(i+1, dst)][0]], \n",
    "                     [positions[(i, src)][1], positions[(i+1, dst)][1]], \n",
    "                     'k-', alpha=0.05, linewidth=0.5)\n",
    "    \n",
    "    # Title\n",
    "    metric_titles = {\n",
    "        'cosine': 'Cosine Similarity Importance',\n",
    "        'diff_norm': 'Difference Norm Importance',\n",
    "        'accuracy': 'Accuracy Importance'\n",
    "    }\n",
    "    plt.title(f'Neuron Importance: {metric_titles[metric]}', fontsize=16)\n",
    "    \n",
    "    # Colorbar\n",
    "    sm = plt.cm.ScalarMappable(cmap=cmap, \n",
    "                              norm=plt.Normalize(vmin=0, vmax=1))\n",
    "    sm.set_array([])\n",
    "    cbar = plt.colorbar(sm, ax=ax, pad=0.01)\n",
    "    cbar.set_label('Normalized Importance', fontsize=12)\n",
    "    \n",
    "    # Axis limits; hide the axes themselves\n",
    "    plt.xlim(-0.5, (len(layer_sizes) - 1) * 1.5 + 0.5)\n",
    "    plt.ylim(-0.5, max_neurons + 1)\n",
    "    plt.axis('off')\n",
    "    \n",
    "    plt.tight_layout()\n",
    "    plt.savefig(filename, dpi=300, bbox_inches='tight')\n",
    "    plt.close()\n",
    "\n",
    "# Load the original (unpruned) model\n",
    "original_model = MLP().to(device)\n",
    "original_model.load_state_dict(torch.load('mlp_mnist.pth'))\n",
    "original_model.double()\n",
    "\n",
    "# Take the first 64 samples of one test batch\n",
    "test_images, test_labels = next(iter(test_loader))\n",
    "test_images = test_images[:64].double().to(device)\n",
    "test_labels = test_labels[:64].to(device)\n",
    "\n",
    "# Score neuron importance on this batch\n",
    "neuron_imp = neuron_importance_analysis(original_model, test_images, test_labels)\n",
    "\n",
    "# Render one plot per metric\n",
    "plot_neuron_importance(neuron_imp, 'cosine', 'neuron_cosine_importance.png')\n",
    "plot_neuron_importance(neuron_imp, 'diff_norm', 'neuron_diff_norm_importance.png')\n",
    "plot_neuron_importance(neuron_imp, 'accuracy', 'neuron_accuracy_importance.png')\n",
    "\n",
    "print(\"Neuron importance plots saved successfully!\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 120,
   "id": "b8258751-dde6-42f9-b5e5-a1eb4b956d33",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Original model accuracy: 93.34%\n",
      "\n",
      "Pruning by weight absolute sum...\n",
      "Pruning 50.0% least important neurons using weight_sum metric...\n",
      "Global threshold: 7.538499\n",
      "\n",
      "Pruning by cosine similarity importance...\n",
      "Pruning 50.0% least important neurons using cosine metric...\n",
      "Global threshold: 0.010164\n",
      "\n",
      "Pruning by difference norm importance...\n",
      "Pruning 50.0% least important neurons using diff_norm metric...\n",
      "Global threshold: 0.669098\n",
      "\n",
      "Neuron Pruning Results:\n",
      "Original Accuracy: 93.34%\n",
      "Weight Sum Pruning (10% neurons): Accuracy = 9.80%, Neuron Sparsity = 33.33%\n",
      "Cosine Similarity Pruning (10% neurons): Accuracy = 93.34%, Neuron Sparsity = 0.00%\n",
      "Difference Norm Pruning (10% neurons): Accuracy = 93.34%, Neuron Sparsity = 0.00%\n"
     ]
    }
   ],
   "source": [
    "import torch\n",
    "import numpy as np\n",
    "from tqdm import tqdm\n",
    "\n",
    "def prune_neurons(model, importance_metric, amount=0.5, importance_results=None):\n",
    "    \"\"\"\n",
    "    Prune the least important hidden neurons according to a chosen metric.\n",
    "    :param model: model to prune (copied; the argument itself is untouched)\n",
    "    :param importance_metric: importance metric ('weight_sum', 'cosine', 'diff_norm')\n",
    "    :param amount: fraction of hidden neurons to prune (e.g. 0.5 = bottom 50%)\n",
    "    :param importance_results: per-layer importance dict (needed for 'cosine'/'diff_norm')\n",
    "    :return: the pruned model\n",
    "    \"\"\"\n",
    "    pruned_model = MLP().to(device)\n",
    "    pruned_model.load_state_dict(model.state_dict())\n",
    "    pruned_model.double()\n",
    "    \n",
    "    layers = [pruned_model.fc1, pruned_model.fc2, pruned_model.fc3]\n",
    "    \n",
    "    # Collect importance scores for every hidden neuron.\n",
    "    # BUG FIX: the original iterated layers[1:] (fc2 AND fc3), which pruned\n",
    "    # output-class rows (destroying accuracy) and skipped the first hidden\n",
    "    # layer. Hidden neurons are the output rows of fc1 and fc2, so iterate\n",
    "    # layers[:-1] with layer_idx 1..2, matching both the keys produced by\n",
    "    # neuron_importance_analysis and calculate_neuron_sparsity's counting.\n",
    "    all_importances = []\n",
    "    neuron_locations = []  # neuron positions as (layer_idx, neuron_idx)\n",
    "    \n",
    "    for layer_idx, layer in enumerate(layers[:-1], start=1):\n",
    "        num_neurons = layer.weight.shape[0]\n",
    "        \n",
    "        if importance_metric == 'weight_sum':\n",
    "            # Sum of absolute incoming weights per neuron\n",
    "            imp = torch.sum(torch.abs(layer.weight.data), dim=1).cpu().numpy()\n",
    "        elif importance_metric == 'cosine':\n",
    "            imp = importance_results['cosine'][layer_idx]\n",
    "        elif importance_metric == 'diff_norm':\n",
    "            imp = importance_results['diff_norm'][layer_idx]\n",
    "        else:\n",
    "            raise ValueError(f\"Unknown importance metric: {importance_metric}\")\n",
    "        \n",
    "        # Store the scores and remember which neuron each score belongs to\n",
    "        all_importances.append(imp)\n",
    "        neuron_locations.extend([(layer_idx, i) for i in range(num_neurons)])\n",
    "    \n",
    "    # Merge all scores and derive one global threshold so that the bottom\n",
    "    # `amount` fraction of hidden neurons (across both layers) is pruned\n",
    "    all_importances = np.concatenate(all_importances)\n",
    "    threshold = np.percentile(all_importances, amount * 100)\n",
    "    \n",
    "    print(f\"Pruning {amount*100:.1f}% least important neurons using {importance_metric} metric...\")\n",
    "    print(f\"Global threshold: {threshold:.6f}\")\n",
    "    \n",
    "    # Apply pruning. BUG FIX: the original compared the constant\n",
    "    # all_importances[len(neuron_locations) - 1] for every neuron, making\n",
    "    # pruning all-or-nothing; pair each location with its own score instead.\n",
    "    for global_idx, (layer_idx, neuron_idx) in enumerate(neuron_locations):\n",
    "        if all_importances[global_idx] < threshold:\n",
    "            # Prune: zero the neuron's incoming weights and its bias\n",
    "            layers[layer_idx-1].weight.data[neuron_idx] = 0\n",
    "            if layers[layer_idx-1].bias is not None:\n",
    "                layers[layer_idx-1].bias.data[neuron_idx] = 0\n",
    "    \n",
    "    return pruned_model\n",
    "\n",
    "# Load the original model\n",
    "original_model = MLP().to(device)\n",
    "original_model.load_state_dict(torch.load('mlp_mnist.pth'))  # assumes the small model checkpoint was saved earlier\n",
    "original_model.double()\n",
    "\n",
    "# Evaluate the unpruned baseline\n",
    "original_acc = evaluate_model(original_model, test_loader)\n",
    "print(f\"Original model accuracy: {original_acc*100:.2f}%\")\n",
    "\n",
    "# Compute neuron importance if a previous cell has not already done so\n",
    "if 'neuron_imp' not in locals():\n",
    "    print(\"Calculating neuron importance...\")\n",
    "    test_images, test_labels = next(iter(test_loader))\n",
    "    test_images = test_images[:50].double().to(device)\n",
    "    test_labels = test_labels[:50].to(device)\n",
    "    neuron_imp = neuron_importance_analysis(original_model, test_images, test_labels)\n",
    "\n",
    "# Run the three neuron-pruning strategies\n",
    "print(\"\\nPruning by weight absolute sum...\")\n",
    "weight_sum_pruned = prune_neurons(original_model, 'weight_sum', amount=0.5)\n",
    "\n",
    "print(\"\\nPruning by cosine similarity importance...\")\n",
    "cosine_pruned = prune_neurons(original_model, 'cosine', amount=0.5, importance_results=neuron_imp)\n",
    "\n",
    "print(\"\\nPruning by difference norm importance...\")\n",
    "diff_norm_pruned = prune_neurons(original_model, 'diff_norm', amount=0.5, importance_results=neuron_imp)\n",
    "\n",
    "# Evaluate the pruned models\n",
    "weight_sum_acc = evaluate_model(weight_sum_pruned, test_loader)\n",
    "cosine_acc = evaluate_model(cosine_pruned, test_loader)\n",
    "diff_norm_acc = evaluate_model(diff_norm_pruned, test_loader)\n",
    "\n",
    "# Neuron-level sparsity: fraction of hidden neurons that are fully zeroed\n",
    "def calculate_neuron_sparsity(model):\n",
    "    \"\"\"Return the fraction of hidden-layer neurons whose weight row and bias are all zero.\"\"\"\n",
    "    total_neurons = 0\n",
    "    dead_neurons = 0\n",
    "    for name, module in model.named_modules():\n",
    "        if isinstance(module, nn.Linear) and name != 'fc3':  # hidden layers only (fc3 is the output layer)\n",
    "            # A neuron counts as pruned when its entire weight row (and bias entry) is zero\n",
    "            for i in range(module.weight.shape[0]):\n",
    "                if torch.all(module.weight.data[i] == 0) and (module.bias is None or module.bias.data[i] == 0):\n",
    "                    dead_neurons += 1\n",
    "                total_neurons += 1\n",
    "    return dead_neurons / total_neurons if total_neurons > 0 else 0\n",
    "\n",
    "# Print the summary. BUG FIX: the labels previously said \"10% neurons\"\n",
    "# although amount=0.5 (50%) is used in every prune_neurons call above.\n",
    "print(\"\\nNeuron Pruning Results:\")\n",
    "print(f\"Original Accuracy: {original_acc*100:.2f}%\")\n",
    "print(f\"Weight Sum Pruning (50% neurons): Accuracy = {weight_sum_acc*100:.2f}%, Neuron Sparsity = {calculate_neuron_sparsity(weight_sum_pruned)*100:.2f}%\")\n",
    "print(f\"Cosine Similarity Pruning (50% neurons): Accuracy = {cosine_acc*100:.2f}%, Neuron Sparsity = {calculate_neuron_sparsity(cosine_pruned)*100:.2f}%\")\n",
    "print(f\"Difference Norm Pruning (50% neurons): Accuracy = {diff_norm_acc*100:.2f}%, Neuron Sparsity = {calculate_neuron_sparsity(diff_norm_pruned)*100:.2f}%\")\n",
    "\n",
    "# Save the pruned models\n",
    "torch.save(weight_sum_pruned.state_dict(), 'weight_sum_neuron_pruned.pth')\n",
    "torch.save(cosine_pruned.state_dict(), 'cosine_neuron_pruned.pth')\n",
    "torch.save(diff_norm_pruned.state_dict(), 'diff_norm_neuron_pruned.pth')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 118,
   "id": "1b16e5f0-4c2d-49d6-9065-dbac009e3415",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Network structure plots saved successfully!\n"
     ]
    }
   ],
   "source": [
    "import matplotlib.pyplot as plt\n",
    "import numpy as np\n",
    "from matplotlib.colors import LinearSegmentedColormap\n",
    "import matplotlib.patches as mpatches\n",
    "\n",
    "def plot_pruned_network(model, title, filename, pruned_model=None):\n",
    "    \"\"\"\n",
    "    Draw the network structure, graying out neurons that were pruned away.\n",
    "    :param model: original model (used for the layer layout)\n",
    "    :param title: figure title\n",
    "    :param filename: output file name\n",
    "    :param pruned_model: pruned model whose zeroed neurons get marked (optional)\n",
    "    \"\"\"\n",
    "    # Network layout: input, two hidden layers, output\n",
    "    layer_sizes = [784, 64, 32, 10]\n",
    "    layers = [model.fc1, model.fc2, model.fc3]\n",
    "    \n",
    "    # Create the figure\n",
    "    fig = plt.figure(figsize=(15, 10))\n",
    "    ax = fig.add_subplot(111)\n",
    "    \n",
    "    # Horizontal position of each layer\n",
    "    layer_positions = np.linspace(0, 1, len(layer_sizes))\n",
    "    neuron_positions = {}\n",
    "    \n",
    "    # Vertical positions of the neurons within each layer\n",
    "    for i, size in enumerate(layer_sizes):\n",
    "        y_pos = np.linspace(0, 1, size)\n",
    "        neuron_positions[i] = [(layer_positions[i], y) for y in y_pos]\n",
    "    \n",
    "    # Draw the neurons\n",
    "    neuron_colors = []\n",
    "    neuron_labels = []\n",
    "    \n",
    "    # Grab the pruned model's layers so zeroed-out neurons can be detected\n",
    "    if pruned_model is not None:\n",
    "        pruned_layers = [pruned_model.fc1, pruned_model.fc2, pruned_model.fc3]\n",
    "    \n",
    "    for i in range(len(layer_sizes)):\n",
    "        for j in range(layer_sizes[i]):\n",
    "            # Input and output layers are always drawn as active\n",
    "            status = 'active'\n",
    "            \n",
    "            # Hidden-layer neurons may have been pruned\n",
    "            if i > 0 and i < len(layer_sizes) - 1:\n",
    "                layer_idx = i  # index into the model's layer list\n",
    "                if pruned_model is not None:\n",
    "                    # A neuron whose entire weight row is zero was pruned\n",
    "                    if torch.all(pruned_layers[layer_idx-1].weight.data[j] == 0):\n",
    "                        status = 'pruned'\n",
    "            \n",
    "            # Pick the color and legend label\n",
    "            if status == 'active':\n",
    "                color = 'black'\n",
    "                label = 'Active Neuron'\n",
    "            else:\n",
    "                color = 'lightgray'\n",
    "                label = 'Pruned Neuron'\n",
    "            \n",
    "            # Draw the neuron\n",
    "            ax.scatter(layer_positions[i], neuron_positions[i][j][1], \n",
    "                      s=80, color=color, edgecolor='black', zorder=3)\n",
    "            \n",
    "            # Remember each distinct color/label pair for the legend\n",
    "            if label not in neuron_labels:\n",
    "                neuron_colors.append(color)\n",
    "                neuron_labels.append(label)\n",
    "    \n",
    "    # Draw connections (skipping edges into pruned neurons)\n",
    "    for i in range(len(layer_sizes) - 1):\n",
    "        # Source and destination layers of this edge bundle\n",
    "        src_layer = i\n",
    "        dst_layer = i + 1\n",
    "        \n",
    "        # Find which destination neurons were pruned\n",
    "        if pruned_model is not None:\n",
    "            pruned_neurons = []\n",
    "            for j in range(layer_sizes[dst_layer]):\n",
    "                if torch.all(pruned_layers[dst_layer-1].weight.data[j] == 0):\n",
    "                    pruned_neurons.append(j)\n",
    "        \n",
    "        # Draw the edges\n",
    "        for src_idx in range(layer_sizes[src_layer]):\n",
    "            for dst_idx in range(layer_sizes[dst_layer]):\n",
    "                # Skip edges whose destination neuron was pruned\n",
    "                if pruned_model is not None and dst_idx in pruned_neurons:\n",
    "                    continue\n",
    "                \n",
    "                # Randomly subsample edges so the figure stays readable\n",
    "                if np.random.rand() > 0.95:  # draw only ~5% of the connections\n",
    "                    src_pos = neuron_positions[src_layer][src_idx]\n",
    "                    dst_pos = neuron_positions[dst_layer][dst_idx]\n",
    "                    \n",
    "                    # Line style\n",
    "                    linewidth = 0.5\n",
    "                    alpha = 0.2\n",
    "                    \n",
    "                    # Draw the edge\n",
    "                    ax.plot([src_pos[0], dst_pos[0]], [src_pos[1], dst_pos[1]], \n",
    "                           'k-', linewidth=linewidth, alpha=alpha, zorder=1)\n",
    "    \n",
    "    # Layer labels\n",
    "    for i, size in enumerate(layer_sizes):\n",
    "        ax.text(layer_positions[i], -0.05, f'Layer {i+1}\\n({size} neurons)', \n",
    "                ha='center', fontsize=12, fontweight='bold')\n",
    "    \n",
    "    # Title\n",
    "    ax.set_title(title, fontsize=16, pad=20)\n",
    "    \n",
    "    # Legend\n",
    "    legend_patches = [mpatches.Patch(color=c, label=l) \n",
    "                      for c, l in zip(neuron_colors, neuron_labels)]\n",
    "    ax.legend(handles=legend_patches, loc='upper center', \n",
    "              bbox_to_anchor=(0.5, -0.05), ncol=2, fontsize=12)\n",
    "    \n",
    "    # Axis limits; hide the axes themselves\n",
    "    ax.set_xlim(-0.1, 1.1)\n",
    "    ax.set_ylim(-0.1, 1.1)\n",
    "    ax.axis('off')\n",
    "    \n",
    "    plt.tight_layout()\n",
    "    plt.savefig(filename, dpi=300, bbox_inches='tight')\n",
    "    plt.close()\n",
    "\n",
    "# Load the original model\n",
    "original_model = MLP().to(device)\n",
    "original_model.load_state_dict(torch.load('mlp_mnist.pth'))\n",
    "original_model.double()\n",
    "\n",
    "# Load the three neuron-pruned checkpoints saved earlier\n",
    "weight_sum_pruned = MLP().to(device)\n",
    "weight_sum_pruned.load_state_dict(torch.load('weight_sum_neuron_pruned.pth'))\n",
    "weight_sum_pruned.double()\n",
    "\n",
    "cosine_pruned = MLP().to(device)\n",
    "cosine_pruned.load_state_dict(torch.load('cosine_neuron_pruned.pth'))\n",
    "cosine_pruned.double()\n",
    "\n",
    "diff_norm_pruned = MLP().to(device)\n",
    "diff_norm_pruned.load_state_dict(torch.load('diff_norm_neuron_pruned.pth'))\n",
    "diff_norm_pruned.double()\n",
    "\n",
    "# Plot the original network structure\n",
    "plot_pruned_network(original_model, 'Original Network Structure (784-64-32-10)', \n",
    "                   'original_network.png')\n",
    "\n",
    "# Plot the structure after each of the three pruning methods\n",
    "plot_pruned_network(original_model, 'Weight Sum Pruning: Network Structure', \n",
    "                   'weight_sum_pruned_network.png', weight_sum_pruned)\n",
    "\n",
    "plot_pruned_network(original_model, 'Cosine Similarity Pruning: Network Structure', \n",
    "                   'cosine_pruned_network.png', cosine_pruned)\n",
    "\n",
    "plot_pruned_network(original_model, 'Difference Norm Pruning: Network Structure', \n",
    "                   'diff_norm_pruned_network.png', diff_norm_pruned)\n",
    "\n",
    "print(\"Network structure plots saved successfully!\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 123,
   "id": "c70caa03-f874-4e9a-9bf4-fc462ff0e405",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 1/5: 100%|██████████| 469/469 [00:14<00:00, 32.35it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 1, Loss: 0.2890, Test Acc: 94.31%\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 2/5: 100%|██████████| 469/469 [00:14<00:00, 32.11it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 2, Loss: 0.1066, Test Acc: 95.53%\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 3/5: 100%|██████████| 469/469 [00:14<00:00, 32.85it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 3, Loss: 0.0712, Test Acc: 95.94%\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 4/5: 100%|██████████| 469/469 [00:14<00:00, 31.69it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 4, Loss: 0.0528, Test Acc: 96.21%\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 5/5: 100%|██████████| 469/469 [00:14<00:00, 31.95it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 5, Loss: 0.0419, Test Acc: 96.62%\n"
     ]
    }
   ],
   "source": [
    "# A deeper MLP (float64 precision throughout)\n",
    "class MLP1(nn.Module):\n",
    "    \"\"\"Five-layer fully connected classifier for 28x28 inputs: 784-512-256-128-64-10.\"\"\"\n",
    "\n",
    "    def __init__(self):\n",
    "        super(MLP1, self).__init__()\n",
    "        self.fc1 = nn.Linear(28*28, 512)\n",
    "        self.fc2 = nn.Linear(512, 256)\n",
    "        self.fc3 = nn.Linear(256, 128)\n",
    "        self.fc4 = nn.Linear(128, 64)\n",
    "        self.fc5 = nn.Linear(64, 10)\n",
    "        self.relu = nn.ReLU()\n",
    "        self.double()  # cast every parameter to float64\n",
    "\n",
    "    def forward(self, x):\n",
    "        # Flatten to (batch, 784), then apply the four ReLU hidden layers\n",
    "        x = x.view(-1, 28*28).double()\n",
    "        for hidden in (self.fc1, self.fc2, self.fc3, self.fc4):\n",
    "            x = self.relu(hidden(x))\n",
    "        # Final layer returns raw scores (no activation applied)\n",
    "        return self.fc5(x)\n",
    "\n",
    "model1 = MLP1().to(device)\n",
    "\n",
    "# Train the deeper model for 5 epochs\n",
    "train_model(model1, train_loader, test_loader, epochs=5)\n",
    "\n",
    "# Save the trained weights\n",
    "torch.save(model1.state_dict(), 'mlp1_mnist.pth')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 134,
   "id": "87b06fea-5aba-4fae-ae8f-27bffc48dfc7",
   "metadata": {},
   "outputs": [],
   "source": [
    "def neuron_importance_analysis(model, test_images, test_labels):\n",
    "    \"\"\"\n",
    "    Score every neuron's importance with three metrics by zeroing one neuron\n",
    "    at a time and comparing the model's outputs before and after.\n",
    "    NOTE(review): this redefines neuron_importance_analysis from an earlier\n",
    "    cell, extended to the four hidden layers (fc1-fc4) of MLP1; the later\n",
    "    definition silently shadows the earlier one.\n",
    "    :param model: original model (expects fc1-fc4; temporarily mutated in place, then restored)\n",
    "    :param test_images: batch of test images\n",
    "    :param test_labels: matching test labels\n",
    "    :return: dict metric -> {layer_idx: per-neuron importance array}\n",
    "    \"\"\"\n",
    "    model.eval()\n",
    "    layers = [model.fc1, model.fc2, model.fc3, model.fc4]\n",
    "    importance = {\n",
    "        'cosine': {1: [], 2: [], 3: [], 4: []},\n",
    "        'diff_norm': {1: [], 2: [], 3: [], 4: []},\n",
    "        'accuracy': {1: [], 2: [], 3: [], 4: []}\n",
    "    }\n",
    "    \n",
    "    # Baseline outputs of the unmodified model\n",
    "    with torch.no_grad():\n",
    "        original_output = model(test_images)\n",
    "        original_preds = torch.argmax(original_output, dim=1)\n",
    "    \n",
    "    # Analyze neuron importance layer by layer\n",
    "    for layer_idx, layer in enumerate(layers, start=1):\n",
    "        print(f\"Analyzing neuron importance for layer {layer_idx}...\")\n",
    "        num_neurons = layer.weight.shape[0]\n",
    "        layer_importance = {\n",
    "            'cosine': np.zeros(num_neurons, dtype=np.float64),\n",
    "            'diff_norm': np.zeros(num_neurons, dtype=np.float64),\n",
    "            'accuracy': np.zeros(num_neurons, dtype=np.float64)\n",
    "        }\n",
    "        \n",
    "        # Back up this layer's original weights and bias\n",
    "        original_weight = layer.weight.data.clone()\n",
    "        original_bias = layer.bias.data.clone() if layer.bias is not None else None\n",
    "        \n",
    "        # Ablate one neuron at a time\n",
    "        for neuron_idx in tqdm(range(num_neurons)):\n",
    "            # Prune: zero out this neuron's weights (and bias)\n",
    "            layer.weight.data[neuron_idx] = 0\n",
    "            if layer.bias is not None:\n",
    "                layer.bias.data[neuron_idx] = 0\n",
    "            \n",
    "            # Forward pass with the neuron ablated\n",
    "            with torch.no_grad():\n",
    "                pruned_output = model(test_images)\n",
    "            \n",
    "            # Compute the metrics\n",
    "            # 1. Cosine similarity between original and ablated outputs\n",
    "            cos_sim = torch.mean(\n",
    "                torch.nn.functional.cosine_similarity(original_output, pruned_output, dim=1)\n",
    "            ).item()\n",
    "            layer_importance['cosine'][neuron_idx] = 1 - cos_sim  # larger = more important\n",
    "            \n",
    "            # 2. Mean L2 norm of the output difference vector\n",
    "            diff_norm = torch.mean(\n",
    "                torch.norm(original_output - pruned_output, p=2, dim=1)\n",
    "            ).item()\n",
    "            layer_importance['diff_norm'][neuron_idx] = diff_norm  # larger = more important\n",
    "            \n",
    "            # 3. Test accuracy after ablation\n",
    "            pruned_preds = torch.argmax(pruned_output, dim=1)\n",
    "            accuracy = torch.mean((pruned_preds == test_labels).float()).item()\n",
    "            layer_importance['accuracy'][neuron_idx] = accuracy  # NOTE(review): higher accuracy after ablation suggests LESS importance; the original comment claimed the opposite\n",
    "            \n",
    "            # Restore the original weights and bias\n",
    "            layer.weight.data[neuron_idx] = original_weight[neuron_idx]\n",
    "            if layer.bias is not None:\n",
    "                layer.bias.data[neuron_idx] = original_bias[neuron_idx]\n",
    "        \n",
    "        # Store this layer's results\n",
    "        importance['cosine'][layer_idx] = layer_importance['cosine']\n",
    "        importance['diff_norm'][layer_idx] = layer_importance['diff_norm']\n",
    "        importance['accuracy'][layer_idx] = layer_importance['accuracy']\n",
    "    \n",
    "    return importance"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 133,
   "id": "965795e5-2174-4720-9a40-fd92f1cc86bf",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "{1: array([1.42490248e-04, 1.18069332e-03, 1.50906436e-03, 8.39474524e-03,\n",
       "        1.10394585e-02, 5.18560974e-03, 7.42524681e-03, 1.16553681e-03,\n",
       "        1.94250175e-02, 3.69176896e-04, 2.64115981e-03, 8.82356172e-03,\n",
       "        7.79368615e-04, 8.48870025e-04, 3.97876869e-03, 2.32683059e-02,\n",
       "        7.91582705e-03, 2.46054654e-03, 2.07422505e-03, 2.54433143e-08,\n",
       "        2.57233755e-03, 2.44528624e-03, 6.26767694e-03, 9.03080369e-03,\n",
       "        2.03468530e-04, 5.20610601e-03, 3.39298582e-03, 0.00000000e+00,\n",
       "        7.55561052e-04, 3.43438641e-03, 7.96322978e-03, 4.25885374e-08,\n",
       "        7.31976439e-03, 1.80999468e-02, 9.61857809e-03, 9.76682476e-04,\n",
       "        4.95235145e-03, 4.19633017e-03, 1.01956513e-03, 4.52575109e-02,\n",
       "        4.56159337e-03, 1.00088140e-02, 1.10778663e-08, 6.96673247e-03,\n",
       "        3.99701851e-03, 1.98533361e-02, 4.95992475e-05, 4.61322684e-03,\n",
       "        6.63162437e-03, 3.67504647e-04, 9.47965471e-04, 6.15298931e-03,\n",
       "        3.41716137e-03, 4.85457307e-04, 5.58280510e-03, 1.36028735e-03,\n",
       "        3.90569866e-03, 8.13242032e-03, 3.33450593e-03, 1.24007792e-03,\n",
       "        6.44059700e-03, 1.16120168e-03, 2.86111035e-03, 3.27484027e-03]),\n",
       " 2: array([1.95792031e-02, 1.52956145e-02, 1.09155045e-02, 6.88848132e-03,\n",
       "        1.80043162e-02, 1.61842272e-03, 3.37086895e-03, 8.62532476e-03,\n",
       "        4.21260583e-03, 0.00000000e+00, 8.27423439e-04, 3.37823256e-03,\n",
       "        3.51943308e-02, 6.32877509e-03, 1.54973086e-02, 1.18170375e-02,\n",
       "        8.73515599e-03, 2.83092061e-09, 4.96261430e-03, 2.01996050e-02,\n",
       "        2.01494179e-06, 4.76451800e-03, 2.11196342e-03, 2.21582476e-02,\n",
       "        0.00000000e+00, 9.41253260e-03, 7.75349519e-03, 4.87949036e-03,\n",
       "        4.10221537e-03, 1.94071628e-02, 7.43689374e-03, 4.35496089e-02]),\n",
       " 3: array([0.06548882, 0.0562608 , 0.03229267, 0.02115177, 0.06124422,\n",
       "        0.0887976 , 0.12315987, 0.04166244, 0.01148723, 0.0342775 ])}"
      ]
     },
     "execution_count": 133,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "neuron_imp['cosine']"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 153,
   "id": "eaab4d6f-ce18-46f7-aa93-c3fa4047e6bd",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Original model accuracy: 96.62%\n",
      "Calculating neuron importance...\n",
      "Analyzing neuron importance for layer 1...\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 512/512 [00:00<00:00, 774.72it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Analyzing neuron importance for layer 2...\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 256/256 [00:00<00:00, 769.24it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Analyzing neuron importance for layer 3...\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 128/128 [00:00<00:00, 790.80it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Analyzing neuron importance for layer 4...\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 64/64 [00:00<00:00, 790.42it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "Pruning by weight absolute sum...\n",
      "Pruning 80.0% least important neurons using weight_sum metric...\n",
      "Global threshold: 0.040580\n",
      "\n",
      "Pruning by cosine similarity importance...\n",
      "Pruning 80.0% least important neurons using cosine metric...\n",
      "Global threshold: 0.000201\n",
      "\n",
      "Pruning by difference norm importance...\n",
      "Pruning 80.0% least important neurons using diff_norm metric...\n",
      "Global threshold: 0.073730\n",
      "\n",
      "Neuron Pruning Results:\n",
      "Original Accuracy: 96.62%\n",
      "Weight Sum Pruning (10% neurons): Accuracy = 17.41%, Neuron Sparsity = 80.00%\n",
      "Cosine Similarity Pruning (10% neurons): Accuracy = 37.61%, Neuron Sparsity = 80.00%\n",
      "Difference Norm Pruning (10% neurons): Accuracy = 46.81%, Neuron Sparsity = 80.00%\n"
     ]
    }
   ],
   "source": [
    "import torch\n",
    "import numpy as np\n",
    "from tqdm import tqdm\n",
    "\n",
    "def prune_neurons(model, importance_metric, amount=0.1, importance_results=None):\n",
    "    \"\"\"\n",
    "    根据指定指标剪枝神经元\n",
    "    :param model: 待剪枝模型\n",
    "    :param importance_metric: 重要性指标 ('weight_sum', 'cosine', 'diff_norm')\n",
    "    :param amount: 剪枝比例\n",
    "    :param importance_results: 重要性评估结果\n",
    "    :return: 剪枝后的模型\n",
    "    \"\"\"\n",
    "    pruned_model = MLP1().to(device)\n",
    "    pruned_model.load_state_dict(model.state_dict())\n",
    "    pruned_model.double()\n",
    "    \n",
    "    # 获取所有隐藏层\n",
    "    layers = [pruned_model.fc1, pruned_model.fc2, pruned_model.fc3, pruned_model.fc4]\n",
    "    \n",
    "    # 收集所有隐藏层神经元的重要性\n",
    "    all_importances = []\n",
    "    neuron_locations = []  # 存储神经元位置 (layer_idx, neuron_idx)\n",
    "    \n",
    "    for layer_idx, layer in enumerate(layers, start=1):\n",
    "        num_neurons = layer.weight.shape[0]\n",
    "        \n",
    "        if importance_metric == 'weight_sum':\n",
    "            # Importance = mean of |weight| per output neuron (mean, not sum, despite the metric name;\n",
    "            # the mean keeps scores comparable across layers with different fan-in)\n",
    "            imp = torch.mean(torch.abs(layer.weight.data), dim=1).cpu().numpy()\n",
    "        elif importance_metric == 'cosine':\n",
    "            imp = importance_results['cosine'][layer_idx]\n",
    "        elif importance_metric == 'diff_norm':\n",
    "            imp = importance_results['diff_norm'][layer_idx]\n",
    "        else:\n",
    "            raise ValueError(f\"Unknown importance metric: {importance_metric}\")\n",
    "        \n",
    "        # 存储神经元重要性\n",
    "        all_importances.append(imp)\n",
    "        # 记录神经元位置\n",
    "        neuron_locations.extend([(layer_idx, i) for i in range(num_neurons)])\n",
    "    \n",
    "    # 合并所有重要性分数\n",
    "    all_importances = np.concatenate(all_importances)\n",
    "    \n",
    "    # 计算全局阈值（保留前1-amount的重要神经元）\n",
    "    threshold = np.percentile(all_importances, amount * 100)\n",
    "    \n",
    "    print(f\"Pruning {amount*100:.1f}% least important neurons using {importance_metric} metric...\")\n",
    "    print(f\"Global threshold: {threshold:.6f}\")\n",
    "    \n",
    "    # 应用剪枝\n",
    "    for loc_idx, (layer_idx, neuron_idx) in enumerate(neuron_locations):\n",
    "        if all_importances[loc_idx] < threshold:  # 检查当前神经元重要性\n",
    "            # 剪枝：将神经元权重置零\n",
    "            layers[layer_idx-1].weight.data[neuron_idx] = 0\n",
    "            if layers[layer_idx-1].bias is not None:\n",
    "                layers[layer_idx-1].bias.data[neuron_idx] = 0\n",
    "    \n",
    "    return pruned_model\n",
    "\n",
    "# 加载原始模型\n",
    "original_model = MLP1().to(device)\n",
    "original_model.load_state_dict(torch.load('mlp1_mnist.pth'))  # 假设已保存模型\n",
    "original_model.double()\n",
    "\n",
    "# 评估原始模型性能\n",
    "original_acc = evaluate_model(original_model, test_loader)\n",
    "print(f\"Original model accuracy: {original_acc*100:.2f}%\")\n",
    "\n",
    "# 计算神经元重要性（如果尚未计算）\n",
    "#if not 'neuron_imp' in locals():\n",
    "print(\"Calculating neuron importance...\")\n",
    "test_images, test_labels = next(iter(test_loader))\n",
    "test_images = test_images[:50].double().to(device)\n",
    "test_labels = test_labels[:50].to(device)\n",
    "neuron_imp = neuron_importance_analysis(original_model, test_images, test_labels)\n",
    "\n",
    "# 执行三种神经元剪枝\n",
    "print(\"\\nPruning by weight absolute sum...\")\n",
    "weight_sum_pruned = prune_neurons(original_model, 'weight_sum', amount=0.9)\n",
    "\n",
    "print(\"\\nPruning by cosine similarity importance...\")\n",
    "cosine_pruned = prune_neurons(original_model, 'cosine', amount=0.8, importance_results=neuron_imp)\n",
    "\n",
    "print(\"\\nPruning by difference norm importance...\")\n",
    "diff_norm_pruned = prune_neurons(original_model, 'diff_norm', amount=0.8, importance_results=neuron_imp)\n",
    "\n",
    "# 评估剪枝后模型性能\n",
    "weight_sum_acc = evaluate_model(weight_sum_pruned, test_loader)\n",
    "cosine_acc = evaluate_model(cosine_pruned, test_loader)\n",
    "diff_norm_acc = evaluate_model(diff_norm_pruned, test_loader)\n",
    "\n",
    "# 计算稀疏度（神经元级别）\n",
    "def calculate_neuron_sparsity(model):\n",
    "    total_neurons = 0\n",
    "    dead_neurons = 0\n",
    "    # 只考虑隐藏层（fc1到fc4）\n",
    "    for name, module in model.named_modules():\n",
    "        if isinstance(module, nn.Linear) and name.startswith('fc') and name != 'fc5':\n",
    "            # 检查神经元是否被剪枝（输出全零）\n",
    "            for i in range(module.weight.shape[0]):\n",
    "                if torch.all(module.weight.data[i] == 0) and (module.bias is None or module.bias.data[i] == 0):\n",
    "                    dead_neurons += 1\n",
    "                total_neurons += 1\n",
    "    return dead_neurons / total_neurons if total_neurons > 0 else 0\n",
    "\n",
    "# 打印结果\n",
    "print(\"\\nNeuron Pruning Results:\")\n",
    "print(f\"Original Accuracy: {original_acc*100:.2f}%\")\n",
    "print(f\"Weight Sum Pruning (90% neurons pruned): Accuracy = {weight_sum_acc*100:.2f}%, Neuron Sparsity = {calculate_neuron_sparsity(weight_sum_pruned)*100:.2f}%\")\n",
    "print(f\"Cosine Similarity Pruning (80% neurons pruned): Accuracy = {cosine_acc*100:.2f}%, Neuron Sparsity = {calculate_neuron_sparsity(cosine_pruned)*100:.2f}%\")\n",
    "print(f\"Difference Norm Pruning (80% neurons pruned): Accuracy = {diff_norm_acc*100:.2f}%, Neuron Sparsity = {calculate_neuron_sparsity(diff_norm_pruned)*100:.2f}%\")\n",
    "\n",
    "# 保存剪枝后的模型\n",
    "torch.save(weight_sum_pruned.state_dict(), 'weight_sum_neuron_pruned.pth')\n",
    "torch.save(cosine_pruned.state_dict(), 'cosine_neuron_pruned.pth')\n",
    "torch.save(diff_norm_pruned.state_dict(), 'diff_norm_neuron_pruned.pth')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 150,
   "id": "b414dd4a-eaa9-4b0f-8333-88f1b28ba725",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Network structure plots saved successfully!\n"
     ]
    }
   ],
   "source": [
    "import matplotlib.pyplot as plt\n",
    "import numpy as np\n",
    "from matplotlib.colors import LinearSegmentedColormap\n",
    "import matplotlib.patches as mpatches\n",
    "\n",
    "def plot_pruned_network(model, title, filename, pruned_model=None):\n",
    "    \"\"\"\n",
    "    绘制剪枝后的网络结构图，标记被剪除的神经元\n",
    "    :param model: 原始模型\n",
    "    :param title: 图像标题\n",
    "    :param filename: 保存文件名\n",
    "    :param pruned_model: 剪枝后的模型（可选）\n",
    "    \"\"\"\n",
    "    # 获取层信息\n",
    "    # 784-512-256-128-64-10: matches fc1..fc4 hidden layers + output (see tqdm sizes 512/256/128/64)\n",
    "    layer_sizes = [784, 512, 256, 128, 64, 10]\n",
    "    layers = [model.fc1, model.fc2, model.fc3, model.fc4]\n",
    "    \n",
    "    # 创建图形\n",
    "    fig = plt.figure(figsize=(15, 10))\n",
    "    ax = fig.add_subplot(111)\n",
    "    \n",
    "    # 定义位置\n",
    "    layer_positions = np.linspace(0, 1, len(layer_sizes))\n",
    "    neuron_positions = {}\n",
    "    \n",
    "    # 计算每层神经元位置\n",
    "    for i, size in enumerate(layer_sizes):\n",
    "        y_pos = np.linspace(0, 1, size)\n",
    "        neuron_positions[i] = [(layer_positions[i], y) for y in y_pos]\n",
    "    \n",
    "    # 绘制神经元\n",
    "    neuron_colors = []\n",
    "    neuron_labels = []\n",
    "    \n",
    "    # 检查神经元是否被剪除\n",
    "    if pruned_model is not None:\n",
    "        pruned_layers = [pruned_model.fc1, pruned_model.fc2, pruned_model.fc3, pruned_model.fc4]\n",
    "    \n",
    "    for i in range(len(layer_sizes)):\n",
    "        for j in range(layer_sizes[i]):\n",
    "            # 输入层和输出层总是激活的\n",
    "            status = 'active'\n",
    "            \n",
    "            # 检查隐藏层神经元是否被剪除\n",
    "            if i > 0 and i < len(layer_sizes) - 1:\n",
    "                layer_idx = i  # 对应模型层索引\n",
    "                if pruned_model is not None:\n",
    "                    # 检查权重是否全为零（被剪除）\n",
    "                    if torch.all(pruned_layers[layer_idx-1].weight.data[j] == 0):\n",
    "                        status = 'pruned'\n",
    "            \n",
    "            # 设置颜色和标签\n",
    "            if status == 'active':\n",
    "                color = 'black'\n",
    "                label = 'Active Neuron'\n",
    "            else:\n",
    "                color = 'lightgray'\n",
    "                label = 'Pruned Neuron'\n",
    "            \n",
    "            # 绘制神经元\n",
    "            ax.scatter(layer_positions[i], neuron_positions[i][j][1], \n",
    "                      s=80, color=color, edgecolor='black', zorder=3)\n",
    "            \n",
    "            # 保存颜色和标签用于图例\n",
    "            if label not in neuron_labels:\n",
    "                neuron_colors.append(color)\n",
    "                neuron_labels.append(label)\n",
    "    \n",
    "    # 绘制连接线（只绘制激活的连接）\n",
    "    for i in range(len(layer_sizes) - 1):\n",
    "        # 获取源层和目标层\n",
    "        src_layer = i\n",
    "        dst_layer = i + 1\n",
    "        \n",
    "        # Only hidden layers can contain pruned neurons; the output layer is never pruned\n",
    "        pruned_neurons = []\n",
    "        if pruned_model is not None and dst_layer < len(layer_sizes) - 1:\n",
    "            for j in range(layer_sizes[dst_layer]):\n",
    "                if torch.all(pruned_layers[dst_layer-1].weight.data[j] == 0):\n",
    "                    pruned_neurons.append(j)\n",
    "        \n",
    "        # 绘制连接线\n",
    "        for src_idx in range(layer_sizes[src_layer]):\n",
    "            for dst_idx in range(layer_sizes[dst_layer]):\n",
    "                # 如果目标神经元被剪除，跳过连接\n",
    "                if pruned_model is not None and dst_idx in pruned_neurons:\n",
    "                    continue\n",
    "                \n",
    "                # 随机采样部分连接以避免图像过于密集\n",
    "                if np.random.rand() > 0.99:  # 只绘制1%的连接\n",
    "                    src_pos = neuron_positions[src_layer][src_idx]\n",
    "                    dst_pos = neuron_positions[dst_layer][dst_idx]\n",
    "                    \n",
    "                    # 设置线条样式\n",
    "                    linewidth = 0.5\n",
    "                    alpha = 0.2\n",
    "                    \n",
    "                    # 绘制连接线\n",
    "                    ax.plot([src_pos[0], dst_pos[0]], [src_pos[1], dst_pos[1]], \n",
    "                           'k-', linewidth=linewidth, alpha=alpha, zorder=1)\n",
    "    \n",
    "    # 添加层标签\n",
    "    for i, size in enumerate(layer_sizes):\n",
    "        ax.text(layer_positions[i], -0.05, f'Layer {i+1}\\n({size} neurons)', \n",
    "                ha='center', fontsize=12, fontweight='bold')\n",
    "    \n",
    "    # 添加标题\n",
    "    ax.set_title(title, fontsize=16, pad=20)\n",
    "    \n",
    "    # 添加图例\n",
    "    legend_patches = [mpatches.Patch(color=c, label=l) \n",
    "                      for c, l in zip(neuron_colors, neuron_labels)]\n",
    "    ax.legend(handles=legend_patches, loc='upper center', \n",
    "              bbox_to_anchor=(0.5, -0.05), ncol=2, fontsize=12)\n",
    "    \n",
    "    # 设置坐标轴\n",
    "    ax.set_xlim(-0.1, 1.1)\n",
    "    ax.set_ylim(-0.1, 1.1)\n",
    "    ax.axis('off')\n",
    "    \n",
    "    plt.tight_layout()\n",
    "    plt.savefig(filename, dpi=300, bbox_inches='tight')\n",
    "    plt.close()\n",
    "\n",
    "# 加载原始模型\n",
    "original_model = MLP1().to(device)\n",
    "original_model.load_state_dict(torch.load('mlp1_mnist.pth'))\n",
    "original_model.double()\n",
    "\n",
    "# 加载剪枝后的模型\n",
    "weight_sum_pruned = MLP1().to(device)\n",
    "weight_sum_pruned.load_state_dict(torch.load('weight_sum_neuron_pruned.pth'))\n",
    "weight_sum_pruned.double()\n",
    "\n",
    "cosine_pruned = MLP1().to(device)\n",
    "cosine_pruned.load_state_dict(torch.load('cosine_neuron_pruned.pth'))\n",
    "cosine_pruned.double()\n",
    "\n",
    "diff_norm_pruned = MLP1().to(device)\n",
    "diff_norm_pruned.load_state_dict(torch.load('diff_norm_neuron_pruned.pth'))\n",
    "diff_norm_pruned.double()\n",
    "\n",
    "# 绘制原始网络结构\n",
    "plot_pruned_network(original_model, 'Original Network Structure (784-512-256-128-64-10)', \n",
    "                   'original_network1.png')\n",
    "\n",
    "# 绘制三种剪枝后的网络结构\n",
    "plot_pruned_network(original_model, 'Weight Sum Pruning: Network Structure', \n",
    "                   'weight_sum_pruned_network1.png', weight_sum_pruned)\n",
    "\n",
    "plot_pruned_network(original_model, 'Cosine Similarity Pruning: Network Structure', \n",
    "                   'cosine_pruned_network1.png', cosine_pruned)\n",
    "\n",
    "plot_pruned_network(original_model, 'Difference Norm Pruning: Network Structure', \n",
    "                   'diff_norm_pruned_network1.png', diff_norm_pruned)\n",
    "\n",
    "print(\"Network structure plots saved successfully!\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "51ececde-2e28-4846-a89b-26a54d957460",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.8"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
