{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.nn.functional as F\n",
    "from torchvision import datasets, transforms\n",
    "import torch.utils.data\n",
    "import numpy as np\n",
    "import math"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 54,
   "metadata": {},
   "outputs": [],
   "source": [
    "def to_var(x, requires_grad=False):\n",
    "    \"\"\"\n",
    "    Move a tensor to GPU when available and return a detached copy\n",
    "    with the requested requires_grad setting.\n",
    "    \"\"\"\n",
    "    if torch.cuda.is_available():\n",
    "        x = x.cuda()\n",
    "    return x.clone().detach().requires_grad_(requires_grad)\n",
    "\n",
    "class MaskedConv2d(nn.Conv2d):\n",
    "    \"\"\"Conv2d whose weights can be element-wise masked (for pruning).\"\"\"\n",
    "    def __init__(self, in_channels, out_channels, kernel_size, stride=1,\n",
    "                 padding=0, dilation=1, groups=1, bias=True):\n",
    "        super(MaskedConv2d, self).__init__(in_channels, out_channels, \n",
    "            kernel_size, stride, padding, dilation, groups, bias)\n",
    "        self.mask_flag = False  # True once set_mask() has been called\n",
    "    \n",
    "    def set_mask(self, mask):\n",
    "        \"\"\"Store a 0/1 mask (same shape as weight) and zero masked weights now.\"\"\"\n",
    "        self.mask = to_var(mask, requires_grad=False)\n",
    "        self.weight.data = self.weight.data*self.mask.data\n",
    "        self.mask_flag = True\n",
    "    \n",
    "    def get_mask(self):\n",
    "        print(self.mask_flag)\n",
    "        return self.mask\n",
    "    \n",
    "    def forward(self, x):\n",
    "        # Re-apply the mask on every forward pass so optimizer updates\n",
    "        # cannot revive pruned weights during finetuning.\n",
    "        # (idiom fix: truthiness test instead of `== True`)\n",
    "        if self.mask_flag:\n",
    "            weight = self.weight*self.mask\n",
    "            return F.conv2d(x, weight, self.bias, self.stride,\n",
    "                        self.padding, self.dilation, self.groups)\n",
    "        else:\n",
    "            return F.conv2d(x, self.weight, self.bias, self.stride,\n",
    "                        self.padding, self.dilation, self.groups)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 55,
   "metadata": {},
   "outputs": [],
   "source": [
    "class ConvNet(nn.Module):\n",
    "    \"\"\"Small CNN for MNIST built from maskable conv layers.\"\"\"\n",
    "\n",
    "    def __init__(self):\n",
    "        super(ConvNet, self).__init__()\n",
    "\n",
    "        # Three 3x3 conv stages; the first two are followed by 2x2 max\n",
    "        # pooling, so a 28x28 input reaches the classifier as 7x7x64.\n",
    "        self.conv1 = MaskedConv2d(1, 32, kernel_size=3, padding=1, stride=1)\n",
    "        self.relu1 = nn.ReLU(inplace=True)\n",
    "        self.maxpool1 = nn.MaxPool2d(2)\n",
    "\n",
    "        self.conv2 = MaskedConv2d(32, 64, kernel_size=3, padding=1, stride=1)\n",
    "        self.relu2 = nn.ReLU(inplace=True)\n",
    "        self.maxpool2 = nn.MaxPool2d(2)\n",
    "\n",
    "        self.conv3 = MaskedConv2d(64, 64, kernel_size=3, padding=1, stride=1)\n",
    "        self.relu3 = nn.ReLU(inplace=True)\n",
    "\n",
    "        self.linear1 = nn.Linear(7*7*64, 10)\n",
    "\n",
    "    def forward(self, x):\n",
    "        h = self.maxpool1(self.relu1(self.conv1(x)))\n",
    "        h = self.maxpool2(self.relu2(self.conv2(h)))\n",
    "        h = self.relu3(self.conv3(h))\n",
    "        flat = h.view(h.size(0), -1)\n",
    "        return self.linear1(flat)\n",
    "\n",
    "    def set_masks(self, masks):\n",
    "        \"\"\"Apply one numpy mask per masked conv layer, in network order.\"\"\"\n",
    "        for layer, mask in zip((self.conv1, self.conv2, self.conv3), masks):\n",
    "            layer.set_mask(torch.from_numpy(mask))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 56,
   "metadata": {},
   "outputs": [],
   "source": [
    "def train(model, device, train_loader, optimizer, epoch):\n",
    "    \"\"\"\n",
    "    Run one training epoch, printing an in-place progress bar.\n",
    "\n",
    "    epoch is used only for display in the progress line.\n",
    "    \"\"\"\n",
    "    model.train()\n",
    "    total = 0\n",
    "    for batch_idx, (data, target) in enumerate(train_loader):\n",
    "        data, target = data.to(device), target.to(device)\n",
    "        optimizer.zero_grad()\n",
    "        output = model(data)\n",
    "        loss = F.cross_entropy(output, target)\n",
    "        loss.backward()\n",
    "        optimizer.step()\n",
    "        \n",
    "        total += len(data)\n",
    "        # batch_idx + 1 batches are complete at this point; the previous\n",
    "        # `batch_idx / len(...)` lagged one batch (showed 0% after batch 1)\n",
    "        progress = math.ceil((batch_idx + 1) / len(train_loader) * 50)\n",
    "        print(\"\\rTrain epoch %d: %d/%d, [%-51s] %d%%\" %\n",
    "              (epoch, total, len(train_loader.dataset),\n",
    "               '-' * progress + '>', progress * 2), end='')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 57,
   "metadata": {},
   "outputs": [],
   "source": [
    "def test(model, device, test_loader):\n",
    "    \"\"\"Evaluate the model; return (average loss, accuracy fraction).\"\"\"\n",
    "    model.eval()\n",
    "    loss_sum = 0\n",
    "    n_correct = 0\n",
    "    with torch.no_grad():\n",
    "        for data, target in test_loader:\n",
    "            data, target = data.to(device), target.to(device)\n",
    "            logits = model(data)\n",
    "            # accumulate the un-averaged loss; divide once at the end\n",
    "            loss_sum += F.cross_entropy(logits, target, reduction='sum').item()\n",
    "            # index of the max logit is the predicted class\n",
    "            pred = logits.argmax(dim=1, keepdim=True)\n",
    "            n_correct += pred.eq(target.view_as(pred)).sum().item()\n",
    "\n",
    "    n_total = len(test_loader.dataset)\n",
    "    avg_loss = loss_sum / n_total\n",
    "\n",
    "    print('\\nTest: average loss: {:.4f}, accuracy: {}/{} ({:.0f}%)'.format(\n",
    "        avg_loss, n_correct, n_total,\n",
    "        100. * n_correct / n_total))\n",
    "    return avg_loss, n_correct / n_total"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 58,
   "metadata": {},
   "outputs": [],
   "source": [
    "\"\"\"Reference https://github.com/zepx/pytorch-weight-prune/\"\"\"\n",
    "def prune_rate(model, verbose=False):\n",
    "    \"\"\"\n",
    "    Print out prune rate for each layer and the whole network\n",
    "    \"\"\"\n",
    "    total_nb_param = 0\n",
    "    nb_zero_param = 0\n",
    "\n",
    "    layer_id = 0\n",
    "\n",
    "    for parameter in model.parameters():\n",
    "\n",
    "        # number of elements in this parameter tensor\n",
    "        param_this_layer = 1\n",
    "        for dim in parameter.data.size():\n",
    "            param_this_layer *= dim\n",
    "        total_nb_param += param_this_layer\n",
    "\n",
    "        # only pruning linear and conv layers (1-d tensors are biases)\n",
    "        if len(parameter.data.size()) != 1:\n",
    "            layer_id += 1\n",
    "            zero_param_this_layer = \\\n",
    "                np.count_nonzero(parameter.cpu().data.numpy()==0)\n",
    "            nb_zero_param += zero_param_this_layer\n",
    "\n",
    "            if verbose:\n",
    "                print(\"Layer {} | {} layer | {:.2f}% parameters pruned\" \\\n",
    "                    .format(\n",
    "                        layer_id,\n",
    "                        'Conv' if len(parameter.data.size()) == 4 \\\n",
    "                            else 'Linear',\n",
    "                        100.*zero_param_this_layer/param_this_layer,\n",
    "                        ))\n",
    "    pruning_perc = 100.*nb_zero_param/total_nb_param\n",
    "    if verbose:\n",
    "        print(\"Final pruning rate: {:.2f}%\".format(pruning_perc))\n",
    "    return pruning_perc\n",
    "\n",
    "\n",
    "def arg_nonzero_min(a):\n",
    "    \"\"\"\n",
    "    Return (value, index) of the smallest nonzero element of a\n",
    "    non-negative list; (inf, inf) when every element is zero.\n",
    "    \"\"\"\n",
    "\n",
    "    if not a:\n",
    "        return\n",
    "\n",
    "    min_ix, min_v = None, None\n",
    "    # find the starting value: the FIRST nonzero element (the original\n",
    "    # loop scanned the whole list and kept the last one)\n",
    "    for i, e in enumerate(a):\n",
    "        if e != 0:\n",
    "            min_ix = i\n",
    "            min_v = e\n",
    "            break\n",
    "    # bug fix: `if not min_ix` was also true for index 0, so a list whose\n",
    "    # only nonzero entry sat at position 0 was misreported as all-zero\n",
    "    if min_ix is None:\n",
    "        print('Warning: all zero')\n",
    "        return np.inf, np.inf\n",
    "\n",
    "    # search for the smallest nonzero\n",
    "    for i, e in enumerate(a):\n",
    "        if e < min_v and e != 0:\n",
    "            min_v = e\n",
    "            min_ix = i\n",
    "\n",
    "    return min_v, min_ix\n",
    "\n",
    "def prune_one_filter(model, masks):\n",
    "    '''\n",
    "    Pruning one least ``important'' feature map by the scaled l2norm of \n",
    "    kernel weights\n",
    "    arXiv:1611.06440\n",
    "    '''\n",
    "    NO_MASKS = False\n",
    "    # construct masks if there is not yet\n",
    "    if not masks:\n",
    "        masks = []\n",
    "        NO_MASKS = True\n",
    "\n",
    "    values = []\n",
    "    for p in model.parameters():\n",
    "\n",
    "        if len(p.data.size()) == 4: # nasty way of selecting conv layer\n",
    "            p_np = p.data.cpu().numpy()\n",
    "\n",
    "            # construct masks if there is not\n",
    "            if NO_MASKS:\n",
    "                masks.append(np.ones(p_np.shape).astype('float32'))\n",
    "\n",
    "            # find the scaled l2 norm for each filter this layer\n",
    "            value_this_layer = np.square(p_np).sum(axis=1).sum(axis=1)\\\n",
    "                .sum(axis=1)/(p_np.shape[1]*p_np.shape[2]*p_np.shape[3])\n",
    "            # normalization (important)\n",
    "            value_this_layer = value_this_layer / \\\n",
    "                np.sqrt(np.square(value_this_layer).sum())\n",
    "            # already-pruned filters have norm 0 and are skipped here\n",
    "            min_value, min_ind = arg_nonzero_min(list(value_this_layer))\n",
    "            values.append([min_value, min_ind])\n",
    "\n",
    "    assert len(masks) == len(values), \"something wrong here\"\n",
    "\n",
    "    values = np.array(values)\n",
    "\n",
    "    # set mask corresponding to the filter to prune: zeroing row\n",
    "    # `to_prune_filter_ind` disables that whole output filter\n",
    "    to_prune_layer_ind = np.argmin(values[:, 0])\n",
    "    to_prune_filter_ind = int(values[to_prune_layer_ind, 1])\n",
    "    masks[to_prune_layer_ind][to_prune_filter_ind] = 0.\n",
    "\n",
    "    return masks\n",
    "\n",
    "\n",
    "def filter_prune(model, pruning_perc):\n",
    "    '''\n",
    "    Prune filters one by one until reach pruning_perc\n",
    "    (not iterative pruning)\n",
    "    '''\n",
    "    masks = []\n",
    "    current_pruning_perc = 0.\n",
    "\n",
    "    while current_pruning_perc < pruning_perc:\n",
    "        masks = prune_one_filter(model, masks)\n",
    "        model.set_masks(masks)\n",
    "        current_pruning_perc = prune_rate(model, verbose=False)\n",
    "    return masks"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 61,
   "metadata": {},
   "outputs": [],
   "source": [
    "def main():\n",
    "    \"\"\"Train a small CNN on MNIST, prune 50% of conv filters, finetune.\"\"\"\n",
    "    epochs = 2\n",
    "    batch_size = 64\n",
    "    torch.manual_seed(0)  # reproducible init and shuffling\n",
    "\n",
    "    device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n",
    "\n",
    "    # NOTE(review): download=False assumes ../data/MNIST already exists\n",
    "    train_loader = torch.utils.data.DataLoader(\n",
    "        datasets.MNIST('../data/MNIST', train=True, download=False,\n",
    "                       transform=transforms.Compose([\n",
    "                           transforms.ToTensor(),\n",
    "                           transforms.Normalize((0.1307,), (0.3081,))\n",
    "                       ])),\n",
    "        batch_size=batch_size, shuffle=True)\n",
    "    test_loader = torch.utils.data.DataLoader(\n",
    "        datasets.MNIST('../data/MNIST', train=False, download=False, transform=transforms.Compose([\n",
    "            transforms.ToTensor(),\n",
    "            transforms.Normalize((0.1307,), (0.3081,))\n",
    "        ])),\n",
    "        batch_size=1000, shuffle=True)\n",
    "\n",
    "    model = ConvNet().to(device)\n",
    "    optimizer = torch.optim.Adadelta(model.parameters())\n",
    "    \n",
    "    for epoch in range(1, epochs + 1):\n",
    "        train(model, device, train_loader, optimizer, epoch)\n",
    "        _, acc = test(model, device, test_loader)\n",
    "    \n",
    "    print('\\npruning 50%')\n",
    "    mask = filter_prune(model, 50)\n",
    "    model.set_masks(mask)\n",
    "    _, acc = test(model, device, test_loader)\n",
    "    \n",
    "    # finetune one extra epoch so remaining weights recover accuracy;\n",
    "    # bug fix: previously relied on the loop variable `epoch` leaking out\n",
    "    # of the for-loop -- pass `epochs` (same value) explicitly\n",
    "    print('\\nfinetune')\n",
    "    train(model, device, train_loader, optimizer, epochs)\n",
    "    _, acc = test(model, device, test_loader)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 62,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Train epoch 1: 60000/60000, [-------------------------------------------------->] 100%\n",
      "Test: average loss: 0.0594, accuracy: 9797/10000 (98%)\n",
      "Train epoch 2: 60000/60000, [-------------------------------------------------->] 100%\n",
      "Test: average loss: 0.0229, accuracy: 9927/10000 (99%)\n",
      "\n",
      "pruning 50%\n",
      "\n",
      "Test: average loss: 1.4081, accuracy: 8449/10000 (84%)\n",
      "\n",
      "finetune\n",
      "Train epoch 2: 60000/60000, [-------------------------------------------------->] 100%\n",
      "Test: average loss: 0.0357, accuracy: 9876/10000 (99%)\n"
     ]
    }
   ],
   "source": [
    "# Run the full train -> prune -> finetune pipeline\n",
    "main()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.8"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 1
}
