{
  "nbformat": 4,
  "nbformat_minor": 0,
  "metadata": {
    "colab": {
      "name": "demo_2_colab_prune.ipynb",
      "version": "0.3.2",
      "provenance": [],
      "private_outputs": true,
      "collapsed_sections": []
    },
    "language_info": {
      "codemirror_mode": {
        "name": "ipython",
        "version": 3
      },
      "file_extension": ".py",
      "mimetype": "text/x-python",
      "name": "python",
      "nbconvert_exporter": "python",
      "pygments_lexer": "ipython3",
      "version": "3.7.2"
    },
    "kernelspec": {
      "display_name": "Python 3",
      "language": "python",
      "name": "python3"
    },
    "accelerator": "GPU"
  },
  "cells": [
    {
      "cell_type": "code",
      "metadata": {
        "colab_type": "code",
        "id": "B6TjnZUKk53l",
        "colab": {}
      },
      "source": [
        "# Clone the project repository (branch: feature/Tomas); provides the src/ package imported below\n",
        "! git clone --branch feature/Tomas https://github.com/prerakmody/CS4180-DL"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "colab_type": "code",
        "id": "6TrfP5PolV9j",
        "colab": {}
      },
      "source": [
        "# Record which GPU / driver the Colab runtime assigned\n",
        "! nvidia-smi"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "colab_type": "code",
        "id": "_fjLXIhQmQSX",
        "colab": {}
      },
      "source": [
        "#encoding:utf-8\n",
        "import sys\n",
        "import math\n",
        "import os\n",
        "\n",
        "# Make the cloned repo importable as `src.*`\n",
        "dir_main = './CS4180-DL'\n",
        "sys.path.append(dir_main)\n",
        "\n",
        "import torch\n",
        "import torch.nn as nn\n",
        "import torch.utils.model_zoo as model_zoo\n",
        "from torchvision import models\n",
        "import torch.nn.functional as F\n",
        "\n",
        "# Prunable layer wrappers from the project; both expose set_mask()\n",
        "from src.pruning.weightPruning.layers import MaskedLinear,MaskedConv2d\n",
        "\n",
        "# VGG-style feature configurations: integers are conv output-channel\n",
        "# counts, 'M' inserts a max-pool (consumed by YOLOv1.getFeatureLayers).\n",
        "cfg = {\n",
        "    'A': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],\n",
        "    'B': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],\n",
        "    'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],\n",
        "    'E': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],\n",
        "}\n",
        "\n",
        "def getYOLOv1(name=''):\n",
        "    \"\"\"Build a YOLOv1 network called `name`, seeding its convolutional\n",
        "    ('features') weights from torchvision's pretrained VGG16-BN.\n",
        "\n",
        "    Prints a message and exits with status 1 when no name is given.\n",
        "    \"\"\"\n",
        "    if name == '':\n",
        "        print (' - Pass a name for your model')\n",
        "        sys.exit(1)\n",
        "\n",
        "    model_yolo     = YOLOv1(name, cfg['D'], batch_norm=True)\n",
        "    vgg_pretrained = models.vgg16_bn(pretrained=True)\n",
        "    sd_vgg         = vgg_pretrained.state_dict()\n",
        "    sd_yolo        = model_yolo.state_dict()\n",
        "\n",
        "    # Copy every 'features.*' tensor the two networks have in common\n",
        "    for key, value in sd_vgg.items():\n",
        "        if key.startswith('features') and key in sd_yolo:\n",
        "            sd_yolo[key] = value\n",
        "    model_yolo.load_state_dict(sd_yolo)\n",
        "    return model_yolo\n",
        "\n",
        "\n",
        "\n",
        "class YOLOv1(nn.Module):\n",
        "    \"\"\"VGG-style YOLOv1: a conv feature extractor built from `cfg`,\n",
        "    followed by two fully-connected layers, producing a sigmoid\n",
        "    detection grid of shape (batch, 7, 7, 30) (= 1470 values).\n",
        "\n",
        "    Conv/linear layers are the project's MaskedConv2d/MaskedLinear so\n",
        "    weight-pruning masks can be attached later via set_masks().\n",
        "    \"\"\"\n",
        "\n",
        "    def __init__(self, name, cfg, batch_norm, image_size=448):\n",
        "        super(YOLOv1, self).__init__()\n",
        "        self.name       = name\n",
        "        self.features   = self.getFeatureLayers(cfg, batch_norm)\n",
        "        # 512*7*7 flattened conv features -> 4096 -> 1470 (= 7*7*30)\n",
        "        self.linear1    = MaskedLinear(512 * 7 * 7, 4096)\n",
        "        self.linear2    = MaskedLinear(4096, 1470)\n",
        "        self.classifier = nn.Sequential( # add the regression part to the features\n",
        "            # nn.Linear(512 * 7 * 7, 4096),\n",
        "            self.linear1,\n",
        "            nn.ReLU(True),\n",
        "            nn.Dropout(),\n",
        "            # nn.Linear(4096, 1470),\n",
        "            self.linear2,\n",
        "        )\n",
        "        self._initialize_weights()\n",
        "        self.image_size = image_size\n",
        "\n",
        "    def forward(self, x):\n",
        "        # conv features -> flatten -> FC head -> sigmoid -> 7x7x30 grid\n",
        "        x = self.features(x)\n",
        "        x = x.view(x.size(0), -1)\n",
        "        x = self.classifier(x)\n",
        "        x = torch.sigmoid(x)\n",
        "        x = x.view(-1,7,7,30)\n",
        "        return x\n",
        "\n",
        "    def _initialize_weights(self):\n",
        "        # He (fan-out) init for convs, unit scale for batch-norm, small\n",
        "        # normal noise for linears. Pretrained VGG 'features' weights are\n",
        "        # copied over these afterwards by getYOLOv1.\n",
        "        for m in self.modules():\n",
        "            if isinstance(m, nn.Conv2d):\n",
        "                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n",
        "                m.weight.data.normal_(0, math.sqrt(2. / n))\n",
        "                if m.bias is not None:\n",
        "                    m.bias.data.zero_()\n",
        "            elif isinstance(m, nn.BatchNorm2d):\n",
        "                m.weight.data.fill_(1)\n",
        "                m.bias.data.zero_()\n",
        "            elif isinstance(m, nn.Linear):\n",
        "                m.weight.data.normal_(0, 0.01)\n",
        "                m.bias.data.zero_()\n",
        "\n",
        "    def getFeatureLayers(self, cfg, batch_norm=False):\n",
        "        # Translate cfg into layers: ints -> 3x3 MaskedConv2d (+BatchNorm)\n",
        "        # + ReLU, 'M' -> 2x2 max-pool. The first 64-channel conv uses\n",
        "        # stride 2, so a 448x448 input comes out 7x7 after the five pools.\n",
        "        params_in_channels  = 3\n",
        "        params_conv_stride  = 1\n",
        "        params_conv_size    = 3\n",
        "        params_first_flag   = True\n",
        "        params_pool_stride  = 2\n",
        "        params_pool_kernel  = 2\n",
        "\n",
        "        layers = []\n",
        "        for item in cfg:\n",
        "            params_conv_stride = 1\n",
        "            if (item == 64 and params_first_flag):\n",
        "                params_conv_stride = 2\n",
        "                params_first_flag  = False\n",
        "\n",
        "            if item == 'M': # max-pooling\n",
        "                layers += [nn.MaxPool2d(kernel_size=params_pool_kernel, stride=params_pool_stride)]\n",
        "            else:\n",
        "                params_kernels = item\n",
        "                conv2d = MaskedConv2d(params_in_channels, params_kernels, kernel_size=params_conv_size, stride=params_conv_stride, padding=1)\n",
        "                if batch_norm:\n",
        "                    layers += [conv2d, nn.BatchNorm2d(item), nn.ReLU(inplace=True)]\n",
        "                else:\n",
        "                    layers += [conv2d, nn.ReLU(inplace=True)]\n",
        "                params_in_channels = item\n",
        "        return nn.Sequential(*layers)\n",
        "\n",
        "    def set_masks(self, masks):\n",
        "        # Apply pruning masks in order: one per MaskedConv2d in the\n",
        "        # features, then the two fully-connected layers.\n",
        "        # getattr() replaces the old bare `except: pass`, which skipped\n",
        "        # layers without a `.name` attribute but also silently swallowed\n",
        "        # any real error raised inside set_mask() or by mask indexing.\n",
        "        count = 0\n",
        "        for feat in self.features:\n",
        "            if getattr(feat, 'name', None) == 'MaskedConv2d':\n",
        "                feat.set_mask(masks[count])\n",
        "                count += 1\n",
        "        self.linear1.set_mask(masks[count])\n",
        "        self.linear2.set_mask(masks[count + 1])\n",
        "\n",
        "def test():\n",
        "    \"\"\"Smoke test: one forward pass on a random 448x448 image.\n",
        "    Should print torch.Size([1, 7, 7, 30]).\"\"\"\n",
        "    # Fixes: getYOLOv1() without a name used to print + sys.exit(1), and\n",
        "    # the old `Variable(img)` wrapper referenced a name never imported\n",
        "    # here (NameError); plain tensors work in modern PyTorch.\n",
        "    net = getYOLOv1('test')\n",
        "    img = torch.rand(1,3,448,448)\n",
        "    output = net(img)\n",
        "    print(output.size())\n",
        "\n",
        "# if __name__ == '__main__':\n",
        "#     test()"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "colab_type": "code",
        "id": "4xm-w5NNmQSa",
        "colab": {}
      },
      "source": [
        "import os\n",
        "import sys\n",
        "import pdb; # pdb.set_trace()\n",
        "import matplotlib.pyplot as plt\n",
        "%matplotlib inline\n",
        "\n",
        "import torch\n",
        "from torch.utils.data import DataLoader\n",
        "import torchvision.transforms as transforms\n",
        "from tensorboardcolab import TensorBoardColab\n",
        "\n",
        "# Train on the GPU whenever the Colab runtime provides one\n",
        "USE_GPU = torch.cuda.is_available()\n",
        "print (' - USE_GPU : ', USE_GPU)"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "colab_type": "code",
        "id": "28e7Fj16sMPr",
        "colab": {}
      },
      "source": [
        "# Global TensorBoard logger, created once so the training and\n",
        "# retraining cells below can both pass it to trainObj.train(...)\n",
        "LOGGER = TensorBoardColab()    "
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "colab_type": "code",
        "id": "5dIiN6yEm5hI",
        "colab": {}
      },
      "source": [
        "\n",
        "from src.dataloader import YoloDataset\n",
        "# from src.nets import *\n",
        "from src.train import YOLOv1Train, YOLOv1Loss\n",
        "# from src.train import YOLOv1Loss\n",
        "\n",
        "# Release cached GPU memory from any previous run before training\n",
        "torch.cuda.empty_cache()\n",
        "\n",
        "\n",
        "if __name__ == \"__main__\":\n",
        "    \n",
        "    # 1. Model: YOLOv1 with VGG16-BN-pretrained feature weights\n",
        "    if (1):\n",
        "        model_name = 'yolov1'\n",
        "        model = getYOLOv1(model_name)\n",
        "        print (' - 1. Net : ', model_name)\n",
        "    \n",
        "    # 2. Hyper-parameters and loss\n",
        "    # NOTE(review): YOLOv1Loss('','',5,0.5) — the 5 / 0.5 look like the\n",
        "    # paper's lambda_coord / lambda_noobj; confirm against src/train.py\n",
        "    if (1):\n",
        "        DEBUG         = True\n",
        "        LEARNING_RATE = 0.001\n",
        "        EPOCHS        = 8\n",
        "        BATCH_SIZE    = 16\n",
        "        criterion     = YOLOv1Loss('','',5,0.5)\n",
        "        optimizer     = 'SGD'\n",
        "        \n",
        "        print (' - 2. [PARAMS] Debug      : ', DEBUG)\n",
        "        print (' - 2. [PARAMS] BATCH_SIZE : ', BATCH_SIZE)\n",
        "        print (' - 2. [PARAMS] EPOCHS     : ', EPOCHS)\n",
        "        print (' - 2. [PARAMS] Optimizer  : ', optimizer)\n",
        "        \n",
        "    # Checkpoint settings (CHKP_LOAD=True presumably resumes from the\n",
        "    # epoch-004 file — verify the semantics in YOLOv1Train.train)\n",
        "    if (1):\n",
        "        CHKP_DIR    = 'chkpoints'\n",
        "        CHKP_NAME   = 'yolov1_epoch%.3d.pkl' % (4)\n",
        "        CHKP_EPOCHS = 1\n",
        "        CHKP_LOAD = True\n",
        "        print (' - 2. [PARAMS_SAVE] CHKP_DIR  : ', CHKP_DIR)\n",
        "        print (' - 2. [PARAMS_SAVE] CHKP_NAME : ', CHKP_NAME)\n",
        "        \n",
        "    # 3. Data: VOC2007 trainval (train) and test datasets + loaders\n",
        "    if (1):\n",
        "        dir_annotations  = os.path.join(dir_main, 'data/VOCdevkit_trainval/VOC2007')\n",
        "        file_annotations = os.path.join(dir_main,'data/VOCdevkit_trainval/VOC2007/anno_trainval.txt')\n",
        "        image_size       = 448\n",
        "        grid_num         = 7\n",
        "        flag_augm        = 0\n",
        "        trainFlag        = True\n",
        "\n",
        "        YoloDatasetTrain = YoloDataset(dir_annotations, file_annotations\n",
        "                                    , trainFlag\n",
        "                                    , image_size, grid_num\n",
        "                                    , flag_augm\n",
        "                                    , transform = [transforms.ToTensor()] )\n",
        "        DataLoaderTrain = DataLoader(YoloDatasetTrain, batch_size=BATCH_SIZE, shuffle=False,num_workers=0)\n",
        "        \n",
        "        dir_annotations  = os.path.join(dir_main, 'data/VOCdevkit_test/VOC2007')\n",
        "        file_annotations = os.path.join(dir_main, 'data/VOCdevkit_test/VOC2007/anno_test.txt')\n",
        "        trainFlag        = False\n",
        "        YoloDatasetTest  = YoloDataset(dir_annotations, file_annotations\n",
        "                                    , trainFlag\n",
        "                                    , image_size, grid_num\n",
        "                                    , flag_augm\n",
        "                                    , transform = [transforms.ToTensor()] )\n",
        "        DataLoaderTest   = DataLoader(YoloDatasetTest, batch_size=BATCH_SIZE, shuffle=False,num_workers=0)\n",
        "        \n",
        "        print(' - 3. [TrainDataset] %d images' % (len(YoloDatasetTrain)))\n",
        "    \n",
        "    # 4. Logger: reuses the LOGGER created in the earlier cell\n",
        "    if (1):\n",
        "        print (' - 4. Logger ')\n",
        "        pass\n",
        "#         LOGGER = TensorBoardColab()\n",
        "    \n",
        "    # 5. Train; trainObj is reused by the pruning cells further down\n",
        "    if (1):\n",
        "        print (' - 5. Training')\n",
        "        print (' -------------------------------------------------------------------- ')\n",
        "        trainObj = YOLOv1Train()\n",
        "        trainObj.train(model, criterion, optimizer\n",
        "                            , DataLoaderTrain, DataLoaderTest\n",
        "                            , LEARNING_RATE, EPOCHS, BATCH_SIZE\n",
        "                            , USE_GPU, LOGGER\n",
        "                            , CHKP_LOAD, CHKP_DIR, CHKP_NAME, CHKP_EPOCHS\n",
        "                            , DEBUG)\n",
        "        \n",
        "  "
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "colab_type": "code",
        "id": "4VbLqSYMoNv8",
        "colab": {}
      },
      "source": [
        "from src.pruning.weightPruning.methods import weight_prune\n",
        "from torchsummary import summary\n",
        "\n",
        "# Model summary before pruning.\n",
        "# NOTE(review): torchsummary.summary usually prints its table itself;\n",
        "# the outer print() likely just adds a stray 'None' — confirm and drop.\n",
        "print(summary(trainObj.model, input_size=(3,448,448)))\n",
        "# Build masks at 90% sparsity with the project's weight_prune and\n",
        "# attach them via YOLOv1.set_masks\n",
        "pruning_perc = 90.\n",
        "masks = weight_prune(trainObj.model, pruning_perc)\n",
        "trainObj.model.set_masks(masks)\n",
        "\n",
        "# Model summary after pruning\n",
        "print(summary(trainObj.model, input_size=(3,448,448)))"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "colab_type": "code",
        "id": "pmuKfo2b3lkH",
        "colab": {}
      },
      "source": [
        "from src.pruning.weightPruning.utils import prune_rate\n",
        "\n",
        "# Project helper — presumably reports the achieved sparsity of the\n",
        "# pruned model; verify in src/pruning/weightPruning/utils.py\n",
        "prune_rate(trainObj.model)"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "colab_type": "code",
        "id": "Kv7yLvwAENe-",
        "colab": {}
      },
      "source": [
        "# Fine-tune the pruned model. The positional False replaces CHKP_LOAD\n",
        "# (True in the initial training cell) — presumably so this run does NOT\n",
        "# reload the pre-pruning checkpoint over the masked weights; confirm\n",
        "# against YOLOv1Train.train's signature.\n",
        "trainObj.train(trainObj.model, criterion, optimizer\n",
        "                            , DataLoaderTrain, DataLoaderTest\n",
        "                            , LEARNING_RATE, EPOCHS, BATCH_SIZE\n",
        "                            , USE_GPU, LOGGER\n",
        "                            , False, CHKP_DIR, CHKP_NAME, CHKP_EPOCHS\n",
        "                            , DEBUG)"
      ],
      "execution_count": 0,
      "outputs": []
    }
  ]
}