{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 0,
   "metadata": {
    "colab": {},
    "colab_type": "code",
    "id": "B6TjnZUKk53l"
   },
   "outputs": [],
   "source": [
     "# Fetch the project source (feature/Tomas branch) into the Colab runtime.\n",
     "! git clone --branch feature/Tomas https://github.com/prerakmody/CS4180-DL"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 0,
   "metadata": {
    "colab": {},
    "colab_type": "code",
    "id": "6TrfP5PolV9j"
   },
   "outputs": [],
   "source": [
     "# Show which GPU (if any) the runtime was allocated.\n",
     "! nvidia-smi"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {
    "colab": {},
    "colab_type": "code",
    "id": "_fjLXIhQmQSX"
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "['/home/tomas/Documents/TUDelft/DeepLearning/CS4180-DL/demo/demo3', '/home/tomas/dohop/repo', '/home/tomas/dohop/repo/metabooking/exec_bots', '/home/tomas/dohop/repo/metabooking/booking-essentials', '/home/tomas/dohop/repo/robots', '/home/tomas/dohop/repo/py-dohop-geonames/src', '/home/tomas/Documents/TUDelft/virtualenvs/delft/lib/python37.zip', '/home/tomas/Documents/TUDelft/virtualenvs/delft/lib/python3.7', '/home/tomas/Documents/TUDelft/virtualenvs/delft/lib/python3.7/lib-dynload', '/usr/local/lib/python3.7', '', '/home/tomas/Documents/TUDelft/virtualenvs/delft/lib/python3.7/site-packages', '/home/tomas/Documents/TUDelft/virtualenvs/delft/lib/python3.7/site-packages/datasets-0.0.9-py3.7.egg', '/home/tomas/Documents/TUDelft/virtualenvs/delft/src/lasagne', '/home/tomas/Documents/TUDelft/virtualenvs/delft/lib/python3.7/site-packages/IPython/extensions', '/home/tomas/.ipython', '~/Documents/TUDelft/DeepLearning/CS4180-DL', '~/Documents/TUDelft/DeepLearning/CS4180-DL']\n"
     ]
    },
    {
     "ename": "ModuleNotFoundError",
     "evalue": "No module named 'src'",
     "output_type": "error",
     "traceback": [
      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[0;31mModuleNotFoundError\u001b[0m                       Traceback (most recent call last)",
      "\u001b[0;32m<ipython-input-3-e2a4a48df8aa>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[1;32m     14\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mtorch\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mnn\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mfunctional\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0mF\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     15\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 16\u001b[0;31m \u001b[0;32mfrom\u001b[0m \u001b[0msrc\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpruning\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mweightPruning\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mlayers\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mMaskedLinear\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mMaskedConv2d\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m     17\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     18\u001b[0m cfg = {\n",
      "\u001b[0;31mModuleNotFoundError\u001b[0m: No module named 'src'"
     ]
    }
   ],
   "source": [
    "#encoding:utf-8\n",
    "import sys\n",
    "import math\n",
    "import os\n",
    "\n",
    "dir_main = '~/Documents/TUDelft/DeepLearning/CS4180-DL'\n",
    "sys.path.append(dir_main)\n",
    "print(sys.path)\n",
    "\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.utils.model_zoo as model_zoo\n",
    "from torchvision import models\n",
    "import torch.nn.functional as F\n",
    "\n",
    "from src.pruning.weightPruning.layers import MaskedLinear,MaskedConv2d\n",
    "\n",
     "# VGG configuration tables (torchvision convention): integers are conv\n",
     "# output-channel counts, 'M' marks a max-pool stage. 'D' (= VGG16) is the\n",
     "# one actually used by getYOLOv1 below.\n",
     "cfg = {\n",
     "    'A': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],\n",
     "    'B': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],\n",
     "    'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],\n",
     "    'E': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],\n",
     "}\n",
    "\n",
    "def getYOLOv1(name=''):\n",
    "    if name != '':\n",
    "        myYOLO            = YOLOv1(name, cfg['D'], batch_norm=True)\n",
    "        VGG               = models.vgg16_bn(pretrained=True)\n",
    "        state_dict_VGG    = VGG.state_dict()\n",
    "        state_dict_myYOLO = myYOLO.state_dict()\n",
    "        \n",
    "        for k in state_dict_VGG.keys():\n",
    "            if k in state_dict_myYOLO.keys() and k.startswith('features'):\n",
    "                state_dict_myYOLO[k] = state_dict_VGG[k]\n",
    "        myYOLO.load_state_dict(state_dict_myYOLO)\n",
    "        return myYOLO\n",
    "\n",
    "    else:\n",
    "        print (' - Pass a name for your model')\n",
    "        sys.exit(1)\n",
    "    \n",
    "\n",
    "\n",
    "class YOLOv1(nn.Module):\n",
    "\n",
    "    def __init__(self, name, cfg, batch_norm, image_size=448):\n",
    "        super(YOLOv1, self).__init__()\n",
    "        self.name       = name\n",
    "        self.features   = self.getFeatureLayers(cfg, batch_norm)\n",
    "        self.linear1    = MaskedLinear(512 * 7 * 7, 4096)\n",
    "        self.linear2    = MaskedLinear(4096, 1470)\n",
    "        self.classifier = nn.Sequential( # add the regression part to the features\n",
    "            # nn.Linear(512 * 7 * 7, 4096),\n",
    "            self.linear1,\n",
    "            nn.ReLU(True),\n",
    "            nn.Dropout(),\n",
    "            # nn.Linear(4096, 1470),\n",
    "            self.linear2,\n",
    "        )\n",
    "        self._initialize_weights()\n",
    "        self.image_size = image_size\n",
    "\n",
    "    def forward(self, x):\n",
    "        x = self.features(x)\n",
    "        x = x.view(x.size(0), -1)\n",
    "        x = self.classifier(x)\n",
    "        x = torch.sigmoid(x)\n",
    "        x = x.view(-1,7,7,30)\n",
    "        return x\n",
    "\n",
    "    def _initialize_weights(self):\n",
    "        for m in self.modules():\n",
    "            if isinstance(m, nn.Conv2d):\n",
    "                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n",
    "                m.weight.data.normal_(0, math.sqrt(2. / n))\n",
    "                if m.bias is not None:\n",
    "                    m.bias.data.zero_()\n",
    "            elif isinstance(m, nn.BatchNorm2d):\n",
    "                m.weight.data.fill_(1)\n",
    "                m.bias.data.zero_()\n",
    "            elif isinstance(m, nn.Linear):\n",
    "                m.weight.data.normal_(0, 0.01)\n",
    "                m.bias.data.zero_()\n",
    "\n",
    "\n",
    "    def getFeatureLayers(self, cfg, batch_norm=False):\n",
    "        if (1):\n",
    "            params_in_channels  = 3\n",
    "            params_conv_stride  = 1\n",
    "            params_conv_size    = 3 \n",
    "            params_first_flag   = True\n",
    "            params_pool_stride  = 2\n",
    "            params_pool_kernel  = 2\n",
    "\n",
    "        layers = []\n",
    "        for item in cfg:\n",
    "            params_conv_stride = 1\n",
    "            if (item == 64 and params_first_flag):\n",
    "                params_conv_stride = 2\n",
    "                params_first_flag  = False\n",
    "\n",
    "            if item == 'M': # max-pooling\n",
    "                layers += [nn.MaxPool2d(kernel_size=params_pool_kernel, stride=params_pool_stride)]\n",
    "            else:\n",
    "                params_kernels = item\n",
    "                conv2d = MaskedConv2d(params_in_channels, params_kernels, kernel_size=params_conv_size, stride=params_conv_stride, padding=1)\n",
    "                if batch_norm:\n",
    "                    layers += [conv2d, nn.BatchNorm2d(item), nn.ReLU(inplace=True)]\n",
    "                else:\n",
    "                    layers += [conv2d, nn.ReLU(inplace=True)]\n",
    "                params_in_channels = item\n",
    "        return nn.Sequential(*layers)\n",
    "\n",
    "    def set_masks(self, masks):\n",
    "        count = 0\n",
    "        for feat in self.features:\n",
    "          try:\n",
    "            if feat.name == 'MaskedConv2d':\n",
    "              feat.set_mask(masks[count])\n",
    "              count += 1\n",
    "          except:\n",
    "            pass\n",
    "        #self.linear1.set_mask(masks[count])\n",
    "        #self.linear2.set_mask(masks[count + 1])\n",
    "\n",
    "def test():\n",
    "    net = getYOLOv1()\n",
    "    img = torch.rand(1,3,448,448)\n",
    "    img = Variable(img)\n",
    "    output = net(img)\n",
    "    print(output.size())\n",
    "\n",
    "# if __name__ == '__main__':\n",
    "#     test()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {
    "colab": {},
    "colab_type": "code",
    "id": "4xm-w5NNmQSa"
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      " - USE_GPU :  False\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Using TensorFlow backend.\n"
     ]
    }
   ],
   "source": [
     "import os\n",
     "import sys\n",
     "import pdb; # pdb.set_trace()\n",
     "import matplotlib.pyplot as plt\n",
     "%matplotlib inline\n",
     "\n",
     "import torch\n",
     "from torch.utils.data import DataLoader\n",
     "import torchvision.transforms as transforms\n",
     "# TensorBoard logging helper for Colab; importing it emits the\n",
     "# 'Using TensorFlow backend.' line seen on stderr.\n",
     "from tensorboardcolab import TensorBoardColab\n",
     "\n",
     "# Train on GPU when CUDA is available (False here; True on a Colab GPU runtime).\n",
     "USE_GPU = torch.cuda.is_available()\n",
     "print (' - USE_GPU : ', USE_GPU)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 0,
   "metadata": {
    "colab": {},
    "colab_type": "code",
    "id": "28e7Fj16sMPr"
   },
   "outputs": [],
   "source": [
     "# Create the TensorBoard logger once, in its own cell, so re-running the\n",
     "# training cell below does not create a duplicate (its own LOGGER line is\n",
     "# commented out for that reason).\n",
     "LOGGER = TensorBoardColab()    "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 0,
   "metadata": {
    "colab": {},
    "colab_type": "code",
    "id": "5dIiN6yEm5hI"
   },
   "outputs": [],
   "source": [
    "\n",
     "from src.dataloader import YoloDataset\n",
     "# from src.nets import *\n",
     "from src.train import YOLOv1Train, YOLOv1Loss\n",
     "# from src.train import YOLOv1Loss\n",
     "\n",
     "# NOTE(review): this cell relies on state created by earlier cells\n",
     "# (dir_main, getYOLOv1, USE_GPU, LOGGER) -- run those first on a fresh kernel.\n",
     "torch.cuda.empty_cache()\n",
     "\n",
     "\n",
     "if __name__ == \"__main__\":\n",
     "    \n",
     "    # --- 1. Model: VGG16-backed YOLOv1 with pretrained feature weights ---\n",
     "    if (1):\n",
     "        model_name = 'yolov1'\n",
     "        model = getYOLOv1(model_name)\n",
     "        print (' - 1. Net : ', model_name)\n",
     "    \n",
     "    # --- 2. Training hyper-parameters ---\n",
     "    if (1):\n",
     "        DEBUG         = True\n",
     "        LEARNING_RATE = 0.001\n",
     "        EPOCHS        = 8\n",
     "        BATCH_SIZE    = 16\n",
     "        # YOLOv1Loss('','',5,0.5): the trailing 5 / 0.5 are presumably the\n",
     "        # lambda_coord / lambda_noobj weights from the YOLO paper -- TODO\n",
     "        # confirm the signature in src/train.py.\n",
     "        criterion     = YOLOv1Loss('','',5,0.5)\n",
     "        optimizer     = 'SGD'\n",
     "        \n",
     "        print (' - 2. [PARAMS] Debug      : ', DEBUG)\n",
     "        print (' - 2. [PARAMS] BATCH_SIZE : ', BATCH_SIZE)\n",
     "        print (' - 2. [PARAMS] EPOCHS     : ', EPOCHS)\n",
     "        print (' - 2. [PARAMS] Optimizer  : ', optimizer)\n",
     "        \n",
     "    # --- 3. Checkpointing paths/frequency used by trainObj.train below ---\n",
     "    if (1):\n",
     "        CHKP_DIR    = 'chkpoints'\n",
     "        CHKP_NAME   = 'yolov1_epoch%.3d.pkl' % (4)\n",
     "        CHKP_EPOCHS = 1\n",
     "        CHKP_LOAD = True\n",
     "        print (' - 2. [PARAMS_SAVE] CHKP_DIR  : ', CHKP_DIR)\n",
     "        print (' - 2. [PARAMS_SAVE] CHKP_NAME : ', CHKP_NAME)\n",
     "        \n",
     "    # --- 4. Datasets / loaders: VOC2007 trainval (train) and VOC2007 test ---\n",
     "    if (1):\n",
     "        dir_annotations  = os.path.join(dir_main, 'data/VOCdevkit_trainval/VOC2007')\n",
     "        file_annotations = os.path.join(dir_main,'data/VOCdevkit_trainval/VOC2007/anno_trainval.txt')\n",
     "        image_size       = 448\n",
     "        grid_num         = 7\n",
     "        flag_augm        = 0\n",
     "        trainFlag        = True\n",
     "\n",
     "        YoloDatasetTrain = YoloDataset(dir_annotations, file_annotations\n",
     "                                    , trainFlag\n",
     "                                    , image_size, grid_num\n",
     "                                    , flag_augm\n",
     "                                    , transform = [transforms.ToTensor()] )\n",
     "        DataLoaderTrain = DataLoader(YoloDatasetTrain, batch_size=BATCH_SIZE, shuffle=False,num_workers=0)\n",
     "        \n",
     "        dir_annotations  = os.path.join(dir_main, 'data/VOCdevkit_test/VOC2007')\n",
     "        file_annotations = os.path.join(dir_main, 'data/VOCdevkit_test/VOC2007/anno_test.txt')\n",
     "        trainFlag        = False\n",
     "        YoloDatasetTest  = YoloDataset(dir_annotations, file_annotations\n",
     "                                    , trainFlag\n",
     "                                    , image_size, grid_num\n",
     "                                    , flag_augm\n",
     "                                    , transform = [transforms.ToTensor()] )\n",
     "        DataLoaderTest   = DataLoader(YoloDatasetTest, batch_size=BATCH_SIZE, shuffle=False,num_workers=0)\n",
     "        \n",
     "        print(' - 3. [TrainDataset] %d images' % (len(YoloDatasetTrain)))\n",
     "    \n",
     "    if (1):\n",
     "        print (' - 4. Logger ')\n",
     "        pass\n",
     "#         LOGGER = TensorBoardColab()\n",
     "    \n",
     "    # --- 5. Train (LOGGER comes from the dedicated cell above) ---\n",
     "    if (1):\n",
     "        print (' - 5. Training')\n",
     "        print (' -------------------------------------------------------------------- ')\n",
     "        trainObj = YOLOv1Train()\n",
     "        trainObj.train(model, criterion, optimizer\n",
     "                            , DataLoaderTrain, DataLoaderTest\n",
     "                            , LEARNING_RATE, EPOCHS, BATCH_SIZE\n",
     "                            , USE_GPU, LOGGER\n",
     "                            , False, CHKP_DIR, CHKP_NAME, CHKP_EPOCHS\n",
     "                            , DEBUG)\n",
     "        \n",
     "  "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {
    "colab": {},
    "colab_type": "code",
    "id": "4VbLqSYMoNv8"
   },
   "outputs": [
    {
     "ename": "ModuleNotFoundError",
     "evalue": "No module named 'src'",
     "output_type": "error",
     "traceback": [
      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[0;31mModuleNotFoundError\u001b[0m                       Traceback (most recent call last)",
      "\u001b[0;32m<ipython-input-5-3f0998baa9ce>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0;32mfrom\u001b[0m \u001b[0msrc\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpruning\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mweightPruning\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mmethods\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mquick_filter_prune\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m      2\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mtorchsummary\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0msummary\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m      3\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m      4\u001b[0m \u001b[0mprint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0msummary\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtrainObj\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mmodel\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0minput_size\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;36m3\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;36m448\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;36m448\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m      5\u001b[0m \u001b[0mpruning_perc\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;36m90.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;31mModuleNotFoundError\u001b[0m: No module named 'src'"
     ]
    }
   ],
   "source": [
     "from src.pruning.weightPruning.methods import quick_filter_prune\n",
     "from torchsummary import summary\n",
     "\n",
     "# Summarise the dense model, prune 90% of the filters (quick_filter_prune\n",
     "# presumably ranks conv filters by weight magnitude -- confirm in\n",
     "# src/pruning/weightPruning/methods.py), install the masks via set_masks,\n",
     "# then summarise again for comparison.\n",
     "print(summary(trainObj.model, input_size=(3,448,448)))\n",
     "pruning_perc = 90.\n",
     "masks = quick_filter_prune(trainObj.model, pruning_perc)\n",
     "trainObj.model.set_masks(masks)\n",
     "\n",
     "print(summary(trainObj.model, input_size=(3,448,448)))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 0,
   "metadata": {
    "colab": {},
    "colab_type": "code",
    "id": "pmuKfo2b3lkH"
   },
   "outputs": [],
   "source": [
     "from src.pruning.weightPruning.utils import prune_rate\n",
     "\n",
     "# Report the sparsity actually achieved by the masks -- TODO confirm\n",
     "# prune_rate's exact output in src/pruning/weightPruning/utils.py.\n",
     "prune_rate(trainObj.model)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 0,
   "metadata": {
    "colab": {},
    "colab_type": "code",
    "id": "Kv7yLvwAENe-"
   },
   "outputs": [],
   "source": [
     "# Fine-tune the pruned network (note: passes trainObj.model, i.e. the\n",
     "# masked model) with the same settings as the initial training cell.\n",
     "trainObj.train(trainObj.model, criterion, optimizer\n",
     "                            , DataLoaderTrain, DataLoaderTest\n",
     "                            , LEARNING_RATE, EPOCHS, BATCH_SIZE\n",
     "                            , USE_GPU, LOGGER\n",
     "                            , False, CHKP_DIR, CHKP_NAME, CHKP_EPOCHS\n",
     "                            , DEBUG)"
   ]
  }
 ],
 "metadata": {
  "accelerator": "GPU",
  "colab": {
   "collapsed_sections": [],
   "name": "demo_2_colab_prune.ipynb",
   "private_outputs": true,
   "provenance": [],
   "version": "0.3.2"
  },
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.2"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 1
}
