{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 74,
   "metadata": {},
   "outputs": [],
   "source": [
    "from __future__ import absolute_import\n",
    "from __future__ import division\n",
    "from __future__ import print_function\n",
    "\n",
    "import os\n",
    "import logging\n",
    "\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "from lib.models.ct.context_block import ContextBlock\n",
    "from collections import OrderedDict\n",
    "\n",
    "BN_MOMENTUM = 0.1\n",
    "logger = logging.getLogger(__name__)\n",
    "\n",
    "\n",
    "class DepthwiseConv2D(nn.Module):\n",
    "    \"\"\"Depthwise convolution: one filter per input channel (groups == in_channels).\n",
    "\n",
    "    Padding is (kernel_size - 1) // 2, so with stride=1 and an odd kernel\n",
    "    the spatial size is preserved.\n",
    "    \"\"\"\n",
    "    def __init__(self, in_channels, kernel_size, stride, bias=False):\n",
    "        super(DepthwiseConv2D, self).__init__()\n",
    "        # 'same'-style padding for odd kernel sizes.\n",
    "        padding = (kernel_size - 1) // 2\n",
    "\n",
    "        self.depthwise_conv = nn.Conv2d(in_channels, in_channels, kernel_size=kernel_size, padding=padding, stride=stride, groups=in_channels, bias=bias)\n",
    "\n",
    "    def forward(self, x):\n",
    "        # Channel count is unchanged; only spatial mixing per channel.\n",
    "        out = self.depthwise_conv(x)\n",
    "        return out\n",
    "\n",
    "\n",
    "class Bottleneck(nn.Module):\n",
    "    \"\"\"Residual block: Ghost 1x1 -> depthwise 3x3 -> Ghost 1x1, plus skip.\n",
    "\n",
    "    NOTE(review): GhostModule is defined in a LATER cell of this notebook,\n",
    "    so Restart & Run All fails here -- that cell should be moved above\n",
    "    this one.\n",
    "    \"\"\"\n",
    "    # Output-channel multiplier (1 here, unlike classic ResNet's 4).\n",
    "    expansion = 1\n",
    "    # Class-level flag; PoseResNet._make_layer overwrites it to enable the\n",
    "    # global-context block after conv3.\n",
    "    USE_GCB = False\n",
    "\n",
    "    def __init__(self, inplanes, planes, stride=1, downsample=None):\n",
    "        super(Bottleneck, self).__init__()\n",
    "        \n",
    "        #self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)\n",
    "        self.conv1 = GhostModule(inplanes, planes, kernel_size=1, relu=True)\n",
    "        self.bn1 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)\n",
    "        # Spatial mixing; this conv carries the block's stride.\n",
    "        self.conv2 = DepthwiseConv2D(planes, kernel_size=3, stride=stride)\n",
    "        self.bn2 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)\n",
    "        #self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1, bias=False)\n",
    "        self.conv3 = GhostModule(planes, planes * self.expansion, kernel_size=1, relu=False)\n",
    "        self.bn3 = nn.BatchNorm2d(planes * self.expansion, momentum=BN_MOMENTUM)\n",
    "        if self.USE_GCB:\n",
    "            # ContextBlock comes from lib.models.ct.context_block; its\n",
    "            # exact contract is not visible in this notebook.\n",
    "            self.gcb4 = ContextBlock(planes)\n",
    "        else:\n",
    "            self.gcb4 = None\n",
    "\n",
    "        self.relu = nn.ReLU(inplace=True)\n",
    "        self.downsample = downsample\n",
    "        self.stride = stride\n",
    "\n",
    "    def forward(self, x):\n",
    "        residual = x\n",
    "\n",
    "        out = self.conv1(x)\n",
    "        out = self.bn1(out)\n",
    "        out = self.relu(out)\n",
    "\n",
    "        out = self.conv2(out)\n",
    "        out = self.bn2(out)\n",
    "        out = self.relu(out)\n",
    "\n",
    "        out = self.conv3(out)\n",
    "        out = self.bn3(out)\n",
    "\n",
    "        if self.gcb4 is not None:\n",
    "            out = self.gcb4(out)\n",
    "\n",
    "        if self.downsample is not None:\n",
    "            # Project the identity path to match out's shape/channels.\n",
    "            residual = self.downsample(x)\n",
    "\n",
    "        out += residual\n",
    "        out = self.relu(out)\n",
    "\n",
    "        return out\n",
    "\n",
    "\n",
    "class PoseResNet(nn.Module):\n",
    "    \"\"\"ResNet-style pose backbone with a deconvolution (upsampling) head.\n",
    "\n",
    "    Pipeline: stem conv -> 4 residual stages -> 2 grouped transposed-conv\n",
    "    upsampling stages -> 1x1 final conv producing 17 heatmap channels\n",
    "    (NUM_JOINTS in the original cfg-driven version).\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, block, layers):\n",
    "        # Channel count fed into the next stage; consumed/updated by\n",
    "        # _make_layer and _make_deconv_layer.\n",
    "        self.inplanes = 64\n",
    "        # These were previously read from cfg.MODEL.EXTRA; hard-coded for\n",
    "        # this notebook experiment.\n",
    "        self.deconv_with_bias = False\n",
    "        self.use_gcb = False\n",
    "\n",
    "        super(PoseResNet, self).__init__()\n",
    "        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,\n",
    "                               bias=False)\n",
    "        self.bn1 = nn.BatchNorm2d(64, momentum=BN_MOMENTUM)\n",
    "        self.relu = nn.ReLU(inplace=True)\n",
    "        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n",
    "        self.layer1 = self._make_layer(block, 64, layers[0])\n",
    "        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)\n",
    "        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)\n",
    "        # NOTE(review): layer4 uses stride=1 (unlike torchvision ResNet),\n",
    "        # presumably to keep a larger feature map -- confirm intent.\n",
    "        self.layer4 = self._make_layer(block, 512, layers[3])\n",
    "\n",
    "        # Upsampling head: 2 deconv layers, 256 filters each, 4x4 kernels\n",
    "        # (was extra.NUM_DECONV_LAYERS / _FILTERS / _KERNELS).\n",
    "        self.deconv_layers = self._make_deconv_layer(\n",
    "            2, [256, 256], [4, 4]\n",
    "        )\n",
    "        # 1x1 conv mapping features to per-joint heatmaps.\n",
    "        self.final_layer = nn.Conv2d(\n",
    "            in_channels=256,\n",
    "            out_channels=17,\n",
    "            kernel_size=1,\n",
    "            stride=1,\n",
    "            padding=0\n",
    "        )\n",
    "\n",
    "    def _make_layer(self, block, planes, blocks, stride=1):\n",
    "        \"\"\"Stack ``blocks`` residual blocks of width ``planes``; the first\n",
    "        block may downsample, the rest preserve resolution.\n",
    "        \"\"\"\n",
    "        downsample = None\n",
    "        # NOTE(review): this mutates a CLASS attribute, so it affects every\n",
    "        # block instance built afterwards, not only this layer's blocks.\n",
    "        block.USE_GCB = self.use_gcb\n",
    "\n",
    "        if stride != 1 or self.inplanes != planes * block.expansion:\n",
    "            # Projection shortcut to match spatial size / channel count.\n",
    "            downsample = nn.Sequential(\n",
    "                nn.Conv2d(self.inplanes, planes * block.expansion,\n",
    "                          kernel_size=1, stride=stride, bias=False),\n",
    "                nn.BatchNorm2d(planes * block.expansion, momentum=BN_MOMENTUM),\n",
    "            )\n",
    "\n",
    "        layers = []\n",
    "        layers.append(block(self.inplanes, planes, stride, downsample))\n",
    "        self.inplanes = planes * block.expansion\n",
    "        for i in range(1, blocks):\n",
    "            layers.append(block(self.inplanes, planes))\n",
    "\n",
    "        return nn.Sequential(*layers)\n",
    "\n",
    "    def _get_deconv_cfg(self, deconv_kernel, index):\n",
    "        \"\"\"Return (kernel, padding, output_padding) for a deconv kernel.\n",
    "\n",
    "        Raises:\n",
    "            ValueError: if ``deconv_kernel`` is not one of 2, 3, 4.  (The\n",
    "                original fell through to an UnboundLocalError instead.)\n",
    "        \"\"\"\n",
    "        if deconv_kernel == 4:\n",
    "            padding = 1\n",
    "            output_padding = 0\n",
    "        elif deconv_kernel == 3:\n",
    "            padding = 1\n",
    "            output_padding = 1\n",
    "        elif deconv_kernel == 2:\n",
    "            padding = 0\n",
    "            output_padding = 0\n",
    "        else:\n",
    "            raise ValueError('unsupported deconv kernel size: {}'.format(deconv_kernel))\n",
    "\n",
    "        return deconv_kernel, padding, output_padding\n",
    "\n",
    "    # (3, [256, 256, 256], [4, 4, 4])\n",
    "    def _make_deconv_layer(self, num_layers, num_filters, num_kernels):\n",
    "        \"\"\"Build the upsampling head: per layer, a grouped ConvTranspose2d\n",
    "        followed by a 1x1 conv, BatchNorm and ReLU.\n",
    "        \"\"\"\n",
    "        assert num_layers == len(num_filters), \\\n",
    "            'ERROR: num_deconv_layers is different len(num_deconv_filters)'\n",
    "        # Fixed copy-paste: this message previously said 'filters' here too.\n",
    "        assert num_layers == len(num_kernels), \\\n",
    "            'ERROR: num_deconv_layers is different len(num_deconv_kernels)'\n",
    "\n",
    "        layers = []\n",
    "        for i in range(num_layers):\n",
    "            kernel, padding, output_padding = \\\n",
    "                self._get_deconv_cfg(num_kernels[i], i)\n",
    "\n",
    "            planes = num_filters[i]\n",
    "            # NOTE(review): groups=planes makes this a grouped transpose\n",
    "            # conv; it requires self.inplanes % planes == 0 -- holds for\n",
    "            # the 512 -> 256 -> 256 configuration used above.\n",
    "            layers.append(\n",
    "                nn.ConvTranspose2d(\n",
    "                    in_channels=self.inplanes,\n",
    "                    out_channels=planes,\n",
    "                    kernel_size=kernel,\n",
    "                    stride=2,\n",
    "                    padding=padding,\n",
    "                    output_padding=output_padding,\n",
    "                    groups=planes,\n",
    "                    bias=self.deconv_with_bias))\n",
    "            # Pointwise conv mixes channels after the grouped deconv.\n",
    "            layers.append(nn.Conv2d(planes, planes, kernel_size=1,\n",
    "                                    bias=False))\n",
    "            layers.append(nn.BatchNorm2d(planes, momentum=BN_MOMENTUM))\n",
    "            layers.append(nn.ReLU(inplace=True))\n",
    "            self.inplanes = planes\n",
    "\n",
    "        return nn.Sequential(*layers)\n",
    "\n",
    "    def forward(self, x):\n",
    "        \"\"\"(N, 3, H, W) image batch -> (N, 17, H/4, W/4) joint heatmaps.\"\"\"\n",
    "        x = self.conv1(x)\n",
    "        x = self.bn1(x)\n",
    "        x = self.relu(x)\n",
    "        x = self.maxpool(x)\n",
    "\n",
    "        x = self.layer1(x)\n",
    "        x = self.layer2(x)\n",
    "        x = self.layer3(x)\n",
    "        x = self.layer4(x)\n",
    "\n",
    "        x = self.deconv_layers(x)\n",
    "        x = self.final_layer(x)\n",
    "\n",
    "        return x\n",
    "\n",
    "resnet_spec = {50: (Bottleneck, [3, 4, 6, 3]),\n",
    "               101: (Bottleneck, [3, 4, 23, 3]),\n",
    "               152: (Bottleneck, [3, 8, 36, 3])}\n",
    "\n",
    "def get_pose_net(num_layers=152):\n",
    "    \"\"\"Build a PoseResNet.\n",
    "\n",
    "    Args:\n",
    "        num_layers: ResNet depth, one of 50, 101, 152.  Defaults to 152,\n",
    "            matching the previously hard-coded value.\n",
    "\n",
    "    Raises:\n",
    "        ValueError: if ``num_layers`` has no entry in ``resnet_spec``.\n",
    "    \"\"\"\n",
    "    if num_layers not in resnet_spec:\n",
    "        raise ValueError('num_layers must be one of {}, got {}'.format(sorted(resnet_spec), num_layers))\n",
    "    block_class, layers = resnet_spec[num_layers]\n",
    "    model = PoseResNet(block_class, layers)\n",
    "    return model\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 75,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "test time elapsed 4.385964912280702ms\n"
     ]
    }
   ],
   "source": [
    "net = get_pose_net()\n",
    "import time\n",
    "X = torch.rand(1, 3, 192, 256)\n",
    "tsince = int(round(time.time()*1000))\n",
    "a = net(X)\n",
    "# BUG FIX: the original computed 1000 / elapsed_ms (a rate in runs/sec)\n",
    "# but printed it labelled as milliseconds.  Report the elapsed time.\n",
    "ttime_elapsed = int(round(time.time()*1000)) - tsince\n",
    "print('test time elapsed {}ms'.format(ttime_elapsed))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 76,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[INFO] Register count_convNd() for <class 'torch.nn.modules.conv.Conv2d'>.\n",
      "[INFO] Register count_bn() for <class 'torch.nn.modules.batchnorm.BatchNorm2d'>.\n",
      "[INFO] Register zero_ops() for <class 'torch.nn.modules.activation.ReLU'>.\n",
      "[INFO] Register zero_ops() for <class 'torch.nn.modules.pooling.MaxPool2d'>.\n",
      "\u001b[91m[WARN] Cannot find rule for <class 'torch.nn.modules.container.Sequential'>. Treat it as zero Macs and zero Params.\u001b[00m\n",
      "\u001b[91m[WARN] Cannot find rule for <class '__main__.GhostModule'>. Treat it as zero Macs and zero Params.\u001b[00m\n",
      "\u001b[91m[WARN] Cannot find rule for <class '__main__.DepthwiseConv2D'>. Treat it as zero Macs and zero Params.\u001b[00m\n",
      "\u001b[91m[WARN] Cannot find rule for <class '__main__.Bottleneck'>. Treat it as zero Macs and zero Params.\u001b[00m\n",
      "[INFO] Register count_convNd() for <class 'torch.nn.modules.conv.ConvTranspose2d'>.\n",
      "\u001b[91m[WARN] Cannot find rule for <class '__main__.PoseResNet'>. Treat it as zero Macs and zero Params.\u001b[00m\n",
      "1283828736.0 3870289.0\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "('1.284G', '3.870M')"
      ]
     },
     "execution_count": 76,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "from thop import profile\n",
    "from thop import clever_format\n",
    "# Count multiply-accumulates (MACs) and parameters for one forward pass\n",
    "# on the input X from the previous cell.\n",
    "macs, params = profile(net, inputs=(X, ))\n",
    "print(macs, params)\n",
    "# Human-readable formatting, e.g. ('1.284G', '3.870M').\n",
    "macs, params = clever_format([macs, params], \"%.3f\")\n",
    "macs, params"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Profiling results (MACs, params) recorded from earlier runs:\n",
    "\n",
    "- ('1.095G', '2.732M') no GCB; ('1.097G', '2.897M') with GCB\n",
    "- ('803.712M', '1.609M') no GCB; ('805.454M', '1.774M') with GCB\n",
    "- 101: ('1.041G', '2.845M') no GCB; ('1.044G', '3.158M') with GCB\n",
    "- 152: ('1.284G', '3.870M') no GCB; ('1.288G', '4.306M') with GCB"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "import torch.nn as nn\n",
    "import math\n",
    "def _make_divisible(v, divisor, min_value=None):\n",
    "    \"\"\"Round ``v`` to the nearest multiple of ``divisor``.\n",
    "\n",
    "    Taken from the original TensorFlow slim MobileNet implementation; it\n",
    "    ensures channel counts are divisible by ``divisor``, never fall below\n",
    "    ``min_value`` (``divisor`` when not given), and never drop more than\n",
    "    10% below ``v``.\n",
    "    \"\"\"\n",
    "    floor_value = min_value if min_value is not None else divisor\n",
    "    rounded = max(floor_value, int(v + divisor / 2) // divisor * divisor)\n",
    "    # Rounding down must not remove more than 10% of the original value.\n",
    "    if rounded < 0.9 * v:\n",
    "        rounded += divisor\n",
    "    return rounded\n",
    "\n",
    "\n",
    "class SELayer(nn.Module):\n",
    "    \"\"\"Squeeze-and-Excitation channel attention.\n",
    "\n",
    "    Global-average-pools to per-channel statistics, passes them through a\n",
    "    two-layer bottleneck MLP, then rescales the input channels by the\n",
    "    resulting gate.\n",
    "    \"\"\"\n",
    "    def __init__(self, channel, reduction=4):\n",
    "        super(SELayer, self).__init__()\n",
    "        self.avg_pool = nn.AdaptiveAvgPool2d(1)\n",
    "        self.fc = nn.Sequential(\n",
    "                nn.Linear(channel, channel // reduction),\n",
    "                nn.ReLU(inplace=True),\n",
    "                nn.Linear(channel // reduction, channel),        )\n",
    "\n",
    "    def forward(self, x):\n",
    "        b, c, _, _ = x.size()\n",
    "        # Squeeze: (b, c, h, w) -> (b, c)\n",
    "        y = self.avg_pool(x).view(b, c)\n",
    "        # Excite: bottleneck MLP, reshaped to broadcast over h, w.\n",
    "        y = self.fc(y).view(b, c, 1, 1)\n",
    "        # Hard clip to [0, 1] used as the gate here instead of a sigmoid --\n",
    "        # presumably a hard-sigmoid-style choice; confirm against reference.\n",
    "        y = torch.clamp(y, 0, 1)\n",
    "        return x * y\n",
    "\n",
    "\n",
    "def depthwise_conv(inp, oup, kernel_size=3, stride=1, relu=False):\n",
    "    \"\"\"Depthwise conv -> BatchNorm -> optional ReLU, as an nn.Sequential.\n",
    "\n",
    "    An empty nn.Sequential() stands in for the activation when ``relu`` is\n",
    "    False, keeping the module indices identical either way.\n",
    "    \"\"\"\n",
    "    activation = nn.ReLU(inplace=True) if relu else nn.Sequential()\n",
    "    stages = [\n",
    "        nn.Conv2d(inp, oup, kernel_size, stride, kernel_size//2, groups=inp, bias=False),\n",
    "        nn.BatchNorm2d(oup),\n",
    "        activation,\n",
    "    ]\n",
    "    return nn.Sequential(*stages)\n",
    "\n",
    "class GhostModule(nn.Module):\n",
    "    \"\"\"Ghost module (GhostNet): produce ``oup`` channels cheaply.\n",
    "\n",
    "    A primary conv generates ceil(oup / ratio) 'intrinsic' channels; a\n",
    "    cheap depthwise conv derives the remaining 'ghost' channels from them;\n",
    "    both are concatenated and trimmed to exactly ``oup`` channels.\n",
    "    \"\"\"\n",
    "    def __init__(self, inp, oup, kernel_size=1, ratio=2, dw_size=3, stride=1, relu=True):\n",
    "        super(GhostModule, self).__init__()\n",
    "        self.oup = oup\n",
    "        init_channels = math.ceil(oup / ratio)\n",
    "        new_channels = init_channels*(ratio-1)\n",
    "\n",
    "        # Ordinary convolution producing the intrinsic feature maps.\n",
    "        self.primary_conv = nn.Sequential(\n",
    "            nn.Conv2d(inp, init_channels, kernel_size, stride, kernel_size//2, bias=False),\n",
    "            nn.BatchNorm2d(init_channels),\n",
    "            nn.ReLU(inplace=True) if relu else nn.Sequential(),\n",
    "        )\n",
    "\n",
    "        # Depthwise conv deriving ghost features from the intrinsic ones.\n",
    "        self.cheap_operation = nn.Sequential(\n",
    "            nn.Conv2d(init_channels, new_channels, dw_size, 1, dw_size//2, groups=init_channels, bias=False),\n",
    "            nn.BatchNorm2d(new_channels),\n",
    "            nn.ReLU(inplace=True) if relu else nn.Sequential(),\n",
    "        )\n",
    "\n",
    "    def forward(self, x):\n",
    "        x1 = self.primary_conv(x)\n",
    "        x2 = self.cheap_operation(x1)\n",
    "        out = torch.cat([x1,x2], dim=1)\n",
    "        # Concatenation can overshoot oup when ratio doesn't divide evenly;\n",
    "        # slice back to the requested channel count.\n",
    "        return out[:,:self.oup,:,:]\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.12"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
