{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "创建一个 cfg 文件夹，然后在文件夹里下载配置文件\n",
    "```\n",
    "wget https://raw.githubusercontent.com/pjreddie/darknet/master/cfg/yolov3.cfg\n",
    "```"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "写一个 parse_cfg 函数来解析我们下载的 cfg。这个 cfg 是 yolo3作者自己用的配置文件，格式不属于任何一种 python 常用的配置文件格式（作者是用 c 写的），所以我们不得不写这么一个很奇怪的解析函数"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "创建一个 darknet.py 来写网络构建的代码"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [],
   "source": [
    "from __future__ import division\n",
    "\n",
    "import torch \n",
    "import torch.nn as nn\n",
    "import torch.nn.functional as F \n",
    "import numpy as np"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [],
   "source": [
    "def parse_cfg(cfgfile):\n",
    "    \"\"\"\n",
    "    Parse a darknet .cfg configuration file.\n",
    "\n",
    "    Args:\n",
    "        cfgfile: path to the .cfg file.\n",
    "\n",
    "    Returns:\n",
    "        A list of blocks. Each block describes a block in the neural\n",
    "        network to be built and is represented as a dict mapping option\n",
    "        names to their (string) values; the section name is stored\n",
    "        under the 'type' key.\n",
    "    \"\"\"\n",
    "    # Use a context manager so the file handle is always closed\n",
    "    # (the previous version leaked the open file object).\n",
    "    with open(cfgfile, 'r') as file:\n",
    "        lines = file.read().split('\\n')                    # store the lines in a list\n",
    "    lines = [x for x in lines if len(x) > 0]               # get rid of the empty lines\n",
    "    lines = [x for x in lines if x[0] != '#']              # get rid of comments\n",
    "    lines = [x.strip() for x in lines]                     # get rid of fringe whitespaces\n",
    "\n",
    "    block = {}\n",
    "    blocks = []\n",
    "\n",
    "    for line in lines:\n",
    "        if line[0] == \"[\":               # This marks the start of a new block\n",
    "            if len(block) != 0:          # If block is not empty, it holds the previous block:\n",
    "                blocks.append(block)     # flush it to the blocks list\n",
    "                block = {}               # and re-init the block\n",
    "            block[\"type\"] = line[1:-1].rstrip()\n",
    "        else:\n",
    "            # split only on the first '=' so values containing '=' survive\n",
    "            key, value = line.split(\"=\", 1)\n",
    "            block[key.rstrip()] = value.lstrip()\n",
    "    blocks.append(block)\n",
    "\n",
    "    return blocks"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "看一眼解析的结果："
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[{'type': 'net',\n",
       "  'batch': '64',\n",
       "  'subdivisions': '16',\n",
       "  'width': '608',\n",
       "  'height': '608',\n",
       "  'channels': '3',\n",
       "  'momentum': '0.9',\n",
       "  'decay': '0.0005',\n",
       "  'angle': '0',\n",
       "  'saturation': '1.5',\n",
       "  'exposure': '1.5',\n",
       "  'hue': '.1',\n",
       "  'learning_rate': '0.001',\n",
       "  'burn_in': '1000',\n",
       "  'max_batches': '500200',\n",
       "  'policy': 'steps',\n",
       "  'steps': '400000,450000',\n",
       "  'scales': '.1,.1'},\n",
       " {'type': 'convolutional',\n",
       "  'batch_normalize': '1',\n",
       "  'filters': '32',\n",
       "  'size': '3',\n",
       "  'stride': '1',\n",
       "  'pad': '1',\n",
       "  'activation': 'leaky'},\n",
       " {'type': 'convolutional',\n",
       "  'batch_normalize': '1',\n",
       "  'filters': '64',\n",
       "  'size': '3',\n",
       "  'stride': '2',\n",
       "  'pad': '1',\n",
       "  'activation': 'leaky'},\n",
       " {'type': 'convolutional',\n",
       "  'batch_normalize': '1',\n",
       "  'filters': '32',\n",
       "  'size': '1',\n",
       "  'stride': '1',\n",
       "  'pad': '1',\n",
       "  'activation': 'leaky'},\n",
       " {'type': 'convolutional',\n",
       "  'batch_normalize': '1',\n",
       "  'filters': '64',\n",
       "  'size': '3',\n",
       "  'stride': '1',\n",
       "  'pad': '1',\n",
       "  'activation': 'leaky'},\n",
       " {'type': 'shortcut', 'from': '-3', 'activation': 'linear'},\n",
       " {'type': 'convolutional',\n",
       "  'batch_normalize': '1',\n",
       "  'filters': '128',\n",
       "  'size': '3',\n",
       "  'stride': '2',\n",
       "  'pad': '1',\n",
       "  'activation': 'leaky'},\n",
       " {'type': 'convolutional',\n",
       "  'batch_normalize': '1',\n",
       "  'filters': '64',\n",
       "  'size': '1',\n",
       "  'stride': '1',\n",
       "  'pad': '1',\n",
       "  'activation': 'leaky'},\n",
       " {'type': 'convolutional',\n",
       "  'batch_normalize': '1',\n",
       "  'filters': '128',\n",
       "  'size': '3',\n",
       "  'stride': '1',\n",
       "  'pad': '1',\n",
       "  'activation': 'leaky'},\n",
       " {'type': 'shortcut', 'from': '-3', 'activation': 'linear'},\n",
       " {'type': 'convolutional',\n",
       "  'batch_normalize': '1',\n",
       "  'filters': '64',\n",
       "  'size': '1',\n",
       "  'stride': '1',\n",
       "  'pad': '1',\n",
       "  'activation': 'leaky'},\n",
       " {'type': 'convolutional',\n",
       "  'batch_normalize': '1',\n",
       "  'filters': '128',\n",
       "  'size': '3',\n",
       "  'stride': '1',\n",
       "  'pad': '1',\n",
       "  'activation': 'leaky'},\n",
       " {'type': 'shortcut', 'from': '-3', 'activation': 'linear'},\n",
       " {'type': 'convolutional',\n",
       "  'batch_normalize': '1',\n",
       "  'filters': '256',\n",
       "  'size': '3',\n",
       "  'stride': '2',\n",
       "  'pad': '1',\n",
       "  'activation': 'leaky'},\n",
       " {'type': 'convolutional',\n",
       "  'batch_normalize': '1',\n",
       "  'filters': '128',\n",
       "  'size': '1',\n",
       "  'stride': '1',\n",
       "  'pad': '1',\n",
       "  'activation': 'leaky'},\n",
       " {'type': 'convolutional',\n",
       "  'batch_normalize': '1',\n",
       "  'filters': '256',\n",
       "  'size': '3',\n",
       "  'stride': '1',\n",
       "  'pad': '1',\n",
       "  'activation': 'leaky'},\n",
       " {'type': 'shortcut', 'from': '-3', 'activation': 'linear'},\n",
       " {'type': 'convolutional',\n",
       "  'batch_normalize': '1',\n",
       "  'filters': '128',\n",
       "  'size': '1',\n",
       "  'stride': '1',\n",
       "  'pad': '1',\n",
       "  'activation': 'leaky'},\n",
       " {'type': 'convolutional',\n",
       "  'batch_normalize': '1',\n",
       "  'filters': '256',\n",
       "  'size': '3',\n",
       "  'stride': '1',\n",
       "  'pad': '1',\n",
       "  'activation': 'leaky'},\n",
       " {'type': 'shortcut', 'from': '-3', 'activation': 'linear'},\n",
       " {'type': 'convolutional',\n",
       "  'batch_normalize': '1',\n",
       "  'filters': '128',\n",
       "  'size': '1',\n",
       "  'stride': '1',\n",
       "  'pad': '1',\n",
       "  'activation': 'leaky'},\n",
       " {'type': 'convolutional',\n",
       "  'batch_normalize': '1',\n",
       "  'filters': '256',\n",
       "  'size': '3',\n",
       "  'stride': '1',\n",
       "  'pad': '1',\n",
       "  'activation': 'leaky'},\n",
       " {'type': 'shortcut', 'from': '-3', 'activation': 'linear'},\n",
       " {'type': 'convolutional',\n",
       "  'batch_normalize': '1',\n",
       "  'filters': '128',\n",
       "  'size': '1',\n",
       "  'stride': '1',\n",
       "  'pad': '1',\n",
       "  'activation': 'leaky'},\n",
       " {'type': 'convolutional',\n",
       "  'batch_normalize': '1',\n",
       "  'filters': '256',\n",
       "  'size': '3',\n",
       "  'stride': '1',\n",
       "  'pad': '1',\n",
       "  'activation': 'leaky'},\n",
       " {'type': 'shortcut', 'from': '-3', 'activation': 'linear'},\n",
       " {'type': 'convolutional',\n",
       "  'batch_normalize': '1',\n",
       "  'filters': '128',\n",
       "  'size': '1',\n",
       "  'stride': '1',\n",
       "  'pad': '1',\n",
       "  'activation': 'leaky'},\n",
       " {'type': 'convolutional',\n",
       "  'batch_normalize': '1',\n",
       "  'filters': '256',\n",
       "  'size': '3',\n",
       "  'stride': '1',\n",
       "  'pad': '1',\n",
       "  'activation': 'leaky'},\n",
       " {'type': 'shortcut', 'from': '-3', 'activation': 'linear'},\n",
       " {'type': 'convolutional',\n",
       "  'batch_normalize': '1',\n",
       "  'filters': '128',\n",
       "  'size': '1',\n",
       "  'stride': '1',\n",
       "  'pad': '1',\n",
       "  'activation': 'leaky'},\n",
       " {'type': 'convolutional',\n",
       "  'batch_normalize': '1',\n",
       "  'filters': '256',\n",
       "  'size': '3',\n",
       "  'stride': '1',\n",
       "  'pad': '1',\n",
       "  'activation': 'leaky'},\n",
       " {'type': 'shortcut', 'from': '-3', 'activation': 'linear'},\n",
       " {'type': 'convolutional',\n",
       "  'batch_normalize': '1',\n",
       "  'filters': '128',\n",
       "  'size': '1',\n",
       "  'stride': '1',\n",
       "  'pad': '1',\n",
       "  'activation': 'leaky'},\n",
       " {'type': 'convolutional',\n",
       "  'batch_normalize': '1',\n",
       "  'filters': '256',\n",
       "  'size': '3',\n",
       "  'stride': '1',\n",
       "  'pad': '1',\n",
       "  'activation': 'leaky'},\n",
       " {'type': 'shortcut', 'from': '-3', 'activation': 'linear'},\n",
       " {'type': 'convolutional',\n",
       "  'batch_normalize': '1',\n",
       "  'filters': '128',\n",
       "  'size': '1',\n",
       "  'stride': '1',\n",
       "  'pad': '1',\n",
       "  'activation': 'leaky'},\n",
       " {'type': 'convolutional',\n",
       "  'batch_normalize': '1',\n",
       "  'filters': '256',\n",
       "  'size': '3',\n",
       "  'stride': '1',\n",
       "  'pad': '1',\n",
       "  'activation': 'leaky'},\n",
       " {'type': 'shortcut', 'from': '-3', 'activation': 'linear'},\n",
       " {'type': 'convolutional',\n",
       "  'batch_normalize': '1',\n",
       "  'filters': '512',\n",
       "  'size': '3',\n",
       "  'stride': '2',\n",
       "  'pad': '1',\n",
       "  'activation': 'leaky'},\n",
       " {'type': 'convolutional',\n",
       "  'batch_normalize': '1',\n",
       "  'filters': '256',\n",
       "  'size': '1',\n",
       "  'stride': '1',\n",
       "  'pad': '1',\n",
       "  'activation': 'leaky'},\n",
       " {'type': 'convolutional',\n",
       "  'batch_normalize': '1',\n",
       "  'filters': '512',\n",
       "  'size': '3',\n",
       "  'stride': '1',\n",
       "  'pad': '1',\n",
       "  'activation': 'leaky'},\n",
       " {'type': 'shortcut', 'from': '-3', 'activation': 'linear'},\n",
       " {'type': 'convolutional',\n",
       "  'batch_normalize': '1',\n",
       "  'filters': '256',\n",
       "  'size': '1',\n",
       "  'stride': '1',\n",
       "  'pad': '1',\n",
       "  'activation': 'leaky'},\n",
       " {'type': 'convolutional',\n",
       "  'batch_normalize': '1',\n",
       "  'filters': '512',\n",
       "  'size': '3',\n",
       "  'stride': '1',\n",
       "  'pad': '1',\n",
       "  'activation': 'leaky'},\n",
       " {'type': 'shortcut', 'from': '-3', 'activation': 'linear'},\n",
       " {'type': 'convolutional',\n",
       "  'batch_normalize': '1',\n",
       "  'filters': '256',\n",
       "  'size': '1',\n",
       "  'stride': '1',\n",
       "  'pad': '1',\n",
       "  'activation': 'leaky'},\n",
       " {'type': 'convolutional',\n",
       "  'batch_normalize': '1',\n",
       "  'filters': '512',\n",
       "  'size': '3',\n",
       "  'stride': '1',\n",
       "  'pad': '1',\n",
       "  'activation': 'leaky'},\n",
       " {'type': 'shortcut', 'from': '-3', 'activation': 'linear'},\n",
       " {'type': 'convolutional',\n",
       "  'batch_normalize': '1',\n",
       "  'filters': '256',\n",
       "  'size': '1',\n",
       "  'stride': '1',\n",
       "  'pad': '1',\n",
       "  'activation': 'leaky'},\n",
       " {'type': 'convolutional',\n",
       "  'batch_normalize': '1',\n",
       "  'filters': '512',\n",
       "  'size': '3',\n",
       "  'stride': '1',\n",
       "  'pad': '1',\n",
       "  'activation': 'leaky'},\n",
       " {'type': 'shortcut', 'from': '-3', 'activation': 'linear'},\n",
       " {'type': 'convolutional',\n",
       "  'batch_normalize': '1',\n",
       "  'filters': '256',\n",
       "  'size': '1',\n",
       "  'stride': '1',\n",
       "  'pad': '1',\n",
       "  'activation': 'leaky'},\n",
       " {'type': 'convolutional',\n",
       "  'batch_normalize': '1',\n",
       "  'filters': '512',\n",
       "  'size': '3',\n",
       "  'stride': '1',\n",
       "  'pad': '1',\n",
       "  'activation': 'leaky'},\n",
       " {'type': 'shortcut', 'from': '-3', 'activation': 'linear'},\n",
       " {'type': 'convolutional',\n",
       "  'batch_normalize': '1',\n",
       "  'filters': '256',\n",
       "  'size': '1',\n",
       "  'stride': '1',\n",
       "  'pad': '1',\n",
       "  'activation': 'leaky'},\n",
       " {'type': 'convolutional',\n",
       "  'batch_normalize': '1',\n",
       "  'filters': '512',\n",
       "  'size': '3',\n",
       "  'stride': '1',\n",
       "  'pad': '1',\n",
       "  'activation': 'leaky'},\n",
       " {'type': 'shortcut', 'from': '-3', 'activation': 'linear'},\n",
       " {'type': 'convolutional',\n",
       "  'batch_normalize': '1',\n",
       "  'filters': '256',\n",
       "  'size': '1',\n",
       "  'stride': '1',\n",
       "  'pad': '1',\n",
       "  'activation': 'leaky'},\n",
       " {'type': 'convolutional',\n",
       "  'batch_normalize': '1',\n",
       "  'filters': '512',\n",
       "  'size': '3',\n",
       "  'stride': '1',\n",
       "  'pad': '1',\n",
       "  'activation': 'leaky'},\n",
       " {'type': 'shortcut', 'from': '-3', 'activation': 'linear'},\n",
       " {'type': 'convolutional',\n",
       "  'batch_normalize': '1',\n",
       "  'filters': '256',\n",
       "  'size': '1',\n",
       "  'stride': '1',\n",
       "  'pad': '1',\n",
       "  'activation': 'leaky'},\n",
       " {'type': 'convolutional',\n",
       "  'batch_normalize': '1',\n",
       "  'filters': '512',\n",
       "  'size': '3',\n",
       "  'stride': '1',\n",
       "  'pad': '1',\n",
       "  'activation': 'leaky'},\n",
       " {'type': 'shortcut', 'from': '-3', 'activation': 'linear'},\n",
       " {'type': 'convolutional',\n",
       "  'batch_normalize': '1',\n",
       "  'filters': '1024',\n",
       "  'size': '3',\n",
       "  'stride': '2',\n",
       "  'pad': '1',\n",
       "  'activation': 'leaky'},\n",
       " {'type': 'convolutional',\n",
       "  'batch_normalize': '1',\n",
       "  'filters': '512',\n",
       "  'size': '1',\n",
       "  'stride': '1',\n",
       "  'pad': '1',\n",
       "  'activation': 'leaky'},\n",
       " {'type': 'convolutional',\n",
       "  'batch_normalize': '1',\n",
       "  'filters': '1024',\n",
       "  'size': '3',\n",
       "  'stride': '1',\n",
       "  'pad': '1',\n",
       "  'activation': 'leaky'},\n",
       " {'type': 'shortcut', 'from': '-3', 'activation': 'linear'},\n",
       " {'type': 'convolutional',\n",
       "  'batch_normalize': '1',\n",
       "  'filters': '512',\n",
       "  'size': '1',\n",
       "  'stride': '1',\n",
       "  'pad': '1',\n",
       "  'activation': 'leaky'},\n",
       " {'type': 'convolutional',\n",
       "  'batch_normalize': '1',\n",
       "  'filters': '1024',\n",
       "  'size': '3',\n",
       "  'stride': '1',\n",
       "  'pad': '1',\n",
       "  'activation': 'leaky'},\n",
       " {'type': 'shortcut', 'from': '-3', 'activation': 'linear'},\n",
       " {'type': 'convolutional',\n",
       "  'batch_normalize': '1',\n",
       "  'filters': '512',\n",
       "  'size': '1',\n",
       "  'stride': '1',\n",
       "  'pad': '1',\n",
       "  'activation': 'leaky'},\n",
       " {'type': 'convolutional',\n",
       "  'batch_normalize': '1',\n",
       "  'filters': '1024',\n",
       "  'size': '3',\n",
       "  'stride': '1',\n",
       "  'pad': '1',\n",
       "  'activation': 'leaky'},\n",
       " {'type': 'shortcut', 'from': '-3', 'activation': 'linear'},\n",
       " {'type': 'convolutional',\n",
       "  'batch_normalize': '1',\n",
       "  'filters': '512',\n",
       "  'size': '1',\n",
       "  'stride': '1',\n",
       "  'pad': '1',\n",
       "  'activation': 'leaky'},\n",
       " {'type': 'convolutional',\n",
       "  'batch_normalize': '1',\n",
       "  'filters': '1024',\n",
       "  'size': '3',\n",
       "  'stride': '1',\n",
       "  'pad': '1',\n",
       "  'activation': 'leaky'},\n",
       " {'type': 'shortcut', 'from': '-3', 'activation': 'linear'},\n",
       " {'type': 'convolutional',\n",
       "  'batch_normalize': '1',\n",
       "  'filters': '512',\n",
       "  'size': '1',\n",
       "  'stride': '1',\n",
       "  'pad': '1',\n",
       "  'activation': 'leaky'},\n",
       " {'type': 'convolutional',\n",
       "  'batch_normalize': '1',\n",
       "  'size': '3',\n",
       "  'stride': '1',\n",
       "  'pad': '1',\n",
       "  'filters': '1024',\n",
       "  'activation': 'leaky'},\n",
       " {'type': 'convolutional',\n",
       "  'batch_normalize': '1',\n",
       "  'filters': '512',\n",
       "  'size': '1',\n",
       "  'stride': '1',\n",
       "  'pad': '1',\n",
       "  'activation': 'leaky'},\n",
       " {'type': 'convolutional',\n",
       "  'batch_normalize': '1',\n",
       "  'size': '3',\n",
       "  'stride': '1',\n",
       "  'pad': '1',\n",
       "  'filters': '1024',\n",
       "  'activation': 'leaky'},\n",
       " {'type': 'convolutional',\n",
       "  'batch_normalize': '1',\n",
       "  'filters': '512',\n",
       "  'size': '1',\n",
       "  'stride': '1',\n",
       "  'pad': '1',\n",
       "  'activation': 'leaky'},\n",
       " {'type': 'convolutional',\n",
       "  'batch_normalize': '1',\n",
       "  'size': '3',\n",
       "  'stride': '1',\n",
       "  'pad': '1',\n",
       "  'filters': '1024',\n",
       "  'activation': 'leaky'},\n",
       " {'type': 'convolutional',\n",
       "  'size': '1',\n",
       "  'stride': '1',\n",
       "  'pad': '1',\n",
       "  'filters': '255',\n",
       "  'activation': 'linear'},\n",
       " {'type': 'yolo',\n",
       "  'mask': '6,7,8',\n",
       "  'anchors': '10,13,  16,30,  33,23,  30,61,  62,45,  59,119,  116,90,  156,198,  373,326',\n",
       "  'classes': '80',\n",
       "  'num': '9',\n",
       "  'jitter': '.3',\n",
       "  'ignore_thresh': '.7',\n",
       "  'truth_thresh': '1',\n",
       "  'random': '1'},\n",
       " {'type': 'route', 'layers': '-4'},\n",
       " {'type': 'convolutional',\n",
       "  'batch_normalize': '1',\n",
       "  'filters': '256',\n",
       "  'size': '1',\n",
       "  'stride': '1',\n",
       "  'pad': '1',\n",
       "  'activation': 'leaky'},\n",
       " {'type': 'upsample', 'stride': '2'},\n",
       " {'type': 'route', 'layers': '-1, 61'},\n",
       " {'type': 'convolutional',\n",
       "  'batch_normalize': '1',\n",
       "  'filters': '256',\n",
       "  'size': '1',\n",
       "  'stride': '1',\n",
       "  'pad': '1',\n",
       "  'activation': 'leaky'},\n",
       " {'type': 'convolutional',\n",
       "  'batch_normalize': '1',\n",
       "  'size': '3',\n",
       "  'stride': '1',\n",
       "  'pad': '1',\n",
       "  'filters': '512',\n",
       "  'activation': 'leaky'},\n",
       " {'type': 'convolutional',\n",
       "  'batch_normalize': '1',\n",
       "  'filters': '256',\n",
       "  'size': '1',\n",
       "  'stride': '1',\n",
       "  'pad': '1',\n",
       "  'activation': 'leaky'},\n",
       " {'type': 'convolutional',\n",
       "  'batch_normalize': '1',\n",
       "  'size': '3',\n",
       "  'stride': '1',\n",
       "  'pad': '1',\n",
       "  'filters': '512',\n",
       "  'activation': 'leaky'},\n",
       " {'type': 'convolutional',\n",
       "  'batch_normalize': '1',\n",
       "  'filters': '256',\n",
       "  'size': '1',\n",
       "  'stride': '1',\n",
       "  'pad': '1',\n",
       "  'activation': 'leaky'},\n",
       " {'type': 'convolutional',\n",
       "  'batch_normalize': '1',\n",
       "  'size': '3',\n",
       "  'stride': '1',\n",
       "  'pad': '1',\n",
       "  'filters': '512',\n",
       "  'activation': 'leaky'},\n",
       " {'type': 'convolutional',\n",
       "  'size': '1',\n",
       "  'stride': '1',\n",
       "  'pad': '1',\n",
       "  'filters': '255',\n",
       "  'activation': 'linear'},\n",
       " {'type': 'yolo',\n",
       "  'mask': '3,4,5',\n",
       "  'anchors': '10,13,  16,30,  33,23,  30,61,  62,45,  59,119,  116,90,  156,198,  373,326',\n",
       "  'classes': '80',\n",
       "  'num': '9',\n",
       "  'jitter': '.3',\n",
       "  'ignore_thresh': '.7',\n",
       "  'truth_thresh': '1',\n",
       "  'random': '1'},\n",
       " {'type': 'route', 'layers': '-4'},\n",
       " {'type': 'convolutional',\n",
       "  'batch_normalize': '1',\n",
       "  'filters': '128',\n",
       "  'size': '1',\n",
       "  'stride': '1',\n",
       "  'pad': '1',\n",
       "  'activation': 'leaky'},\n",
       " {'type': 'upsample', 'stride': '2'},\n",
       " {'type': 'route', 'layers': '-1, 36'},\n",
       " {'type': 'convolutional',\n",
       "  'batch_normalize': '1',\n",
       "  'filters': '128',\n",
       "  'size': '1',\n",
       "  'stride': '1',\n",
       "  'pad': '1',\n",
       "  'activation': 'leaky'},\n",
       " {'type': 'convolutional',\n",
       "  'batch_normalize': '1',\n",
       "  'size': '3',\n",
       "  'stride': '1',\n",
       "  'pad': '1',\n",
       "  'filters': '256',\n",
       "  'activation': 'leaky'},\n",
       " {'type': 'convolutional',\n",
       "  'batch_normalize': '1',\n",
       "  'filters': '128',\n",
       "  'size': '1',\n",
       "  'stride': '1',\n",
       "  'pad': '1',\n",
       "  'activation': 'leaky'},\n",
       " {'type': 'convolutional',\n",
       "  'batch_normalize': '1',\n",
       "  'size': '3',\n",
       "  'stride': '1',\n",
       "  'pad': '1',\n",
       "  'filters': '256',\n",
       "  'activation': 'leaky'},\n",
       " {'type': 'convolutional',\n",
       "  'batch_normalize': '1',\n",
       "  'filters': '128',\n",
       "  'size': '1',\n",
       "  'stride': '1',\n",
       "  'pad': '1',\n",
       "  'activation': 'leaky'},\n",
       " {'type': 'convolutional',\n",
       "  'batch_normalize': '1',\n",
       "  'size': '3',\n",
       "  'stride': '1',\n",
       "  'pad': '1',\n",
       "  'filters': '256',\n",
       "  'activation': 'leaky'},\n",
       " {'type': 'convolutional',\n",
       "  'size': '1',\n",
       "  'stride': '1',\n",
       "  'pad': '1',\n",
       "  'filters': '255',\n",
       "  'activation': 'linear'},\n",
       " {'type': 'yolo',\n",
       "  'mask': '0,1,2',\n",
       "  'anchors': '10,13,  16,30,  33,23,  30,61,  62,45,  59,119,  116,90,  156,198,  373,326',\n",
       "  'classes': '80',\n",
       "  'num': '9',\n",
       "  'jitter': '.3',\n",
       "  'ignore_thresh': '.7',\n",
       "  'truth_thresh': '1',\n",
       "  'random': '1'}]"
      ]
     },
     "execution_count": 12,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "parse_cfg('./src/cfg/yolov3.cfg')"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "核对一下论文里的网络结构图，确认结构一致\n",
    "![](https://pic1.zhimg.com/80/v2-770e443d1ad592a70bdf31868036a3fc_1440w.jpg)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "定义两个层，一个空层EmptyLayer用于 route 和 shortcut，一个检测层 DetectionLayer用于预测目标检测的 bbox"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [],
   "source": [
    "class EmptyLayer(nn.Module):\n",
    "    \"\"\"Placeholder module for route and shortcut blocks; the actual\n",
    "    concatenation / addition happens in the network's forward().\"\"\"\n",
    "\n",
    "    def __init__(self):\n",
    "        super().__init__()\n",
    "\n",
    "\n",
    "class DetectionLayer(nn.Module):\n",
    "    \"\"\"Detection layer for a yolo block; stores the anchor boxes used\n",
    "    to predict bounding boxes at this scale.\"\"\"\n",
    "\n",
    "    def __init__(self, anchors):\n",
    "        super().__init__()\n",
    "        self.anchors = anchors"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "可以看到这两个层都很简单，因为 route 层中有 concat 操作，而 shortcut 有把两个 featuremap 相加的操作，这两个操作都很简单可以直接在最终的主网络的 forward 中实现，现在先用简单的层来占位置"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "然后我们要进一步用我们解析得到的 cfg 参数，来创建网络模块，这里我们定义一个 create_modules()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [],
   "source": [
    "def create_modules(blocks):\n",
    "    \"\"\"\n",
    "    Build a nn.ModuleList from the block dicts produced by parse_cfg().\n",
    "\n",
    "    Args:\n",
    "        blocks: list of dicts; blocks[0] is the [net] section, the rest\n",
    "            each describe one layer of the network.\n",
    "\n",
    "    Returns:\n",
    "        (net_info, module_list): net_info is the [net] dict, module_list\n",
    "        is a nn.ModuleList holding one nn.Sequential per block.\n",
    "    \"\"\"\n",
    "    net_info = blocks[0]        # information about the input and pre-processing\n",
    "    module_list = nn.ModuleList()\n",
    "    prev_filters = 3            # the input image has 3 channels (R, G, B)\n",
    "    output_filters = []         # output channels of every layer, for route/shortcut\n",
    "\n",
    "    for idx, each_block in enumerate(blocks[1:]):\n",
    "        module = nn.Sequential()\n",
    "        # check the type of block, create a module for it, append to module_list\n",
    "        if each_block['type'] == 'convolutional':\n",
    "            # batch_normalize is absent for the detection convolutions;\n",
    "            # a conv followed by BN needs no bias term.\n",
    "            try:\n",
    "                bn = int(each_block['batch_normalize'])\n",
    "                bias = False\n",
    "            except KeyError:\n",
    "                bn = 0\n",
    "                bias = True\n",
    "            filters = int(each_block['filters'])\n",
    "            size = int(each_block['size'])\n",
    "            stride = int(each_block['stride'])\n",
    "            pad = int(each_block['pad'])\n",
    "            activation = each_block['activation']\n",
    "\n",
    "            # darknet's `pad=1` means \"same\" padding for odd kernel sizes\n",
    "            if pad:\n",
    "                pad = (size - 1) // 2\n",
    "            else:\n",
    "                pad = 0\n",
    "\n",
    "            # add conv layer\n",
    "            conv = nn.Conv2d(prev_filters, filters, size, stride, pad, bias=bias)\n",
    "            module.add_module('conv_{}'.format(idx), conv)\n",
    "\n",
    "            # add bn layer\n",
    "            if bn:\n",
    "                bn = nn.BatchNorm2d(filters)\n",
    "                module.add_module('bn_{}'.format(idx), bn)\n",
    "\n",
    "            # activation is either leaky or linear (linear = identity, no module)\n",
    "            if activation == 'leaky':\n",
    "                leaky = nn.LeakyReLU(0.1, inplace=True)\n",
    "                module.add_module('leaky_{}'.format(idx), leaky)\n",
    "\n",
    "        elif each_block['type'] == 'upsample':\n",
    "            stride = int(each_block['stride'])\n",
    "            # bug fix: honour the configured stride instead of a hard-coded 2,\n",
    "            # and use nearest-neighbour interpolation as darknet does.\n",
    "            upsample = nn.Upsample(scale_factor=stride, mode='nearest')\n",
    "            module.add_module(\"upsample_{}\".format(idx), upsample)\n",
    "\n",
    "        elif each_block['type'] == 'route':\n",
    "            layers = each_block['layers'].split(',')\n",
    "            start = int(layers[0])\n",
    "            try:\n",
    "                end = int(layers[1])\n",
    "            except IndexError:\n",
    "                end = 0         # single-source route\n",
    "\n",
    "            # convert absolute layer indices to offsets relative to idx,\n",
    "            # so that `offset + idx` always recovers the absolute index\n",
    "            if start > 0:\n",
    "                start = start - idx\n",
    "            if end > 0:\n",
    "                end = end - idx\n",
    "\n",
    "            route = EmptyLayer()\n",
    "            module.add_module(\"route_{0}\".format(idx), route)\n",
    "\n",
    "            # output channels: concatenation of the routed feature maps\n",
    "            if end < 0:\n",
    "                filters = output_filters[start + idx] + output_filters[end + idx]\n",
    "            else:\n",
    "                filters = output_filters[start + idx]\n",
    "\n",
    "        elif each_block['type'] == 'shortcut':\n",
    "            # element-wise addition is done in forward(); the output channel\n",
    "            # count equals the previous layer's, so `filters` is left as-is.\n",
    "            shortcut = EmptyLayer()\n",
    "            module.add_module(\"shortcut_{}\".format(idx), shortcut)\n",
    "\n",
    "        elif each_block['type'] == 'yolo':\n",
    "            mask = [int(x) for x in each_block['mask'].split(',')]\n",
    "\n",
    "            anchors = [int(x) for x in each_block['anchors'].split(',')]\n",
    "            anchors = [(anchors[i], anchors[i + 1]) for i in range(0, len(anchors), 2)]\n",
    "            anchors = [anchors[i] for i in mask]   # keep only this scale's anchors\n",
    "\n",
    "            detection = DetectionLayer(anchors)\n",
    "            module.add_module('detection_{}'.format(idx), detection)\n",
    "\n",
    "        module_list.append(module)\n",
    "        prev_filters = filters\n",
    "        output_filters.append(filters)\n",
    "\n",
    "    return (net_info, module_list)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "里面一些地方用了一些简单的 trick 来尽量保证代码简洁而可读性高，多看几遍应该不难理解"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "最后可以写一段代码来验证我们的网络是否正确创建"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "({'type': 'net', 'batch': '64', 'subdivisions': '16', 'width': '608', 'height': '608', 'channels': '3', 'momentum': '0.9', 'decay': '0.0005', 'angle': '0', 'saturation': '1.5', 'exposure': '1.5', 'hue': '.1', 'learning_rate': '0.001', 'burn_in': '1000', 'max_batches': '500200', 'policy': 'steps', 'steps': '400000,450000', 'scales': '.1,.1'}, ModuleList(\n",
      "  (0): Sequential(\n",
      "    (conv_0): Conv2d(3, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
      "    (bn_0): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "    (leaky_0): LeakyReLU(negative_slope=0.1, inplace=True)\n",
      "  )\n",
      "  (1): Sequential(\n",
      "    (conv_1): Conv2d(32, 64, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n",
      "    (bn_1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "    (leaky_1): LeakyReLU(negative_slope=0.1, inplace=True)\n",
      "  )\n",
      "  (2): Sequential(\n",
      "    (conv_2): Conv2d(64, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "    (bn_2): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "    (leaky_2): LeakyReLU(negative_slope=0.1, inplace=True)\n",
      "  )\n",
      "  (3): Sequential(\n",
      "    (conv_3): Conv2d(32, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
      "    (bn_3): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "    (leaky_3): LeakyReLU(negative_slope=0.1, inplace=True)\n",
      "  )\n",
      "  (4): Sequential(\n",
      "    (shortcut_4): EmptyLayer()\n",
      "  )\n",
      "  (5): Sequential(\n",
      "    (conv_5): Conv2d(64, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n",
      "    (bn_5): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "    (leaky_5): LeakyReLU(negative_slope=0.1, inplace=True)\n",
      "  )\n",
      "  (6): Sequential(\n",
      "    (conv_6): Conv2d(128, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "    (bn_6): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "    (leaky_6): LeakyReLU(negative_slope=0.1, inplace=True)\n",
      "  )\n",
      "  (7): Sequential(\n",
      "    (conv_7): Conv2d(64, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
      "    (bn_7): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "    (leaky_7): LeakyReLU(negative_slope=0.1, inplace=True)\n",
      "  )\n",
      "  (8): Sequential(\n",
      "    (shortcut_8): EmptyLayer()\n",
      "  )\n",
      "  (9): Sequential(\n",
      "    (conv_9): Conv2d(128, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "    (bn_9): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "    (leaky_9): LeakyReLU(negative_slope=0.1, inplace=True)\n",
      "  )\n",
      "  (10): Sequential(\n",
      "    (conv_10): Conv2d(64, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
      "    (bn_10): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "    (leaky_10): LeakyReLU(negative_slope=0.1, inplace=True)\n",
      "  )\n",
      "  (11): Sequential(\n",
      "    (shortcut_11): EmptyLayer()\n",
      "  )\n",
      "  (12): Sequential(\n",
      "    (conv_12): Conv2d(128, 256, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n",
      "    (bn_12): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "    (leaky_12): LeakyReLU(negative_slope=0.1, inplace=True)\n",
      "  )\n",
      "  (13): Sequential(\n",
      "    (conv_13): Conv2d(256, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "    (bn_13): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "    (leaky_13): LeakyReLU(negative_slope=0.1, inplace=True)\n",
      "  )\n",
      "  (14): Sequential(\n",
      "    (conv_14): Conv2d(128, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
      "    (bn_14): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "    (leaky_14): LeakyReLU(negative_slope=0.1, inplace=True)\n",
      "  )\n",
      "  (15): Sequential(\n",
      "    (shortcut_15): EmptyLayer()\n",
      "  )\n",
      "  (16): Sequential(\n",
      "    (conv_16): Conv2d(256, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "    (bn_16): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "    (leaky_16): LeakyReLU(negative_slope=0.1, inplace=True)\n",
      "  )\n",
      "  (17): Sequential(\n",
      "    (conv_17): Conv2d(128, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
      "    (bn_17): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "    (leaky_17): LeakyReLU(negative_slope=0.1, inplace=True)\n",
      "  )\n",
      "  (18): Sequential(\n",
      "    (shortcut_18): EmptyLayer()\n",
      "  )\n",
      "  (19): Sequential(\n",
      "    (conv_19): Conv2d(256, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "    (bn_19): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "    (leaky_19): LeakyReLU(negative_slope=0.1, inplace=True)\n",
      "  )\n",
      "  (20): Sequential(\n",
      "    (conv_20): Conv2d(128, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
      "    (bn_20): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "    (leaky_20): LeakyReLU(negative_slope=0.1, inplace=True)\n",
      "  )\n",
      "  (21): Sequential(\n",
      "    (shortcut_21): EmptyLayer()\n",
      "  )\n",
      "  (22): Sequential(\n",
      "    (conv_22): Conv2d(256, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "    (bn_22): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "    (leaky_22): LeakyReLU(negative_slope=0.1, inplace=True)\n",
      "  )\n",
      "  (23): Sequential(\n",
      "    (conv_23): Conv2d(128, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
      "    (bn_23): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "    (leaky_23): LeakyReLU(negative_slope=0.1, inplace=True)\n",
      "  )\n",
      "  (24): Sequential(\n",
      "    (shortcut_24): EmptyLayer()\n",
      "  )\n",
      "  (25): Sequential(\n",
      "    (conv_25): Conv2d(256, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "    (bn_25): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "    (leaky_25): LeakyReLU(negative_slope=0.1, inplace=True)\n",
      "  )\n",
      "  (26): Sequential(\n",
      "    (conv_26): Conv2d(128, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
      "    (bn_26): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "    (leaky_26): LeakyReLU(negative_slope=0.1, inplace=True)\n",
      "  )\n",
      "  (27): Sequential(\n",
      "    (shortcut_27): EmptyLayer()\n",
      "  )\n",
      "  (28): Sequential(\n",
      "    (conv_28): Conv2d(256, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "    (bn_28): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "    (leaky_28): LeakyReLU(negative_slope=0.1, inplace=True)\n",
      "  )\n",
      "  (29): Sequential(\n",
      "    (conv_29): Conv2d(128, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
      "    (bn_29): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "    (leaky_29): LeakyReLU(negative_slope=0.1, inplace=True)\n",
      "  )\n",
      "  (30): Sequential(\n",
      "    (shortcut_30): EmptyLayer()\n",
      "  )\n",
      "  (31): Sequential(\n",
      "    (conv_31): Conv2d(256, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "    (bn_31): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "    (leaky_31): LeakyReLU(negative_slope=0.1, inplace=True)\n",
      "  )\n",
      "  (32): Sequential(\n",
      "    (conv_32): Conv2d(128, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
      "    (bn_32): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "    (leaky_32): LeakyReLU(negative_slope=0.1, inplace=True)\n",
      "  )\n",
      "  (33): Sequential(\n",
      "    (shortcut_33): EmptyLayer()\n",
      "  )\n",
      "  (34): Sequential(\n",
      "    (conv_34): Conv2d(256, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "    (bn_34): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "    (leaky_34): LeakyReLU(negative_slope=0.1, inplace=True)\n",
      "  )\n",
      "  (35): Sequential(\n",
      "    (conv_35): Conv2d(128, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
      "    (bn_35): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "    (leaky_35): LeakyReLU(negative_slope=0.1, inplace=True)\n",
      "  )\n",
      "  (36): Sequential(\n",
      "    (shortcut_36): EmptyLayer()\n",
      "  )\n",
      "  (37): Sequential(\n",
      "    (conv_37): Conv2d(256, 512, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n",
      "    (bn_37): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "    (leaky_37): LeakyReLU(negative_slope=0.1, inplace=True)\n",
      "  )\n",
      "  (38): Sequential(\n",
      "    (conv_38): Conv2d(512, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "    (bn_38): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "    (leaky_38): LeakyReLU(negative_slope=0.1, inplace=True)\n",
      "  )\n",
      "  (39): Sequential(\n",
      "    (conv_39): Conv2d(256, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
      "    (bn_39): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "    (leaky_39): LeakyReLU(negative_slope=0.1, inplace=True)\n",
      "  )\n",
      "  (40): Sequential(\n",
      "    (shortcut_40): EmptyLayer()\n",
      "  )\n",
      "  (41): Sequential(\n",
      "    (conv_41): Conv2d(512, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "    (bn_41): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "    (leaky_41): LeakyReLU(negative_slope=0.1, inplace=True)\n",
      "  )\n",
      "  (42): Sequential(\n",
      "    (conv_42): Conv2d(256, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
      "    (bn_42): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "    (leaky_42): LeakyReLU(negative_slope=0.1, inplace=True)\n",
      "  )\n",
      "  (43): Sequential(\n",
      "    (shortcut_43): EmptyLayer()\n",
      "  )\n",
      "  (44): Sequential(\n",
      "    (conv_44): Conv2d(512, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "    (bn_44): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "    (leaky_44): LeakyReLU(negative_slope=0.1, inplace=True)\n",
      "  )\n",
      "  (45): Sequential(\n",
      "    (conv_45): Conv2d(256, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
      "    (bn_45): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "    (leaky_45): LeakyReLU(negative_slope=0.1, inplace=True)\n",
      "  )\n",
      "  (46): Sequential(\n",
      "    (shortcut_46): EmptyLayer()\n",
      "  )\n",
      "  (47): Sequential(\n",
      "    (conv_47): Conv2d(512, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "    (bn_47): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "    (leaky_47): LeakyReLU(negative_slope=0.1, inplace=True)\n",
      "  )\n",
      "  (48): Sequential(\n",
      "    (conv_48): Conv2d(256, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
      "    (bn_48): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "    (leaky_48): LeakyReLU(negative_slope=0.1, inplace=True)\n",
      "  )\n",
      "  (49): Sequential(\n",
      "    (shortcut_49): EmptyLayer()\n",
      "  )\n",
      "  (50): Sequential(\n",
      "    (conv_50): Conv2d(512, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "    (bn_50): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "    (leaky_50): LeakyReLU(negative_slope=0.1, inplace=True)\n",
      "  )\n",
      "  (51): Sequential(\n",
      "    (conv_51): Conv2d(256, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
      "    (bn_51): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "    (leaky_51): LeakyReLU(negative_slope=0.1, inplace=True)\n",
      "  )\n",
      "  (52): Sequential(\n",
      "    (shortcut_52): EmptyLayer()\n",
      "  )\n",
      "  (53): Sequential(\n",
      "    (conv_53): Conv2d(512, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "    (bn_53): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "    (leaky_53): LeakyReLU(negative_slope=0.1, inplace=True)\n",
      "  )\n",
      "  (54): Sequential(\n",
      "    (conv_54): Conv2d(256, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
      "    (bn_54): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "    (leaky_54): LeakyReLU(negative_slope=0.1, inplace=True)\n",
      "  )\n",
      "  (55): Sequential(\n",
      "    (shortcut_55): EmptyLayer()\n",
      "  )\n",
      "  (56): Sequential(\n",
      "    (conv_56): Conv2d(512, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "    (bn_56): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "    (leaky_56): LeakyReLU(negative_slope=0.1, inplace=True)\n",
      "  )\n",
      "  (57): Sequential(\n",
      "    (conv_57): Conv2d(256, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
      "    (bn_57): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "    (leaky_57): LeakyReLU(negative_slope=0.1, inplace=True)\n",
      "  )\n",
      "  (58): Sequential(\n",
      "    (shortcut_58): EmptyLayer()\n",
      "  )\n",
      "  (59): Sequential(\n",
      "    (conv_59): Conv2d(512, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "    (bn_59): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "    (leaky_59): LeakyReLU(negative_slope=0.1, inplace=True)\n",
      "  )\n",
      "  (60): Sequential(\n",
      "    (conv_60): Conv2d(256, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
      "    (bn_60): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "    (leaky_60): LeakyReLU(negative_slope=0.1, inplace=True)\n",
      "  )\n",
      "  (61): Sequential(\n",
      "    (shortcut_61): EmptyLayer()\n",
      "  )\n",
      "  (62): Sequential(\n",
      "    (conv_62): Conv2d(512, 1024, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n",
      "    (bn_62): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "    (leaky_62): LeakyReLU(negative_slope=0.1, inplace=True)\n",
      "  )\n",
      "  (63): Sequential(\n",
      "    (conv_63): Conv2d(1024, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "    (bn_63): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "    (leaky_63): LeakyReLU(negative_slope=0.1, inplace=True)\n",
      "  )\n",
      "  (64): Sequential(\n",
      "    (conv_64): Conv2d(512, 1024, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
      "    (bn_64): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "    (leaky_64): LeakyReLU(negative_slope=0.1, inplace=True)\n",
      "  )\n",
      "  (65): Sequential(\n",
      "    (shortcut_65): EmptyLayer()\n",
      "  )\n",
      "  (66): Sequential(\n",
      "    (conv_66): Conv2d(1024, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "    (bn_66): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "    (leaky_66): LeakyReLU(negative_slope=0.1, inplace=True)\n",
      "  )\n",
      "  (67): Sequential(\n",
      "    (conv_67): Conv2d(512, 1024, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
      "    (bn_67): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "    (leaky_67): LeakyReLU(negative_slope=0.1, inplace=True)\n",
      "  )\n",
      "  (68): Sequential(\n",
      "    (shortcut_68): EmptyLayer()\n",
      "  )\n",
      "  (69): Sequential(\n",
      "    (conv_69): Conv2d(1024, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "    (bn_69): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "    (leaky_69): LeakyReLU(negative_slope=0.1, inplace=True)\n",
      "  )\n",
      "  (70): Sequential(\n",
      "    (conv_70): Conv2d(512, 1024, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
      "    (bn_70): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "    (leaky_70): LeakyReLU(negative_slope=0.1, inplace=True)\n",
      "  )\n",
      "  (71): Sequential(\n",
      "    (shortcut_71): EmptyLayer()\n",
      "  )\n",
      "  (72): Sequential(\n",
      "    (conv_72): Conv2d(1024, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "    (bn_72): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "    (leaky_72): LeakyReLU(negative_slope=0.1, inplace=True)\n",
      "  )\n",
      "  (73): Sequential(\n",
      "    (conv_73): Conv2d(512, 1024, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
      "    (bn_73): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "    (leaky_73): LeakyReLU(negative_slope=0.1, inplace=True)\n",
      "  )\n",
      "  (74): Sequential(\n",
      "    (shortcut_74): EmptyLayer()\n",
      "  )\n",
      "  (75): Sequential(\n",
      "    (conv_75): Conv2d(1024, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "    (bn_75): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "    (leaky_75): LeakyReLU(negative_slope=0.1, inplace=True)\n",
      "  )\n",
      "  (76): Sequential(\n",
      "    (conv_76): Conv2d(512, 1024, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
      "    (bn_76): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "    (leaky_76): LeakyReLU(negative_slope=0.1, inplace=True)\n",
      "  )\n",
      "  (77): Sequential(\n",
      "    (conv_77): Conv2d(1024, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "    (bn_77): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "    (leaky_77): LeakyReLU(negative_slope=0.1, inplace=True)\n",
      "  )\n",
      "  (78): Sequential(\n",
      "    (conv_78): Conv2d(512, 1024, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
      "    (bn_78): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "    (leaky_78): LeakyReLU(negative_slope=0.1, inplace=True)\n",
      "  )\n",
      "  (79): Sequential(\n",
      "    (conv_79): Conv2d(1024, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "    (bn_79): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "    (leaky_79): LeakyReLU(negative_slope=0.1, inplace=True)\n",
      "  )\n",
      "  (80): Sequential(\n",
      "    (conv_80): Conv2d(512, 1024, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
      "    (bn_80): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "    (leaky_80): LeakyReLU(negative_slope=0.1, inplace=True)\n",
      "  )\n",
      "  (81): Sequential(\n",
      "    (conv_81): Conv2d(1024, 255, kernel_size=(1, 1), stride=(1, 1))\n",
      "  )\n",
      "  (82): Sequential(\n",
      "    (detection_82): DetectionLayer()\n",
      "  )\n",
      "  (83): Sequential(\n",
      "    (route_83): EmptyLayer()\n",
      "  )\n",
      "  (84): Sequential(\n",
      "    (conv_84): Conv2d(512, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "    (bn_84): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "    (leaky_84): LeakyReLU(negative_slope=0.1, inplace=True)\n",
      "  )\n",
      "  (85): Sequential(\n",
      "    (upsample_85): Upsample(scale_factor=2.0, mode=bilinear)\n",
      "  )\n",
      "  (86): Sequential(\n",
      "    (route_86): EmptyLayer()\n",
      "  )\n",
      "  (87): Sequential(\n",
      "    (conv_87): Conv2d(768, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "    (bn_87): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "    (leaky_87): LeakyReLU(negative_slope=0.1, inplace=True)\n",
      "  )\n",
      "  (88): Sequential(\n",
      "    (conv_88): Conv2d(256, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
      "    (bn_88): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "    (leaky_88): LeakyReLU(negative_slope=0.1, inplace=True)\n",
      "  )\n",
      "  (89): Sequential(\n",
      "    (conv_89): Conv2d(512, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "    (bn_89): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "    (leaky_89): LeakyReLU(negative_slope=0.1, inplace=True)\n",
      "  )\n",
      "  (90): Sequential(\n",
      "    (conv_90): Conv2d(256, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
      "    (bn_90): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "    (leaky_90): LeakyReLU(negative_slope=0.1, inplace=True)\n",
      "  )\n",
      "  (91): Sequential(\n",
      "    (conv_91): Conv2d(512, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "    (bn_91): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "    (leaky_91): LeakyReLU(negative_slope=0.1, inplace=True)\n",
      "  )\n",
      "  (92): Sequential(\n",
      "    (conv_92): Conv2d(256, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
      "    (bn_92): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "    (leaky_92): LeakyReLU(negative_slope=0.1, inplace=True)\n",
      "  )\n",
      "  (93): Sequential(\n",
      "    (conv_93): Conv2d(512, 255, kernel_size=(1, 1), stride=(1, 1))\n",
      "  )\n",
      "  (94): Sequential(\n",
      "    (detection_94): DetectionLayer()\n",
      "  )\n",
      "  (95): Sequential(\n",
      "    (route_95): EmptyLayer()\n",
      "  )\n",
      "  (96): Sequential(\n",
      "    (conv_96): Conv2d(256, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "    (bn_96): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "    (leaky_96): LeakyReLU(negative_slope=0.1, inplace=True)\n",
      "  )\n",
      "  (97): Sequential(\n",
      "    (upsample_97): Upsample(scale_factor=2.0, mode=bilinear)\n",
      "  )\n",
      "  (98): Sequential(\n",
      "    (route_98): EmptyLayer()\n",
      "  )\n",
      "  (99): Sequential(\n",
      "    (conv_99): Conv2d(384, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "    (bn_99): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "    (leaky_99): LeakyReLU(negative_slope=0.1, inplace=True)\n",
      "  )\n",
      "  (100): Sequential(\n",
      "    (conv_100): Conv2d(128, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
      "    (bn_100): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "    (leaky_100): LeakyReLU(negative_slope=0.1, inplace=True)\n",
      "  )\n",
      "  (101): Sequential(\n",
      "    (conv_101): Conv2d(256, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "    (bn_101): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "    (leaky_101): LeakyReLU(negative_slope=0.1, inplace=True)\n",
      "  )\n",
      "  (102): Sequential(\n",
      "    (conv_102): Conv2d(128, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
      "    (bn_102): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "    (leaky_102): LeakyReLU(negative_slope=0.1, inplace=True)\n",
      "  )\n",
      "  (103): Sequential(\n",
      "    (conv_103): Conv2d(256, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
      "    (bn_103): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "    (leaky_103): LeakyReLU(negative_slope=0.1, inplace=True)\n",
      "  )\n",
      "  (104): Sequential(\n",
      "    (conv_104): Conv2d(128, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
      "    (bn_104): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "    (leaky_104): LeakyReLU(negative_slope=0.1, inplace=True)\n",
      "  )\n",
      "  (105): Sequential(\n",
      "    (conv_105): Conv2d(256, 255, kernel_size=(1, 1), stride=(1, 1))\n",
      "  )\n",
      "  (106): Sequential(\n",
      "    (detection_106): DetectionLayer()\n",
      "  )\n",
      "))\n"
     ]
    }
   ],
   "source": [
    "# parse the cfg into a list of block dicts, then build and display the module list\n",
    "blocks = parse_cfg(\"src/cfg/yolov3.cfg\")\n",
    "print(create_modules(blocks))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "然后我们要开始正式创建网络了，forward 描述前向传播的过程，这部分跟 create_modules 的代码很像，因为也是根据解析得到的 blocks 列表来一层一层传递特征图，仔细看一下会发现 route 部分是直接拿取的前面层的结果，这也印证了前面 EmptyLayer 只是用来占位置，真实的 concat 操作直接写在 forward 里即可"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "类似的，shortcut 也是直接把前面层的结果与本层的输入（即前一层的输出）相加"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "另外，由于shortcut 的前一层一定是卷积层，所以 x 一定会等于前一层的输出，因此没必要通过 outputs 来获取前一层结果"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "metadata": {},
   "outputs": [],
   "source": [
    "class Darknet(nn.Module):\n",
    "    \"\"\"YOLOv3 network assembled from a darknet cfg file.\"\"\"\n",
    "\n",
    "    def __init__(self, cfg_file):\n",
    "        super(Darknet, self).__init__()\n",
    "        self.blocks = parse_cfg(cfg_file)\n",
    "        self.net_info, self.module_list = create_modules(self.blocks)\n",
    "\n",
    "    def forward(self, x):\n",
    "        blocks = self.blocks[1:]  # skip the leading [net] block: it holds hyperparams, not layers\n",
    "        outputs = {}              # per-layer feature maps, needed by route/shortcut lookups\n",
    "\n",
    "        for idx, block in enumerate(blocks):\n",
    "            if block['type'] == 'convolutional' or block['type'] == 'upsample':\n",
    "                x = self.module_list[idx](x)\n",
    "\n",
    "            elif block['type'] == 'route':\n",
    "                # avoid shadowing the feature map `x` (the original lambda did)\n",
    "                layers = [int(l) for l in block['layers'].split(',')]\n",
    "\n",
    "                # cfg may give absolute layer indices; normalise them to offsets from idx\n",
    "                if layers[0] > 0:\n",
    "                    layers[0] -= idx\n",
    "                if len(layers) == 1: # len must be equal to 1 or 2\n",
    "                    x = outputs[layers[0] + idx]\n",
    "                else:\n",
    "                    if layers[1] > 0:\n",
    "                        layers[1] -= idx\n",
    "                    featuremap1 = outputs[layers[0] + idx]\n",
    "                    featuremap2 = outputs[layers[1] + idx]\n",
    "\n",
    "                    # concatenate along the channel dimension\n",
    "                    x = torch.cat((featuremap1, featuremap2), 1)\n",
    "\n",
    "            elif block['type'] == 'shortcut':\n",
    "                # out-of-place add: in-place += can break autograd on saved tensors\n",
    "                x = x + outputs[int(block['from']) + idx]\n",
    "\n",
    "            # BUG FIX: cache every layer's output; without this, every route/shortcut\n",
    "            # lookup above raised KeyError because `outputs` stayed empty\n",
    "            outputs[idx] = x"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "到这里我们写了 Darknet 主干网络的 conv, upsample, route, shortcut 四种模块的前向传播，接下来还要写 yolo 模块，即检测层的前向传播。但由于检测层的预测是用1x1卷积完成，最后得到的特征图形状为bs, (5+C)xB, H, W，很不利于我们操作（比如我想要第二个检测框的参数，就得用[:, (5+C):2*(5+C),:,:]）。为了操作直观简洁，我们要把这个四维的特征图展开成二维的，让每一行只有一个检测框的参数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 61,
   "metadata": {},
   "outputs": [],
   "source": [
    "from __future__ import division\n",
    "\n",
    "import torch \n",
    "import torch.nn as nn\n",
    "import torch.nn.functional as F \n",
    "import numpy as np\n",
    "import cv2 \n",
    "\n",
    "def transform_predict(pred, img_size, anchors, num_classes, device=None):\n",
    "    '''\n",
    "    Takes the prediction featuremap and some params\n",
    "\n",
    "    Return a 2-dim tensor (BHW)x(5+C) which reshape from the prediction\n",
    "    C = num_classes\n",
    "    B = len(anchors)\n",
    "    H = W = pred.size(2) = pred.size(3)\n",
    "    '''\n",
    "    batch_size = pred.size(0)\n",
    "    scale = img_size // pred.size(2) # original img is 'scale' times larger than the featuremap (due to strided convs)\n",
    "    grid_size = pred.size(2)         # current grid is grid_size x grid_size\n",
    "    bbox_attrs = 5 + num_classes\n",
    "    num_anchors = len(anchors)\n",
    "\n",
    "    # we want to reshape bs, (5+C)*B, H, W -> bs, BHW, (5+C)\n",
    "    # step1: bs, (5+C)*B, H, W -> bs, (5+C)*B, HW\n",
    "    # BUG FIX: dim 1 must stay (5+C)*B = bbox_attrs*num_anchors (matching the demo\n",
    "    # below); the old (bbox_attrs, HW*B) grouping silently scrambled the data\n",
    "    pred = pred.reshape(batch_size, bbox_attrs*num_anchors, grid_size*grid_size)\n",
    "    # step2: bs, (5+C)*B, HW -> bs, HW, (5+C)*B\n",
    "    pred = pred.transpose(1, 2)\n",
    "    # step3: bs, HW, (5+C)*B -> bs, BHW, (5+C)\n",
    "    pred = pred.reshape(batch_size, num_anchors*grid_size*grid_size, bbox_attrs)\n",
    "\n",
    "    # BUG FIX: the docstring promises a tensor but the function returned None\n",
    "    return pred\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "这个变换的展开部分用了三步才完成，理解起来有一点难度，因为原本是一个四阶张量，我们要把它展开成一个二维的特征图![](https://pic1.zhimg.com/80/v2-f00c6ab7bb46832d43f90c96120a2b80_1440w.jpg)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "直接对它 reshape 是肯定不行的，这里我做了个简单的实验，很容易就能明白为什么要用三步来完成变换："
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "一开始我们随机创建一个三阶张量 t（由于 batch_size 这一维没有变化，所以这个例子就用三阶来举例了），尺寸为2x2, 2, 2 形式上对应了(5+C)xB, H, W (为了简单，姑且当5+C=2吧）"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 42,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[[ 0.3833,  1.2660],\n",
       "         [-1.7883,  1.8974]],\n",
       "\n",
       "        [[ 1.1179, -1.4644],\n",
       "         [-0.4541, -0.9651]],\n",
       "\n",
       "        [[ 1.9296,  0.9176],\n",
       "         [-0.1147,  0.3998]],\n",
       "\n",
       "        [[ 0.3357,  0.3666],\n",
       "         [ 0.4282, -0.4085]]])"
      ]
     },
     "execution_count": 42,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# toy example: pretend 5+C = 2 and B = 2, so dim 0 is (5+C)*B = 4, with H = W = 2\n",
    "t = torch.randn(((2*2), 2,2)) # (5+C)*B, H, W\n",
    "t"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "第一次变换合并了 H 和 W，对应原来特征图的长宽，即把一个平面拉成了一条"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 43,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[ 0.3833,  1.2660, -1.7883,  1.8974],\n",
       "        [ 1.1179, -1.4644, -0.4541, -0.9651],\n",
       "        [ 1.9296,  0.9176, -0.1147,  0.3998],\n",
       "        [ 0.3357,  0.3666,  0.4282, -0.4085]])"
      ]
     },
     "execution_count": 43,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# step 1: flatten each 2x2 spatial plane into a row of 4 values\n",
    "t1 = t.reshape(2*2,4) # (5+C)*B, H, W -> (5+C)*B, HW \n",
    "t1"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "第二次变换交换第一第二维，把 HW 整体挪到第二维上来，保证它们不会被截断"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 44,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[ 0.3833,  1.1179,  1.9296,  0.3357],\n",
       "        [ 1.2660, -1.4644,  0.9176,  0.3666],\n",
       "        [-1.7883, -0.4541, -0.1147,  0.4282],\n",
       "        [ 1.8974, -0.9651,  0.3998, -0.4085]])"
      ]
     },
     "execution_count": 44,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "t2 = t1.transpose(0,1);t2 # (5+C)*B, HW -> HW, (5+C)*B"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "第三次变换限定了每行只保留5+C 个数，因此就能顺利把张量展开成我们想要的格式了"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 49,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[ 0.3833,  1.1179],\n",
       "        [ 1.9296,  0.3357],\n",
       "        [ 1.2660, -1.4644],\n",
       "        [ 0.9176,  0.3666],\n",
       "        [-1.7883, -0.4541],\n",
       "        [-0.1147,  0.4282],\n",
       "        [ 1.8974, -0.9651],\n",
       "        [ 0.3998, -0.4085]])"
      ]
     },
     "execution_count": 49,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "t3 = t2.reshape(8,2);t3 # HW, (5+C)*B -> BHW, (5+C)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "这里可以对比一下如果直接把原始张量 reshape 成8,2会怎么样，结果是完全不一样的。所以这三步是无法省略的"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 50,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[ 0.3833,  1.2660],\n",
       "        [-1.7883,  1.8974],\n",
       "        [ 1.1179, -1.4644],\n",
       "        [-0.4541, -0.9651],\n",
       "        [ 1.9296,  0.9176],\n",
       "        [-0.1147,  0.3998],\n",
       "        [ 0.3357,  0.3666],\n",
       "        [ 0.4282, -0.4085]])"
      ]
     },
     "execution_count": 50,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "t.reshape(8,2) # naive reshape of the raw tensor — yields a different (wrong) row layout"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "随后我们继续完善这个函数，预设的 anchors 尺寸要对应的缩小到跟特征图相同尺度，然后给中心坐标、存在性、偏移做 sigmoid，之后把偏移加上每个单元格左上角的坐标；以及为每个单元格分配相同个数的初始 anchors，并乘以对数变换值得到在特征图上预测的 bbox 尺寸。在完成上述操作后，要把坐标尺寸等参数乘上缩放系数，还原得到在原始图片上的 bbox 数据"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 63,
   "metadata": {},
   "outputs": [],
   "source": [
    "def transform_predict(pred, img_size, anchors, num_classes, device=None):\n",
    "    '''\n",
    "    Takes the prediction featuremap and some params\n",
    "\n",
    "    Return a 2-dim tensor (BHW)x(5+C) which reshape from the prediction\n",
    "    C = num_classes\n",
    "    B = len(anchors)\n",
    "    H = W = pred.size(2) = pred.size(3)\n",
    "    '''\n",
    "    batch_size = pred.size(0)\n",
    "    scale = img_size // pred.size(2) # original img size is 'scale' times largger than the pred size(due to conv)\n",
    "    grid_size = pred.size(2)         # current grid size is cur_size*cur_size\n",
    "    bbox_attrs = 5 + num_classes\n",
    "    num_anchors = len(anchors)\n",
    "\n",
    "    # we want to reshape bs, (5+C)*B, H, W -> bs, BHW, (5+C)\n",
    "    # step1: bs, (5+C)*B, H, W -> bs, (5+C)*B, HW \n",
    "    pred = pred.reshape(batch_size, bbox_attrs, grid_size*grid_size*num_anchors)\n",
    "    # step2: bs, (5+C)*B, HW -> bs, HW, (5+C)*B\n",
    "    pred = pred.transpose(1, 2)\n",
    "    # step3: bs, HW, (5+C)*B -> bs, BHW, (5+C)\n",
    "    pred = pred.reshape(batch_size, num_anchors*grid_size*grid_size, bbox_attrs)\n",
    "\n",
    "    anchors = [(a[0]/scale, a[1]/scale) for a in anchors]\n",
    "\n",
    "    # 5+C = tx, ty, tw, th, ob, c1, c2 ... cn\n",
    "    pred[:,:0] = pred[:,:0].sigmoid() # tx\n",
    "    pred[:,:1] = pred[:,:1].sigmoid() # ty\n",
    "    pred[:,:4] = pred[:,:4].sigmoid() # ob\n",
    "\n",
    "    grid = np.arange(grid_size)\n",
    "    x, y = np.meshgrid(grid, grid)\n",
    "    x_offset = torch.FloatTensor(x).reshape(-1,1) # g x 1\n",
    "    y_offset = torch.FloatTensor(y).reshape(-1,1) # g x 1\n",
    "\n",
    "    if device: # use GPU\n",
    "        x_offset = x_offset.to(device)\n",
    "        y_offset = y_offset.to(device)\n",
    "    \n",
    "    # concat -> g, 2\n",
    "    x_y_offset = torch.cat((x_offset, y_offset), 1) \n",
    "    # g, 2 -> g, 2xB\n",
    "    x_y_offset = x_y_offset.repeat(1, num_anchors)\n",
    "    # g, 2xB -> gxB, 2\n",
    "    x_y_offset = x_y_offset.reshape(-1, 2)\n",
    "    # gxB, 2 -> 1, gxB, 2\n",
    "    x_y_offset = x_y_offset.unsqueeze(0)\n",
    "\n",
    "    pred[:,:,:2] += x_y_offset # add offset to tx, ty\n",
    "\n",
    "    anchors = torch.FloatTensor(anchors)\n",
    "    if device:\n",
    "        anchors = anchors.to(device)\n",
    "    \n",
    "    # init anchors for every grid unit\n",
    "    # 1 x B -> HW, B\n",
    "    anchors = anchors.repeat(grid_size*grid_size, 1)\n",
    "    # HW, B -> 1, HW, B\n",
    "    anchors = anchors.unsqueeze(0)\n",
    "    # th = exp(th) * anchor_th (tw too)\n",
    "    pred[:,:,2:4] = torch.exp(pred[:,:,2:4])*anchors\n",
    "\n",
    "    # p(c) = sigmoid(c)\n",
    "    pred[:,:,5:5+num_classes] = pred[:,:,5:5+num_classes].sigmoid()\n",
    "\n",
    "    pred[:,:,:4] *= scale\n",
    "\n",
    "    return pred"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "接下来我们继续完善模型定义："
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 76,
   "metadata": {},
   "outputs": [],
   "source": [
     "class Darknet(nn.Module):\n",
     "    \"\"\"YOLOv3 network assembled from a darknet cfg file.\"\"\"\n",
     "\n",
     "    def __init__(self, cfg_file):\n",
     "        # parse the cfg into block dicts, then turn them into torch modules\n",
     "        super(Darknet, self).__init__()\n",
     "        self.blocks = parse_cfg(cfg_file)\n",
     "        self.net_info, self.module_list = create_modules(self.blocks)\n",
     "\n",
     "    def forward(self, x):\n",
     "        \"\"\"Run one forward pass; returns the detections of every yolo\n",
     "        layer concatenated into a single (bs, boxes, 5+C) tensor.\n",
     "\n",
     "        NOTE(review): `dets` is only created in the first yolo branch,\n",
     "        so a cfg without any yolo layer would raise NameError here.\n",
     "        \"\"\"\n",
     "        blocks = self.blocks[1:]  # blocks[0] is the [net] section, not a layer\n",
     "        outputs = {}  # every layer's feature map, needed by route/shortcut\n",
     "\n",
     "        cnt_dets = 0  # how many yolo (detection) layers we have hit so far\n",
     "        for idx, block in enumerate(blocks):\n",
     "            if block['type'] == 'convolutional' or block['type'] == 'upsample':\n",
     "                x = self.module_list[idx](x)\n",
     "            \n",
     "            elif block['type'] == 'route':\n",
     "                # route forwards (or concatenates) earlier feature maps;\n",
     "                # indices may be absolute (>0) or relative — normalize to relative\n",
     "                layers = list(map(lambda x: int(x), block['layers'].split(',')))\n",
     "                \n",
     "                if layers[0] > 0:\n",
     "                    layers[0] -= idx\n",
     "                if len(layers) == 1: # len must be equal to 1 or 2\n",
     "                    x = outputs[layers[0] + idx]\n",
     "                else:\n",
     "                    if layers[1] > 0:\n",
     "                        layers[1] -= idx\n",
     "                    featuremap1 = outputs[layers[0] + idx]\n",
     "                    featuremap2 = outputs[layers[1] + idx]\n",
     "\n",
     "                    # concatenate along the channel dimension\n",
     "                    x = torch.cat((featuremap1, featuremap2), 1)\n",
     "            \n",
     "            elif block['type'] == 'shortcut':\n",
     "                # residual connection: add an earlier layer's output\n",
     "                x += outputs[int(block['from']) + idx]\n",
     "\n",
     "            elif block['type'] == 'yolo':\n",
     "                # decode this scale's raw featuremap into box predictions\n",
     "                anchors =  self.module_list[idx][0].anchors\n",
     "                img_size = int(self.net_info['height'])\n",
     "                num_classes = int(block['classes'])\n",
     "                x = transform_predict(x, img_size, anchors, num_classes)\n",
     "\n",
     "                if cnt_dets == 0:\n",
     "                    dets = x\n",
     "                else:\n",
     "                    dets = torch.cat((dets, x), 1)\n",
     "                cnt_dets += 1\n",
     "\n",
     "            outputs[idx] = x\n",
     "        return dets    \n",
     "            "
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "定义完模型我们试一下输入一张图片来验证一下模型能不能顺利完成一次前向传播："
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "用以下指令拿一张图：\n",
    "```\n",
    "wget https://github.com/ayooshkathuria/pytorch-yolo-v3/raw/master/dog-cycle-car.png\n",
    "```\n",
    "![](https://github.com/ayooshkathuria/pytorch-yolo-v3/raw/master/dog-cycle-car.png)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "图片在输入神经网络之前还需要做一些简单的预处理，\n",
    "* 我们用 opencv 的库来读取图片\n",
    "* 把图片缩放成416x416\n",
    "* 并且把三个颜色通道的排布安排成 RGB（这里是 opencv 比较特殊的地方，默认三个通道顺序是 BGR；其他的图像处理库没有这个问题）\n",
    "* 最后还需要新增一维作为 batch 的维度，严格匹配网络的输入格式\n",
    "* 并用 torch 读取刚才我们预处理好的图片，转换成 Pytorch 的浮点张量"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 67,
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_test_input():\n",
    "    img = cv2.imread(\"dog-cycle-car.png\")\n",
    "    img = cv2.resize(img, (416,416))          #Resize to the input dimension\n",
    "    img_ =  img[:,:,::-1].transpose((2,0,1))  # BGR -> RGB | H X W C -> C X H X W \n",
    "    img_ = img_[np.newaxis,:,:,:]/255.0       #Add a channel at 0 (for batch) | Normalise\n",
    "    img_ = torch.from_numpy(img_).float()     #Convert to float\n",
    "    return img_"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "然后我们就可以构建模型，并把预处理过的图片输入模型，打印结果"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 77,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tensor([[[2.8506e+01, 2.9370e+01, 2.2536e+02,  ..., 6.5027e-01,\n",
      "          6.5238e-01, 6.5067e-01],\n",
      "         [2.3532e+01, 2.0787e+01, 2.9245e+02,  ..., 6.2272e-01,\n",
      "          6.2550e-01, 6.2754e-01],\n",
      "         [1.9401e+01, 2.2140e+01, 7.3307e+02,  ..., 6.2744e-01,\n",
      "          6.2653e-01, 6.1711e-01],\n",
      "         ...,\n",
      "         [5.6003e+02, 5.6423e+02, 1.2923e+01,  ..., 5.6697e-01,\n",
      "          5.2232e-01, 4.7274e-01],\n",
      "         [5.5896e+02, 5.6110e+02, 1.8051e+01,  ..., 6.0637e-01,\n",
      "          4.8670e-01, 4.5249e-01],\n",
      "         [5.6047e+02, 5.6626e+02, 4.1285e+01,  ..., 5.0428e-01,\n",
      "          4.8574e-01, 4.5820e-01]]], grad_fn=<CatBackward>)\n"
     ]
    }
   ],
   "source": [
     "# build the network (still random weights) and run one forward pass\n",
     "model = Darknet(\"src/cfg/yolov3.cfg\")\n",
     "inp = get_test_input()\n",
     "pred = model(inp)\n",
     "print (pred)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "到这里我们可以看到，我们的 darknet 已经可以成功完成前向传播完成一次inference 了，接下来我们要做的是给我们的网络加载预训练参数。"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "由于 yolov3的作者是用c语言写的，所以在读取模型参数的时候，我们也需要用一个专门的函数来进行小心的解析，于是我们给 darknet 创建一个成员函数 load_weights()用来加载预训练参数，预训练参数可以通过下面的命令下载：\n",
    "```\n",
    "wget https://pjreddie.com/media/files/yolov3.weights\n",
    "\n",
    "```"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "模型的预训练参数的存储形式如下图：\n",
     "![](https://pic2.zhimg.com/80/v2-084cff4e84cd3e4a075b0c154dfb2c4d_1440w.jpg)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "在前面的 create_module 时我们还记得，一个convolutional模块的 Sequencial 里我们有两个模块：Conv2d 和 BatchNorm2d，所以当我们枚举遍历module_list 的时候，我们可以通过 model[0]和 model[1]分别拿到以上两个模块对象，然后分别给它们加载参数。\n",
    "\n",
     "加载参数的代码中用到了一个 PyTorch 张量的方法 numel()，其含义是 the number of elements，即，返回一个张量中成员元素的个数，我们用这个来得到加载参数时指针的移动间隔"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 79,
   "metadata": {},
   "outputs": [],
   "source": [
     "class Darknet(nn.Module):\n",
     "    \"\"\"YOLOv3 network built from a darknet cfg file, with weight loading.\"\"\"\n",
     "\n",
     "    def __init__(self, cfg_file):\n",
     "        # parse the cfg into block dicts, then turn them into torch modules\n",
     "        super(Darknet, self).__init__()\n",
     "        self.blocks = parse_cfg(cfg_file)\n",
     "        self.net_info, self.module_list = create_modules(self.blocks)\n",
     "\n",
     "    def forward(self, x):\n",
     "        \"\"\"Run one forward pass; returns the detections of every yolo\n",
     "        layer concatenated into a single (bs, boxes, 5+C) tensor.\"\"\"\n",
     "        blocks = self.blocks[1:]  # blocks[0] is the [net] section, not a layer\n",
     "        outputs = {}  # every layer's feature map, needed by route/shortcut\n",
     "\n",
     "        cnt_dets = 0  # how many yolo (detection) layers we have hit so far\n",
     "        for idx, block in enumerate(blocks):\n",
     "            if block['type'] == 'convolutional' or block['type'] == 'upsample':\n",
     "                x = self.module_list[idx](x)\n",
     "            \n",
     "            elif block['type'] == 'route':\n",
     "                # indices may be absolute (>0) or relative — normalize to relative\n",
     "                layers = list(map(lambda x: int(x), block['layers'].split(',')))\n",
     "                \n",
     "                if layers[0] > 0:\n",
     "                    layers[0] -= idx\n",
     "                if len(layers) == 1: # len must be equal to 1 or 2\n",
     "                    x = outputs[layers[0] + idx]\n",
     "                else:\n",
     "                    if layers[1] > 0:\n",
     "                        layers[1] -= idx\n",
     "                    featuremap1 = outputs[layers[0] + idx]\n",
     "                    featuremap2 = outputs[layers[1] + idx]\n",
     "\n",
     "                    # concatenate along the channel dimension\n",
     "                    x = torch.cat((featuremap1, featuremap2), 1)\n",
     "            \n",
     "            elif block['type'] == 'shortcut':\n",
     "                # residual connection: add an earlier layer's output\n",
     "                x += outputs[int(block['from']) + idx]\n",
     "\n",
     "            elif block['type'] == 'yolo':\n",
     "                # decode this scale's raw featuremap into box predictions\n",
     "                anchors =  self.module_list[idx][0].anchors\n",
     "                img_size = int(self.net_info['height'])\n",
     "                num_classes = int(block['classes'])\n",
     "                x = transform_predict(x, img_size, anchors, num_classes)\n",
     "\n",
     "                if cnt_dets == 0:\n",
     "                    dets = x\n",
     "                else:\n",
     "                    dets = torch.cat((dets, x), 1)\n",
     "                cnt_dets += 1\n",
     "\n",
     "            outputs[idx] = x\n",
     "        return dets    \n",
     "    \n",
     "    def load_weights(self, weights_file):            \n",
     "        \"\"\"Load darknet pretrained weights from a .weights file.\n",
     "\n",
     "        File layout: a 5-int32 header, then every conv layer's params\n",
     "        as raw float32 in the order the layers appear in the cfg:\n",
     "          with batch norm   : bn bias, bn weight, bn running_mean,\n",
     "                              bn running_var, conv weight\n",
     "          without batch norm: conv bias, conv weight\n",
     "        \"\"\"\n",
     "        with open(weights_file, 'rb') as f:\n",
     "            header = np.fromfile(f, dtype=np.int32, count=5)\n",
     "            self.header = torch.from_numpy(header)\n",
     "            self.seen = self.header[3]  # images seen during training (per header layout)\n",
     "            weights = np.fromfile(f, dtype = np.float32)  # the rest: all params, flat\n",
     "        \n",
     "        ptr = 0  # read cursor into the flat `weights` array\n",
     "        for idx, block in enumerate(self.blocks[1:]):\n",
     "            if block['type'] == 'convolutional':\n",
     "                model = self.module_list[idx]\n",
     "                try:\n",
     "                    batch_normalize = int(block['batch_normalize'])\n",
     "                except:\n",
     "                    batch_normalize = 0  # key absent -> the conv carries its own bias\n",
     "                conv = model[0]\n",
     "\n",
     "                if batch_normalize:\n",
     "                    bn = model[1]\n",
     "\n",
     "                    # get the number of elements of an array\n",
     "                    num_bn_bias = bn.bias.numel() \n",
     "\n",
     "                    # load bn weights (file order: bias, weight, mean, var)\n",
     "                    bn_biases = torch.from_numpy(weights[ptr:ptr+num_bn_bias])\n",
     "                    ptr += num_bn_bias\n",
     "\n",
     "                    bn_weights = torch.from_numpy(weights[ptr:ptr+num_bn_bias])\n",
     "                    ptr += num_bn_bias\n",
     "\n",
     "                    bn_running_mean = torch.from_numpy(weights[ptr:ptr+num_bn_bias])\n",
     "                    ptr += num_bn_bias\n",
     "\n",
     "                    bn_running_var = torch.from_numpy(weights[ptr:ptr+num_bn_bias])\n",
     "                    ptr += num_bn_bias\n",
     "\n",
     "                    # reshape the flat slices to match the module params\n",
     "                    bn_biases = bn_biases.view_as(bn.bias.data)\n",
     "                    bn_weights = bn_weights.view_as(bn.weight.data)\n",
     "                    bn_running_mean = bn_running_mean.view_as(bn.running_mean)\n",
     "                    bn_running_var = bn_running_var.view_as(bn.running_var)\n",
     "\n",
     "                    # copy_ writes in place into the module's parameters\n",
     "                    bn.bias.data.copy_(bn_biases)\n",
     "                    bn.weight.data.copy_(bn_weights)\n",
     "                    bn.running_mean.copy_(bn_running_mean)\n",
     "                    bn.running_var.copy_(bn_running_var)\n",
     "\n",
     "                else:\n",
     "                    num_bias = conv.bias.numel()\n",
     "\n",
     "                    # load conv weights\n",
     "                    conv_biases = torch.from_numpy(weights[ptr:ptr+num_bias])\n",
     "                    ptr += num_bias\n",
     "\n",
     "                    conv_biases = conv_biases.view_as(conv.bias.data)\n",
     "                    \n",
     "                    conv.bias.data.copy_(conv_biases)\n",
     "\n",
     "                # both branches end with the conv kernel weights\n",
     "                num_weights = conv.weight.numel()\n",
     "\n",
     "                conv_weights = torch.from_numpy(weights[ptr:ptr+num_weights])\n",
     "                ptr += num_weights\n",
     "\n",
     "                conv_weights = conv_weights.view_as(conv.weight.data)\n",
     "\n",
     "                conv.weight.data.copy_(conv_weights)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "接下来我们来测试一下参数加载"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 81,
   "metadata": {},
   "outputs": [],
   "source": [
     "model = Darknet(\"src/cfg/yolov3.cfg\")\n",
     "# NOTE(review): absolute local path — point this at wherever yolov3.weights was downloaded\n",
     "model.load_weights(\"/home/jiangtao/yolov3.weights\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 82,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tensor([[[2.6052e+01, 2.8182e+01, 1.9127e+02,  ..., 6.2336e-01,\n",
      "          6.2279e-01, 6.2295e-01],\n",
      "         [3.5204e+01, 1.7983e+01, 1.5601e+02,  ..., 5.0029e-01,\n",
      "          5.0035e-01, 5.0040e-01],\n",
      "         [2.6410e+01, 1.4722e+01, 3.7300e+02,  ..., 5.0009e-01,\n",
      "          5.0014e-01, 5.0011e-01],\n",
      "         ...,\n",
      "         [5.7757e+02, 5.2830e+02, 4.9218e-01,  ..., 1.2972e-07,\n",
      "          6.5837e-07, 5.3637e-08],\n",
      "         [5.7027e+02, 5.1388e+02, 6.7567e-01,  ..., 1.9121e-07,\n",
      "          2.6374e-06, 9.0638e-07],\n",
      "         [5.5042e+02, 5.2316e+02, 5.5528e+00,  ..., 2.3234e-05,\n",
      "          1.0975e-04, 3.0372e-05]]], grad_fn=<CatBackward>)\n"
     ]
    }
   ],
   "source": [
     "# forward pass again, now with the pretrained weights loaded\n",
     "inp = get_test_input()\n",
     "pred = model(inp)\n",
     "print (pred)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "在进行张量处理之前，我们先准备一个 nms 函数来进行非极大值抑制算法，这是很标准的pytorch实现，具体的可以自行查阅资料和原理。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 208,
   "metadata": {},
   "outputs": [],
   "source": [
    "def nms(bboxes, conf_score, thresh):\n",
    "    res_bboxes_idx = []\n",
    "    if len(bboxes):\n",
    "        bbox_x1 = bboxes[:,0]\n",
    "        bbox_x2 = bboxes[:,1]\n",
    "        bbox_y1 = bboxes[:,2]\n",
    "        bbox_y2 = bboxes[:,3]\n",
    "\n",
    "        areas = (bbox_x2 - bbox_x1 + 1) * (bbox_y2 - bbox_y1 + 1)\n",
    "        order = torch.argsort(conf_score, descending=True)\n",
    "\n",
    "        while order.numel() > 0:\n",
    "            if order.numel() == 1:\n",
    "                idx_max_score = order.item()\n",
    "            else:\n",
    "                idx_max_score = order[0].item()\n",
    "            \n",
    "            # append the bbox with max score into result list\n",
    "            res_bboxes_idx.append(idx_max_score)\n",
    "\n",
    "            if order.numel() == 1:\n",
    "                break\n",
    "            \n",
    "            # calc iou\n",
    "            x1 = bbox_x1[order[1:]].clamp(min=bbox_x1[idx_max_score].item())\n",
    "            x2 = bbox_x2[order[1:]].clamp(max=bbox_x2[idx_max_score].item())\n",
    "            y1 = bbox_y1[order[1:]].clamp(min=bbox_y1[idx_max_score].item())\n",
    "            y2 = bbox_y2[order[1:]].clamp(max=bbox_y2[idx_max_score].item())\n",
    "            inter = (x2-x1).clamp(min=1) * (y2-y1).clamp(min=1)\n",
    "\n",
    "            iou = inter /  (areas[idx_max_score]+areas[order[1:]]-inter)\n",
    "            idx = (iou <= thresh).nonzero().squeeze()\n",
    "            if idx.numel() == 0 :\n",
    "                break\n",
    "            order = order[idx+1]\n",
    "\n",
    "    return torch.LongTensor(res_bboxes_idx)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "下面就要开始对预测结果进行处理了，这一部分的操作相对比较复杂，我尽量每一步都给出比较详细的解释"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "我们已经知道，yolo 的输出是一个 bs, BHW, C+5 维的张量，但这是包含了所有单元格、所有预设 anchor 的结果，其中有大量单元格是没有预测框的（即，该单元格预测框的 confidence 小于阈值），我们应当先把这些无效的单元格从我们的张量中删去，减少 NMS 的计算量。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 209,
   "metadata": {},
   "outputs": [],
   "source": [
    "def write_results(pred, confidence, num_classes, nms_thresh=0.4):\n",
    "    # phase 1\n",
    "    conf_mask = (pred[:,:,4] > confidence).float().unsqueeze(2)\n",
    "    pred = pred * conf_mask\n",
    "\n",
    "    # phase 2\n",
    "    bbox_corner = pred.new(pred.shape)\n",
    "    bbox_corner[:,:,0] = pred[:,:,0] - pred[:,:,2]/2\n",
    "    bbox_corner[:,:,1] = pred[:,:,1] - pred[:,:,3]/2\n",
    "    bbox_corner[:,:,3] = pred[:,:,0] + pred[:,:,2]/2\n",
    "    bbox_corner[:,:,3] = pred[:,:,1] + pred[:,:,3]/2\n",
    "    pred[:,:,:4] = bbox_corner[:,:,:4]\n",
    "\n",
    "    # phase 3\n",
    "    bs = pred.size(0)\n",
    "    write = False\n",
    "    for idx in range(bs):\n",
    "        each_img_pred = pred[idx] # BHW, 5+c\n",
    "        non_zero_idx = each_img_pred[:,4].nonzero().squeeze()\n",
    "        each_img_pred = each_img_pred[non_zero_idx,:]\n",
    "\n",
    "    # phase 4\n",
    "        max_conf, pred_class = torch.max(each_img_pred[:,5:5+num_classes], 1)\n",
    "        img_classes = pred_class.unique()\n",
    "\n",
    "    # phase 5\n",
    "        max_conf = max_conf.float().unsqueeze(1)\n",
    "        pred_class = pred_class.float().unsqueeze(1)\n",
    "        seq = (each_img_pred[:,:5], max_conf, pred_class)\n",
    "        each_img_pred = torch.cat(seq, 1)\n",
    "        \n",
    "    # phase 6\n",
    "        for cls in img_classes:\n",
    "            each_cls_pred = each_img_pred[each_img_pred[:,-1] == cls, :]\n",
    "            each_cls_pred_idx = nms(each_cls_pred[:,:4], each_cls_pred[:,4], nms_thresh)\n",
    "            \n",
    "            each_cls_pred = each_cls_pred[each_cls_pred_idx]\n",
    "            \n",
    "            batch_ind = each_cls_pred.new(each_cls_pred.size(0), 1).fill_(idx)\n",
    "            seq = (batch_ind, each_cls_pred)\n",
    "\n",
    "    # phase 7\n",
    "            if not write:\n",
    "                output = torch.cat(seq, 1)\n",
    "                write = True\n",
    "            else:\n",
    "                out = torch.cat(seq, 1)\n",
    "                output = torch.cat((output, out))\n",
    "    try:\n",
    "        return output\n",
    "    except:\n",
    "        return 0"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### phase 1\n",
    "pred[:,:,4]对应的是 bbox 的 confidence， 跟我们设置的 confidence 阈值进行比较就得到了一个 mask。由于 pred[:,:,4]是一个二维张量，所以我们还需要 unsqueeze(2)一下来跟原来的 pred 形状对齐，这样就可以直接用乘法把小于阈值的单元格的预测 bbox 坐标全部设置为0了。"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### phase 2\n",
    "由于我们每一个单元格预测的bbox形式为中心坐标和宽度高度，为了计算方便我们要转换成 bbox 的左上角和右下角点坐标的形式，通过中心坐标加减长宽的一半就能得到左上角和右下角的坐标 (x1,y1), (x2, y2)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### phase 3\n",
    "为了操作方便，我们用一个 for 循环来遍历 batch_size 这一维，这样每一次迭代可以只对一个(BHW, C+5)的二维张量进行操作。\n",
    "\n",
     "由于前面我们把不满足阈值的预测结果都置0了，现在我们要删掉这些无用数据。对于每个二维张量，我们用nonzero()得到非0的行索引，squeeze()是为了让索引以一维形式存放，这样就可以通过each_img_pred[non_zero_idx,:]来拿到所有非0的行形成一个新的张量。"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### phase 4\n",
    "由于我们有5+C 个分类，我们要通过 max 获取置信度最高的类的索引，torch 的 max 方法会同时返回最大值以及最大值所在的位置索引，这个位置索引其实也就对应了预测的分类号。之后我们要对每个类的预测结果分别进行 NMS 操作，由于同一个类可能有大量的预测框，为了遍历方便，我们用 unique()得到唯一的预测类型列表，通过遍历这个列表来对预测的每一类进行非极大值抑制。（我们也可以直接遍历完整的 num_classes 个类，但是每个 batch 里预测的结果是有限的，不太可能出现所有的类，所以通过 unique 只遍历出现的类是更高效的）"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### phase 5\n",
    "为了操作方便，我们把bbox 坐标，bbox 的 confidence，每个bbox 预测的类别置信度，以及对应的类别，这几个信息拼成一个(D, 7)维的张量。D 是去除0后剩下的条数。通过 torch.cat(seq, 1)来进行横向拼接。"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### phase 6\n",
    "用 for循环遍历刚才 unique 得到的所有类别号，用each_img_pred[each_img_pred[:,-1] == cls, :]取到对应类别号的行，然后就可以把这些结果放进 nms 里进行处理了，得到的是满足要求需要保留下来的行的索引，通过索引把不需要保留的内容过滤掉。为了方便批量操作，我们再给结果前面加上一维，存放 batch 号，如此一来结果就是一个(D2, 8)维的张量了。"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### phase 7\n",
    "最终的结果通过 output 来垂直拼接， 由于向量不能跟空向量进行拼接，我们前面设置了一个 write 用来标记是否是第一个创建 output。最终的结果返回时，考虑到可能一张图没有检测到对象的情况，那么就不会有 output 被创建，所以要用 try-except 来捕捉，并在没有 output 时返回0"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "到这里我们就可以用实验图片来测试一下了"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 210,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[ 0.0000e+00,  4.7880e+02,  3.2529e+02, -3.8048e-01,  3.2529e+02,\n",
       "          3.4442e+00,  4.9423e-01,  2.8000e+01],\n",
       "        [ 0.0000e+00,  7.9358e+01,  3.0839e+02, -4.1973e-01,  3.0839e+02,\n",
       "          3.2042e+00,  5.8463e-01,  2.8000e+01],\n",
       "        [ 0.0000e+00,  4.6612e+02,  3.2346e+02,  5.6091e-01,  3.2346e+02,\n",
       "          3.1704e+00,  5.2029e-01,  2.8000e+01],\n",
       "        [ 0.0000e+00,  5.3474e+02,  4.3720e+02, -1.6433e-02,  4.3722e+02,\n",
       "          2.5846e+00,  4.9409e-01,  2.8000e+01],\n",
       "        [ 0.0000e+00,  4.7647e+02,  3.3610e+02, -1.1871e-01,  3.3611e+02,\n",
       "          2.4727e+00,  4.8722e-01,  2.8000e+01],\n",
       "        [ 0.0000e+00,  4.7335e+02,  3.5534e+02, -2.0103e-01,  3.5535e+02,\n",
       "          1.8101e+00,  5.2837e-01,  2.8000e+01],\n",
       "        [ 0.0000e+00,  4.8516e+02,  3.5830e+02, -2.6638e-01,  3.5830e+02,\n",
       "          1.5422e+00,  6.0255e-01,  2.8000e+01],\n",
       "        [ 0.0000e+00,  1.6642e+02,  4.7130e+02,  1.5728e+00,  4.7131e+02,\n",
       "          1.2791e+00,  6.1956e-01,  2.8000e+01],\n",
       "        [ 0.0000e+00,  5.4767e+02,  3.9396e+02,  1.1275e+00,  3.9396e+02,\n",
       "          1.0246e+00,  6.8414e-01,  2.8000e+01],\n",
       "        [ 0.0000e+00,  4.8153e+02,  3.4768e+02, -3.3331e-01,  3.4770e+02,\n",
       "          8.8985e-01,  5.8243e-01,  2.8000e+01],\n",
       "        [ 0.0000e+00,  4.9142e+02,  3.1923e+02,  2.0450e-01,  3.1933e+02,\n",
       "          8.6279e-01,  6.4709e-01,  2.8000e+01],\n",
       "        [ 0.0000e+00,  4.8914e+02,  3.0907e+02, -1.4338e-01,  3.0911e+02,\n",
       "          7.4161e-01,  7.2752e-01,  2.8000e+01],\n",
       "        [ 0.0000e+00,  5.0968e+02,  6.4281e+01, -2.8838e-01,  6.4418e+01,\n",
       "          6.8098e-01,  6.0475e-01,  2.8000e+01],\n",
       "        [ 0.0000e+00,  2.7777e+02,  3.3953e+02,  5.1981e-01,  3.3953e+02,\n",
       "          6.3539e-01,  5.5025e-01,  2.8000e+01],\n",
       "        [ 0.0000e+00,  2.2426e+02,  2.7073e+01,  1.0527e+00,  2.7365e+01,\n",
       "          8.0260e-01,  5.6341e-01,  2.9000e+01],\n",
       "        [ 0.0000e+00,  5.5240e+02,  1.8353e+01,  4.9288e-01,  1.8363e+01,\n",
       "          5.3412e-01,  5.9874e-01,  2.9000e+01],\n",
       "        [ 0.0000e+00, -1.1373e+01, -3.6671e+01, -5.4467e-02,  6.4073e+01,\n",
       "          5.0116e-01,  6.5977e-01,  2.9000e+01],\n",
       "        [ 0.0000e+00, -6.9582e+01, -4.6033e+01,  3.7732e-16,  1.0240e+02,\n",
       "          5.0209e-01,  6.7249e-01,  3.0000e+01],\n",
       "        [ 0.0000e+00,  4.6575e+02,  3.2413e+02, -2.5005e-01,  3.2413e+02,\n",
       "          2.8968e+00,  6.0255e-01,  5.6000e+01],\n",
       "        [ 0.0000e+00,  4.6374e+02,  3.1164e+02,  4.4067e-03,  3.1164e+02,\n",
       "          2.8350e+00,  5.3056e-01,  5.6000e+01],\n",
       "        [ 0.0000e+00,  9.2179e+01,  3.4693e+02,  3.5111e-02,  3.4693e+02,\n",
       "          1.6912e+00,  5.9612e-01,  5.6000e+01],\n",
       "        [ 0.0000e+00,  2.7026e+02,  3.2632e+02,  1.1914e-01,  3.2632e+02,\n",
       "          1.4548e+00,  5.5639e-01,  5.6000e+01],\n",
       "        [ 0.0000e+00,  4.4859e+02,  3.2133e+02,  5.4559e-01,  3.2134e+02,\n",
       "          2.5190e+00,  5.4982e-01,  5.7000e+01],\n",
       "        [ 0.0000e+00,  2.7512e+02,  3.3265e+02,  1.9516e-01,  3.3266e+02,\n",
       "          2.4540e+00,  5.2951e-01,  5.7000e+01],\n",
       "        [ 0.0000e+00,  5.9478e+02, -2.5332e+01, -1.5628e-01, -2.5314e+01,\n",
       "          1.4921e+00,  5.9142e-01,  5.7000e+01],\n",
       "        [ 0.0000e+00,  5.2246e+02, -2.4904e+01,  1.9109e-01, -2.4678e+01,\n",
       "          1.2989e+00,  6.8672e-01,  5.7000e+01],\n",
       "        [ 0.0000e+00,  4.5463e+02,  3.3610e+02, -1.6423e-01,  3.3610e+02,\n",
       "          1.0673e+00,  5.4304e-01,  5.7000e+01],\n",
       "        [ 0.0000e+00,  4.4253e+02, -1.8569e+01,  1.3957e-02, -1.8563e+01,\n",
       "          8.7855e-01,  6.6320e-01,  5.7000e+01],\n",
       "        [ 0.0000e+00,  4.8192e+02, -3.6712e+00,  1.1573e-02, -2.5190e+00,\n",
       "          6.3426e-01,  4.4699e-01,  5.7000e+01],\n",
       "        [ 0.0000e+00, -1.5181e+00, -4.4991e+00, -6.4082e-02,  1.7240e+01,\n",
       "          5.0241e-01,  6.4622e-01,  5.7000e+01],\n",
       "        [ 0.0000e+00,  5.1710e+02,  5.1364e+01,  2.9128e-01,  5.1501e+01,\n",
       "          5.0001e-01,  5.5288e-01,  5.7000e+01]], grad_fn=<CatBackward>)"
      ]
     },
     "execution_count": 210,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# objectness threshold 0.5, 80 COCO classes\n",
     "write_results(pred, 0.5, 80)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.7"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
