{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "source": [
    "import torch\n",
    "\n",
    "x = torch.randn(2,2,3)\n",
    "b = x[:,:,1]\n",
    "print(b.shape)\n",
    "print(b)\n"
   ],
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": [
      "torch.Size([2, 2])\n",
      "tensor([[ 0.8655,  2.5026],\n",
      "        [-0.9581, -1.0368]])\n"
     ]
    }
   ],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "source": [
    "a = torch.arange(1,10,1).reshape(3,3)\n",
    "m1 = (a > 5)\n",
    "m2 = (a <= 5)\n",
    "m = m1 + m2\n",
    "print(m1)\n",
    "print(m2)\n",
    "print(m)\n"
   ],
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": [
      "tensor([[False, False, False],\n",
      "        [False, False,  True],\n",
      "        [ True,  True,  True]])\n",
      "tensor([[ True,  True,  True],\n",
      "        [ True,  True, False],\n",
      "        [False, False, False]])\n",
      "tensor([[True, True, True],\n",
      "        [True, True, True],\n",
      "        [True, True, True]])\n"
     ]
    }
   ],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "source": [
    "a = torch.zeros((1,2))\n",
    "print(a.shape)\n",
    "print(torch.zeros(1,2))"
   ],
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": [
      "torch.Size([1, 2])\n",
      "tensor([[0., 0.]])\n"
     ]
    }
   ],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 27,
   "source": [
    "a = torch.Tensor([1,2,3,4,5,6])\n",
    "_,order = a.sort(dim=0, descending=True)\n",
    "print(order)\n",
    "d = a[order[1:]]\n",
    "print(d)\n",
    "b = (a>4).nonzero()\n",
    "print(b)"
   ],
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": [
      "tensor([5, 4, 3, 2, 1, 0])\n",
      "tensor([5., 4., 3., 2., 1.])\n",
      "tensor([[4],\n",
      "        [5]])\n"
     ]
    }
   ],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "source": [
    "import torch\n",
    "a = torch.Tensor([1])\n",
    "b = torch.Tensor([2])\n",
    "l = [a,b]\n",
    "print(l)\n",
    "c = torch.cat(l, 0)\n",
    "print(c)\n",
    "h = torch.Tensor([1,2,3,4,5,6])\n",
    "# h = torch.rand(1,5)\n",
    "_, aa = torch.max(h, 0)\n",
    "print(aa)\n",
    "print(aa.dtype)\n",
    "aaaa = torch.Tensor([aa])\n",
    "print(aaaa)\n",
    "print(aaaa.dtype)"
   ],
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": [
      "[tensor([1.]), tensor([2.])]\n",
      "tensor([1., 2.])\n",
      "tensor(5)\n",
      "torch.int64\n",
      "tensor([5.])\n",
      "torch.float32\n"
     ]
    }
   ],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "source": [
    "class reorg_layer(nn.Module):\n",
    "    \"\"\"Space-to-depth: move stride x stride spatial blocks into channels.\"\"\"\n",
    "    def __init__(self, stride):\n",
    "        super(reorg_layer, self).__init__()\n",
    "        self.stride = stride\n",
    "\n",
    "    def forward(self, x):\n",
    "        # Fix: the original `batch_size, 512, height, width = x.size()` is a\n",
    "        # SyntaxError (cannot unpack into the literal 512); unpack channels too.\n",
    "        batch_size, channels, height, width = x.size()\n",
    "        _height, _width = height // self.stride, width // self.stride\n",
    "        # e.g. (b, c, 26, 26) -> (b, c*stride*stride, 13, 13); no hard-coded 13/512\n",
    "        x = x.view(batch_size, channels, _height, self.stride, _width, self.stride).transpose(3, 4).contiguous()\n",
    "        x = x.view(batch_size, channels, _height * _width, self.stride * self.stride).transpose(2, 3).contiguous()\n",
    "        x = x.view(batch_size, channels, self.stride * self.stride, _height, _width).transpose(1, 2).contiguous()\n",
    "        x = x.view(batch_size, -1, _height, _width)\n",
    "\n",
    "        return x"
   ],
   "outputs": [],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 36,
   "source": [
    "import torch\n",
    "\n",
    "x = torch.arange(16).reshape((1, 4,4))\n",
    "x1 = x.view( 1, 2, 2, 2, 2)\n",
    "x2 = x1.view( 1, 2, 2, 2, 2).transpose(2,3).contiguous()\n",
    "x3 = x2.view(1, 4, 4).transpose(1,2).contiguous()\n",
    "x4 = x3.view(1, 4, 2, 2).transpose(0, 1).contiguous()\n",
    "x5 = x4.view(-1, 2, 2)\n",
    "\n",
    "print(x)\n",
    "print('x1',x1)\n",
    "print('x2',x2)\n",
    "print('x3',x3)\n",
    "print('x4',x4)\n",
    "print('x5',x5)\n"
   ],
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": [
      "tensor([[[ 0,  1,  2,  3],\n",
      "         [ 4,  5,  6,  7],\n",
      "         [ 8,  9, 10, 11],\n",
      "         [12, 13, 14, 15]]])\n",
      "x1 tensor([[[[[ 0,  1],\n",
      "           [ 2,  3]],\n",
      "\n",
      "          [[ 4,  5],\n",
      "           [ 6,  7]]],\n",
      "\n",
      "\n",
      "         [[[ 8,  9],\n",
      "           [10, 11]],\n",
      "\n",
      "          [[12, 13],\n",
      "           [14, 15]]]]])\n",
      "x2 tensor([[[[[ 0,  1],\n",
      "           [ 4,  5]],\n",
      "\n",
      "          [[ 2,  3],\n",
      "           [ 6,  7]]],\n",
      "\n",
      "\n",
      "         [[[ 8,  9],\n",
      "           [12, 13]],\n",
      "\n",
      "          [[10, 11],\n",
      "           [14, 15]]]]])\n",
      "x3 tensor([[[ 0,  2,  8, 10],\n",
      "         [ 1,  3,  9, 11],\n",
      "         [ 4,  6, 12, 14],\n",
      "         [ 5,  7, 13, 15]]])\n",
      "x4 tensor([[[[ 0,  2],\n",
      "          [ 8, 10]]],\n",
      "\n",
      "\n",
      "        [[[ 1,  3],\n",
      "          [ 9, 11]]],\n",
      "\n",
      "\n",
      "        [[[ 4,  6],\n",
      "          [12, 14]]],\n",
      "\n",
      "\n",
      "        [[[ 5,  7],\n",
      "          [13, 15]]]])\n",
      "x5 tensor([[[ 0,  2],\n",
      "         [ 8, 10]],\n",
      "\n",
      "        [[ 1,  3],\n",
      "         [ 9, 11]],\n",
      "\n",
      "        [[ 4,  6],\n",
      "         [12, 14]],\n",
      "\n",
      "        [[ 5,  7],\n",
      "         [13, 15]]])\n"
     ]
    }
   ],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 39,
   "source": [
    "import torch\n",
    "b = torch.Tensor([0,1,2,3,4,5,6])\n",
    "print(b[0::3])"
   ],
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": [
      "tensor([0., 3., 6.])\n"
     ]
    }
   ],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "source": [
    "import numpy as np\n",
    "a = np.array([[1,2,3], [4,9,6]])\n",
    "b = np.array([1,2,3,4,5])\n",
    "\n",
    "c = np.where(b > 3)[0]\n",
    "e = (b >3).nonzero()\n",
    "print(c,e)\n",
    "d = b.argsort()[::-1]\n",
    "print(d)\n",
    "d = d[[1,2,3]]\n",
    "print(d)\n"
   ],
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": [
      "[3 4] (array([3, 4]),)\n",
      "[4 3 2 1 0]\n",
      "[3 2 1]\n"
     ]
    }
   ],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "source": [
    "a = np.array([[1,2,3,4],[5,6,7,8]])\n",
    "b = a[:, 2]\n",
    "print(b)\n",
    "print(b.shape)\n",
    "l = [3, 7]\n",
    "print(l)"
   ],
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": [
      "[3 7]\n",
      "(2,)\n",
      "[3, 7]\n"
     ]
    }
   ],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 58,
   "source": [
    "import torch\n",
    "class A:\n",
    "    a = 5\n",
    "    def __init__(self) -> None:\n",
    "        self.b = 8\n",
    "print(A.a)\n",
    "s = A()\n",
    "print(A.b)"
   ],
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": [
      "5\n"
     ]
    },
    {
     "output_type": "error",
     "ename": "AttributeError",
     "evalue": "type object 'A' has no attribute 'b'",
     "traceback": [
      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[0;31mAttributeError\u001b[0m                            Traceback (most recent call last)",
      "\u001b[0;32m<ipython-input-58-b6d554c2610b>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[1;32m      6\u001b[0m \u001b[0mprint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mA\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0ma\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m      7\u001b[0m \u001b[0ms\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mA\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 8\u001b[0;31m \u001b[0mprint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mA\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mb\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m",
      "\u001b[0;31mAttributeError\u001b[0m: type object 'A' has no attribute 'b'"
     ]
    }
   ],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 61,
   "source": [
    "import numpy as np\n",
    "# np.int was removed in NumPy 1.24; np.int64 keeps the printed dtype (int64).\n",
    "inp_size = np.array([416, 416], dtype=np.int64)   # input image size\n",
    "print(inp_size.dtype)\n",
    "out_size = inp_size / 32\n",
    "out_size = np.array(out_size, dtype=np.int64)\n",
    "print(out_size.dtype)"
   ],
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": [
      "int64\n",
      "int64\n"
     ]
    }
   ],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 64,
   "source": [
    "import torch\n",
    "import numpy as np\n",
    "import torch.nn as nn\n",
    "import torch.nn.functional as F\n",
    "import cv2\n",
    "import torchvision.transforms as transforms\n",
    "from torch.autograd import Variable\n",
    "\n",
    "class Config:\n",
    "    # np.int was removed in NumPy 1.24; use np.int64 explicitly.\n",
    "    inp_size = np.array([416, 416], dtype=np.int64)   # input image size\n",
    "    out_size = inp_size / 32                          # output feature-map size of the network\n",
    "    out_size = np.array(out_size, dtype=np.int64)\n",
    "    thresh = 0.5  # Pr(Object)*Pr(Class_i|Object)\n",
    "    nms_thresh = 0.3\n",
    "    \n",
    "    def __init__(self) -> None:\n",
    "        pass\n",
    "\n",
    "# VOC\n",
    "class VOC:\n",
    "    def __init__(self) -> None:\n",
    "        self.label_names = ('aeroplane', 'bicycle', 'bird', 'boat',\n",
    "                'bottle', 'bus', 'car', 'cat', 'chair',\n",
    "                'cow', 'diningtable', 'dog', 'horse',\n",
    "                'motorbike', 'person', 'pottedplant',\n",
    "                'sheep', 'sofa', 'train', 'tvmonitor')\n",
    "        self.num_classes = len(self.label_names)\n",
    "\n",
    "        self.anchors = np.asarray([(1.08, 1.19), (3.42, 4.41),\n",
    "                        (6.63, 11.38), (9.42, 5.11), (16.62, 10.52)],\n",
    "                        dtype=np.float64)  # np.float alias was removed in NumPy 1.24\n",
    "        self.num_anchors = len(self.anchors)\n",
    "        base = int(np.ceil(pow(self.num_classes, 1. / 3)))\n",
    "        colors = [self._to_color(x, base) for x in range(self.num_classes)]\n",
    "    def _to_color( self, indx, base):\n",
    "        base2 = base * base\n",
    "        b = 2 - indx / base2\n",
    "        r = 2 - (indx % base2) / base\n",
    "        g = 2 - (indx % base2) % base\n",
    "        return b * 127, r * 127, g * 127\n",
    "voc = VOC()\n",
    "\n",
    "class Conv2d(nn.Module):\n",
    "    def __init__(self, in_channels, out_channels, kernel_size, stride=1,\n",
    "                 relu=True, same_padding=False):\n",
    "        super(Conv2d, self).__init__()\n",
    "        padding = int((kernel_size - 1) / 2) if same_padding else 0\n",
    "        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size,\n",
    "                              stride, padding=padding)\n",
    "        self.relu = nn.LeakyReLU(0.1, inplace=True) if relu else None\n",
    "\n",
    "    def forward(self, x):\n",
    "        x = self.conv(x)\n",
    "        if self.relu is not None:\n",
    "            x = self.relu(x)\n",
    "        return x\n",
    "\n",
    "class Conv2d_BatchNorm(nn.Module):\n",
    "    \"\"\"Conv -> BatchNorm -> optional LeakyReLU; conv has no bias (BN supplies the shift).\"\"\"\n",
    "    def __init__(self, in_channels, out_channes, kernel_size, stride=1,\n",
    "                 relu=True, same_padding=False):\n",
    "        super(Conv2d_BatchNorm, self).__init__()\n",
    "        padding = int((kernel_size-1) / 2) if same_padding else 0   # with stride=1 this padding keeps the spatial size unchanged\n",
    "\n",
    "        self.conv = nn.Conv2d(in_channels, out_channes, kernel_size,\n",
    "                              stride, padding=padding, bias=False)\n",
    "        self.bn = nn.BatchNorm2d(out_channes, momentum=0.01)\n",
    "        self.relu = nn.LeakyReLU(0.1, inplace=True) if relu else None\n",
    "    \n",
    "    def forward(self, x):\n",
    "        x = self.conv(x)\n",
    "        x = self.bn(x)\n",
    "        if self.relu is not None:\n",
    "            x = self.relu(x)\n",
    "        return x\n",
    "\n",
    "class FC(nn.Module):\n",
    "    def __init__(self, in_features, out_features, relu=True):\n",
    "        super(FC, self).__init__()\n",
    "        self.fc = nn.Linear(in_features, out_features)\n",
    "        self.relu = nn.ReLU(inplace=True) if relu else None\n",
    "\n",
    "    def forward(self, x):\n",
    "        x = self.fc(x)\n",
    "        if self.relu is not None:\n",
    "            x = self.relu(x)\n",
    "        return x\n",
    "\n",
    "class reorg_layer(nn.Module):\n",
    "    def __init__(self, stride):\n",
    "        super().__init__()\n",
    "        self.stride = stride\n",
    "    def forward(self, x):\n",
    "        batch_size, channels, height, width = x.size()\n",
    "        _height, _width = height // self.stride, width // self.stride\n",
    "        x = x.view(batch_size, channels, _height, self.stride, _width, self.stride).transpose(3, 4).contiguous()\n",
    "        x = x.view(batch_size, channels, _height * _width, self.stride * self.stride).transpose(2, 3).contiguous()\n",
    "        x = x.view(batch_size, channels, self.stride * self.stride, _height, _width).transpose(1, 2).contiguous()\n",
    "        x = x.view(batch_size, -1, _height, _width)\n",
    "\n",
    "        return x\n",
    "\n",
    "\n",
    "class Darknet19(nn.Module):\n",
    "    \"\"\"YOLOv2 Darknet-19 backbone with a passthrough (reorg) connection.\"\"\"\n",
    "    def __init__(self):\n",
    "        super(Darknet19, self).__init__()\n",
    "        # Each entry is either 'M' (2x2 max-pool) or (out_channels, kernel_size).\n",
    "        net_cfgs = [\n",
    "            # conv1s -- cfg indices 0-4\n",
    "            [(32, 3)],\n",
    "            ['M', (64, 3)],\n",
    "            ['M', (128, 3), (64, 1), (128, 3)],\n",
    "            ['M', (256, 3), (128, 1), (256, 3)],\n",
    "            ['M', (512, 3), (256, 1), (512, 3), (256, 1), (512, 3)],\n",
    "            # conv2 -- cfg index 5\n",
    "            ['M', (1024, 3), (512, 1), (1024, 3), (512, 1), (1024, 3)],\n",
    "            # ------------\n",
    "            # conv3 -- cfg index 6\n",
    "            [(1024, 3), (1024, 3)],\n",
    "            # conv4 -- cfg index 7\n",
    "            [(1024, 3)]\n",
    "        ]\n",
    "        # darknet backbone\n",
    "        self.conv1s, c1 = self._make_layers(3, net_cfgs[0:5])\n",
    "        self.conv2, c2 = self._make_layers(c1, net_cfgs[5])\n",
    "        self.conv3, c3 = self._make_layers(c2, net_cfgs[6])\n",
    "        stride=2\n",
    "        self.reorg = reorg_layer(stride=2)\n",
    "        # reorg multiplies channels by stride*stride before concatenation with conv3\n",
    "        self.conv4, c4 = self._make_layers((c1*(stride*stride) + c3), net_cfgs[7])\n",
    "\n",
    "        # prediction head: 1x1 conv without activation (acts as the \"linear\" layer)\n",
    "        out_channels = voc.num_anchors * (voc.num_classes + 5)\n",
    "        self.conv5 = Conv2d(c4, out_channels, 1, 1, relu=False)\n",
    "        self.global_average_pool = nn.AvgPool2d((1, 1))\n",
    "\n",
    "    def _make_layers(self, in_channels, net_cfg):\n",
    "        layers = []\n",
    "\n",
    "        if len(net_cfg) > 0 and isinstance(net_cfg[0], list):\n",
    "            for sub_cfg in net_cfg:\n",
    "                layer, in_channels = self._make_layers(in_channels, sub_cfg)\n",
    "                layers.append(layer)\n",
    "        else:\n",
    "            for item in net_cfg:\n",
    "                if item == 'M':\n",
    "                    layers.append(nn.MaxPool2d(kernel_size=2, stride=2))\n",
    "                else:\n",
    "                    out_channels, ksize = item\n",
    "                    layers.append(Conv2d_BatchNorm(in_channels, out_channels, ksize,same_padding=True))\n",
    "                    in_channels = out_channels\n",
    "        return nn.Sequential(*layers), in_channels\n",
    "    \n",
    "    def forward(self, im_data, gt_boxes=None, gt_classes=None, dontcare=None,\n",
    "                size_index=0):\n",
    "        conv1s = self.conv1s(im_data)\n",
    "        conv2 = self.conv2(conv1s)\n",
    "        conv3 = self.conv3(conv2)\n",
    "        conv1s_reorg = self.reorg(conv1s)\n",
    "        cat_1_3 = torch.cat([conv1s_reorg, conv3], 1)\n",
    "        conv4 = self.conv4(cat_1_3)\n",
    "        conv5 = self.conv5(conv4)   # batch_size, out_channels, h, w\n",
    "        global_average_pool = self.global_average_pool(conv5)   # 1x1的平均池化有什么用\n",
    "\n",
    "        bsize, _, h, w = global_average_pool.size()\n",
    "        # [B, num_anchor*25, H, W]->[B, H, W, num_anchor*25]-->[B, H*W, num_anchor, 25]\n",
    "        global_average_pool_reshaped = \\\n",
    "            global_average_pool.permute(0, 2, 3, 1).contiguous().view(bsize, -1, voc.num_anchors, voc.num_classes + 5)\n",
    "        # sigmoid keeps x/y in (0, 1) so the offset box center always stays inside its cell;\n",
    "        # F.sigmoid is deprecated -- torch.sigmoid is the supported equivalent.\n",
    "        xy_pred = torch.sigmoid(global_average_pool_reshaped[:, :, :, 0:2])\n",
    "        wh_pred = torch.exp(global_average_pool_reshaped[:, :, :, 2:4])\n",
    "        bbox_pred = torch.cat([xy_pred, wh_pred], 3)\n",
    "        iou_pred = torch.sigmoid(global_average_pool_reshaped[:, :, :, 4:5])\n",
    "        score_pred = global_average_pool_reshaped[:, :, :, 5:].contiguous()\n",
    "        # dim=1 makes the per-row softmax explicit (matches the old implicit dim for 2-D input)\n",
    "        prob_pred = F.softmax(score_pred.view(-1, score_pred.size()[-1]), dim=1).view_as(score_pred)\n",
    "\n",
    "        return bbox_pred, iou_pred, prob_pred"
   ],
   "outputs": [],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 66,
   "source": [
    "net = Darknet19()\n",
    "for k, v in list(net.state_dict().items()):\n",
    "    if 'num_batches_tracked' in k:\n",
    "        continue\n",
    "    print(k)\n",
    "    # param = torch.from_numpy(np.asarray(h5f[k]))\n",
    "    # v.copy_(param)"
   ],
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": [
      "conv1s.0.0.conv.weight\n",
      "conv1s.0.0.bn.weight\n",
      "conv1s.0.0.bn.bias\n",
      "conv1s.0.0.bn.running_mean\n",
      "conv1s.0.0.bn.running_var\n",
      "conv1s.1.1.conv.weight\n",
      "conv1s.1.1.bn.weight\n",
      "conv1s.1.1.bn.bias\n",
      "conv1s.1.1.bn.running_mean\n",
      "conv1s.1.1.bn.running_var\n",
      "conv1s.2.1.conv.weight\n",
      "conv1s.2.1.bn.weight\n",
      "conv1s.2.1.bn.bias\n",
      "conv1s.2.1.bn.running_mean\n",
      "conv1s.2.1.bn.running_var\n",
      "conv1s.2.2.conv.weight\n",
      "conv1s.2.2.bn.weight\n",
      "conv1s.2.2.bn.bias\n",
      "conv1s.2.2.bn.running_mean\n",
      "conv1s.2.2.bn.running_var\n",
      "conv1s.2.3.conv.weight\n",
      "conv1s.2.3.bn.weight\n",
      "conv1s.2.3.bn.bias\n",
      "conv1s.2.3.bn.running_mean\n",
      "conv1s.2.3.bn.running_var\n",
      "conv1s.3.1.conv.weight\n",
      "conv1s.3.1.bn.weight\n",
      "conv1s.3.1.bn.bias\n",
      "conv1s.3.1.bn.running_mean\n",
      "conv1s.3.1.bn.running_var\n",
      "conv1s.3.2.conv.weight\n",
      "conv1s.3.2.bn.weight\n",
      "conv1s.3.2.bn.bias\n",
      "conv1s.3.2.bn.running_mean\n",
      "conv1s.3.2.bn.running_var\n",
      "conv1s.3.3.conv.weight\n",
      "conv1s.3.3.bn.weight\n",
      "conv1s.3.3.bn.bias\n",
      "conv1s.3.3.bn.running_mean\n",
      "conv1s.3.3.bn.running_var\n",
      "conv1s.4.1.conv.weight\n",
      "conv1s.4.1.bn.weight\n",
      "conv1s.4.1.bn.bias\n",
      "conv1s.4.1.bn.running_mean\n",
      "conv1s.4.1.bn.running_var\n",
      "conv1s.4.2.conv.weight\n",
      "conv1s.4.2.bn.weight\n",
      "conv1s.4.2.bn.bias\n",
      "conv1s.4.2.bn.running_mean\n",
      "conv1s.4.2.bn.running_var\n",
      "conv1s.4.3.conv.weight\n",
      "conv1s.4.3.bn.weight\n",
      "conv1s.4.3.bn.bias\n",
      "conv1s.4.3.bn.running_mean\n",
      "conv1s.4.3.bn.running_var\n",
      "conv1s.4.4.conv.weight\n",
      "conv1s.4.4.bn.weight\n",
      "conv1s.4.4.bn.bias\n",
      "conv1s.4.4.bn.running_mean\n",
      "conv1s.4.4.bn.running_var\n",
      "conv1s.4.5.conv.weight\n",
      "conv1s.4.5.bn.weight\n",
      "conv1s.4.5.bn.bias\n",
      "conv1s.4.5.bn.running_mean\n",
      "conv1s.4.5.bn.running_var\n",
      "conv2.1.conv.weight\n",
      "conv2.1.bn.weight\n",
      "conv2.1.bn.bias\n",
      "conv2.1.bn.running_mean\n",
      "conv2.1.bn.running_var\n",
      "conv2.2.conv.weight\n",
      "conv2.2.bn.weight\n",
      "conv2.2.bn.bias\n",
      "conv2.2.bn.running_mean\n",
      "conv2.2.bn.running_var\n",
      "conv2.3.conv.weight\n",
      "conv2.3.bn.weight\n",
      "conv2.3.bn.bias\n",
      "conv2.3.bn.running_mean\n",
      "conv2.3.bn.running_var\n",
      "conv2.4.conv.weight\n",
      "conv2.4.bn.weight\n",
      "conv2.4.bn.bias\n",
      "conv2.4.bn.running_mean\n",
      "conv2.4.bn.running_var\n",
      "conv2.5.conv.weight\n",
      "conv2.5.bn.weight\n",
      "conv2.5.bn.bias\n",
      "conv2.5.bn.running_mean\n",
      "conv2.5.bn.running_var\n",
      "conv3.0.conv.weight\n",
      "conv3.0.bn.weight\n",
      "conv3.0.bn.bias\n",
      "conv3.0.bn.running_mean\n",
      "conv3.0.bn.running_var\n",
      "conv3.1.conv.weight\n",
      "conv3.1.bn.weight\n",
      "conv3.1.bn.bias\n",
      "conv3.1.bn.running_mean\n",
      "conv3.1.bn.running_var\n",
      "conv4.0.conv.weight\n",
      "conv4.0.bn.weight\n",
      "conv4.0.bn.bias\n",
      "conv4.0.bn.running_mean\n",
      "conv4.0.bn.running_var\n",
      "conv5.conv.weight\n",
      "conv5.conv.bias\n"
     ]
    }
   ],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "source": [
    "import h5py\n",
    "import numpy as np\n",
    "\n",
    "h5f = h5py.File('/home/yinzp/gitee/ckpt/yolo-voc.weights.h5', mode='r')\n",
    "net = Darknet19()\n",
    "for k, v in list(net.state_dict().items()):\n",
    "    if 'num_batches_tracked' in k:\n",
    "        continue\n",
    "    param = torch.from_numpy(np.asarray(h5f[k]))\n",
    "    v.copy_(param)"
   ],
   "outputs": [],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "source": [
    "from collections import OrderedDict\n",
    "\n",
    "l = []\n",
    "l.append(('a', 97))\n",
    "c = OrderedDict(l)\n",
    "print(c)\n",
    "for k, v in c.items():\n",
    "    print(k, v)"
   ],
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": [
      "OrderedDict([('a', 97)])\n",
      "a 97\n"
     ]
    }
   ],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 31,
   "source": [
    "import torch\n",
    "\n",
    "an = torch.Tensor([[1,2], [3,4], [5,6]])\n",
    "a = an.index_select(1, torch.LongTensor([0]))\n",
    "print(a)"
   ],
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": [
      "tensor([[1.],\n",
      "        [3.],\n",
      "        [5.]])\n"
     ]
    }
   ],
   "metadata": {}
  }
 ],
 "metadata": {
  "orig_nbformat": 4,
  "language_info": {
   "name": "python",
   "version": "3.6.9",
   "mimetype": "text/x-python",
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "pygments_lexer": "ipython3",
   "nbconvert_exporter": "python",
   "file_extension": ".py"
  },
  "kernelspec": {
   "name": "python3",
   "display_name": "Python 3.6.9 64-bit"
  },
  "interpreter": {
   "hash": "31f2aee4e71d21fbe5cf8b01ff0e069b9275f58929596ceb00d14d90e3e16cd6"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}