{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Fast RCNN\n",
    "总体流程：\n",
    "1. 将图片插值，填充成长和宽为32的倍数\n",
     "2. 将填充后的图片打包成一个batch，送入网络提取特征并进行后续的检测\n",
    "## RCNN存在的问题\n",
    "1. 训练分多步\n",
    "2. 时间和内存消耗大：在训练SVM和回归的时候需要用网络训练的特征作为输入，特征保存在磁盘上再读入的时间消耗还是比较大的\n",
    "3. 测试慢：每张图片的每个region proposal都要做卷积，重复操作太多"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import time\n",
    "import warnings\n",
    "from torch.utils.data import Dataset\n",
    "import os\n",
    "import torch\n",
    "import torchvision\n",
    "import torch.nn as nn\n",
    "from torch.nn import functional as F\n",
    "import json\n",
    "from PIL import Image\n",
    "from lxml import etree\n",
    "import math\n",
    "from typing import List, Tuple, Dict, Optional\n",
    "from torch import nn, Tensor\n",
    "from collections import OrderedDict\n",
    "from torchvision.ops import MultiScaleRoIAlign\n",
    "from torchvision.ops.misc import FrozenBatchNorm2d"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 数据集制作"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "class VOCDataSet(Dataset):\n",
    "    \"\"\"\n",
    "    输入索引值，输出一张image和对应的target\n",
    "    Inputs:\n",
    "        idx\n",
    "    Outputs:\n",
    "        image:\n",
    "        target[Dict[str, tensor]]：{\"boxes\": tensor, labels:,...}\n",
    "    \"\"\"\n",
    "    def __init__(self, voc_root, year=\"2012\", transforms=None, txt_name: str = \"train.txt\"):\n",
    "        assert year in [\"2007\", \"2012\"], \"year must be in ['2007', '2012']\"\n",
    "        self.root = os.path.join(voc_root, \"VOCdevkit\", f\"VOC{year}\")\n",
    "        self.img_root = os.path.join(self.root, \"JPEGImages\")\n",
    "        self.annotations_root = os.path.join(self.root, \"Annotations\")\n",
    "\n",
    "        # train.txt地址，里面\n",
    "        txt_path = os.path.join(self.root, \"ImageSets\", \"Main\", txt_name)\n",
    "        assert os.path.exists(txt_path), \"not found {} file.\".format(txt_name)\n",
    "\n",
    "        with open(txt_path) as read:\n",
    "            self.xml_list = [os.path.join(self.annotations_root, line.strip() + \".xml\")\n",
    "                             for line in read.readlines() if len(line.strip()) > 0]\n",
    "\n",
    "        # check file\n",
    "        assert len(self.xml_list) > 0, \"in '{}' file does not find any information.\".format(txt_path)\n",
    "        for xml_path in self.xml_list:\n",
    "            assert os.path.exists(xml_path), \"not found '{}' file.\".format(xml_path)\n",
    "\n",
    "        # read class_indict\n",
    "        json_file = os.path.join(self.root, \"pascal_voc_classes.json\")\n",
    "        # json_file = './pascal_voc_classes.json'\n",
    "        assert os.path.exists(json_file), \"{} file not exist.\".format(json_file)\n",
    "        json_file = open(json_file, 'r')\n",
    "        self.class_dict = json.load(json_file)\n",
    "        json_file.close()\n",
    "\n",
    "        self.transforms = transforms\n",
    "\n",
    "    def __len__(self):\n",
    "        return len(self.xml_list)\n",
    "\n",
    "    def __getitem__(self, idx):\n",
    "        # 读取xml\n",
    "        xml_path = self.xml_list[idx]\n",
    "        with open(xml_path) as fid:\n",
    "            xml_str = fid.read()\n",
    "        xml = etree.fromstring(xml_str)\n",
    "        data = self.parse_xml_to_dict(xml)[\"annotation\"]\n",
    "        img_path = os.path.join(self.img_root, data[\"filename\"])\n",
    "        image = Image.open(img_path)\n",
    "        if image.format != \"JPEG\":\n",
    "            raise ValueError(\"Image '{}' format not JPEG\".format(img_path))\n",
    "\n",
    "        boxes = []\n",
    "        labels = []\n",
    "        iscrowd = []\n",
    "        assert \"object\" in data, \"{} lack of object information.\".format(xml_path)\n",
    "        for obj in data[\"object\"]:\n",
    "            xmin = float(obj[\"bndbox\"][\"xmin\"])\n",
    "            xmax = float(obj[\"bndbox\"][\"xmax\"])\n",
    "            ymin = float(obj[\"bndbox\"][\"ymin\"])\n",
    "            ymax = float(obj[\"bndbox\"][\"ymax\"])\n",
    "\n",
    "            # 进一步检查数据，有的标注信息中可能有w或h为0的情况，这样的数据会导致计算回归loss为nan\n",
    "            if xmax <= xmin or ymax <= ymin:\n",
    "                print(\"Warning: in '{}' xml, there are some bbox w/h <=0\".format(xml_path))\n",
    "                continue\n",
    "            \n",
    "            boxes.append([xmin, ymin, xmax, ymax])\n",
    "            labels.append(self.class_dict[obj[\"name\"]])\n",
    "            if \"difficult\" in obj:\n",
    "                iscrowd.append(int(obj[\"difficult\"]))\n",
    "            else:\n",
    "                iscrowd.append(0)\n",
    "\n",
    "        # convert everything into a torch.Tensor\n",
    "        boxes = torch.as_tensor(boxes, dtype=torch.float32)\n",
    "        labels = torch.as_tensor(labels, dtype=torch.int64)\n",
    "        iscrowd = torch.as_tensor(iscrowd, dtype=torch.int64)\n",
    "        image_id = torch.tensor([idx])\n",
    "        area = (boxes[:, 3] - boxes[:, 1]) * (boxes[:, 2] - boxes[:, 0])\n",
    "\n",
    "        target = {}\n",
    "        target[\"boxes\"] = boxes\n",
    "        target[\"labels\"] = labels\n",
    "        target[\"image_id\"] = image_id\n",
    "        target[\"area\"] = area\n",
    "        target[\"iscrowd\"] = iscrowd\n",
    "\n",
    "        if self.transforms is not None:\n",
    "            image, target = self.transforms(image, target)\n",
    "\n",
    "        return image, target\n",
    "\n",
    "    def get_height_and_width(self, idx):\n",
    "        # read xml\n",
    "        xml_path = self.xml_list[idx]\n",
    "        with open(xml_path) as fid:\n",
    "            xml_str = fid.read()\n",
    "        xml = etree.fromstring(xml_str)\n",
    "        data = self.parse_xml_to_dict(xml)[\"annotation\"]\n",
    "        data_height = int(data[\"size\"][\"height\"])\n",
    "        data_width = int(data[\"size\"][\"width\"])\n",
    "        return data_height, data_width\n",
    "        \n",
    "    \n",
    "    def parse_xml_to_dict(self, xml):\n",
    "\n",
    "        if len(xml) == 0:\n",
    "            return {xml.tag:xml.text}\n",
    "        \n",
    "        result = {}\n",
    "        for child in xml:\n",
    "            child_result = self.parse_xml_to_dict(child)\n",
    "            if child.tag != 'object':\n",
    "                result[child.tag] = child_result[child.tag]\n",
    "            else:\n",
    "                if child.tag not in result:\n",
    "                    result[child.tag] = []\n",
    "                result[child.tag].append(child_result[child.tag])\n",
    "        return {xml.tag:result}\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 图像预处理"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "class ImageList(object):\n",
    "    def __init__(self, tensors, image_sizes):\n",
    "        \"\"\"\n",
    "\n",
    "        :param tensors:\n",
    "        :param image_sizes: 插值后，填充前的尺寸\n",
    "        \"\"\"\n",
    "        # type: (Tensor, List[tuple[int, int]]) -> None\n",
    "        self.tensors = tensors\n",
    "        self.image_sizes = image_sizes\n",
    "    \n",
    "    def to(self, device):\n",
    "        # type: (Device) -> ImageList # noqa\n",
    "        cast_tensor = self.tensors.to(device)\n",
    "        return ImageList(cast_tensor, self.image_sizes)\n",
    "\n",
    "def resize_boxes(boxes, original_size, new_size):\n",
    "    \"\"\"\n",
    "    将输入的boxes中，对应于original_size的位置坐标转化为对应于new_size\n",
    "    Args:\n",
    "        boxes:Tensor:输入的bbox[n, 4]\n",
    "        original_size:Tuple[int,int]：图片缩放前尺寸\n",
    "        new_size:Tuple[int, int]：缩放后尺寸\n",
    "    \"\"\"\n",
    "    ratios = [\n",
    "        torch.tensor(s, dtype=torch.float32, device=boxes.device) /\n",
    "        torch.tensor(s_orig, dtype=torch.float32, device=boxes.device)\n",
    "        for s, s_orig in zip(new_size, original_size)\n",
    "    ]\n",
    "\n",
    "    ratios_height, ratios_width = ratios\n",
    "    # Removes a tensor dimension, boxes [minibatch, 4]\n",
    "    # Returns a tuple of all slices along a given dimension, already without it.\n",
    "    xmin, ymin, xmax, ymax = boxes.unbind(1)\n",
    "    xmin = xmin * ratios_width\n",
    "    xmax = xmax * ratios_width\n",
    "    ymin = ymin * ratios_height\n",
    "    ymax = ymax * ratios_height\n",
    "    return torch.stack((xmin, ymin, xmax, ymax), dim=1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "class GeneralizedRCNNTransform(nn.Module):\n",
    "    \"\"\"\n",
    "    输入的batch中的各张图片尺寸可能不一样，处理流程为：(1-2步对输入图片和标签都要进行，3无需对标签进行)\n",
    "    1. 将batch中的图片分别进行标准化；\n",
    "    2. 保持长宽比的缩放（双线性插值）：\n",
    "        A. 最短边缩放至网络最小输入尺寸\n",
    "        B. 如果按照A操作时，最长边超过网络最大输入尺寸，则进一步将图片最长边缩小之网络最大输入尺寸（此时最短边会比网络最小输入尺寸小）\n",
    "    3. 将batch中各张插值完的图片进行右边和左边的0填充，h和w统一向上填充为32的整数倍，使得batch中的图片尺寸都相同\n",
    "    输入：\n",
    "        images:一个batch的图片\n",
    "    返回：\n",
    "        image_list：插值和填充完的batch_images，以及插值后、填充前的各个图片的h和w\n",
    "    \"\"\"\n",
    "    def __init__(self, min_size, max_size, image_mean, image_std):\n",
    "        super(GeneralizedRCNNTransform, self).__init__()\n",
    "        if not isinstance(min_size, (list, tuple)):\n",
    "            min_size = (min_size,)\n",
    "        self.min_size = min_size        #指定输入网络的图像最小边长\n",
    "        self.max_size = max_size        #指定输入网络的图像最大边长\n",
    "        self.image_mean = image_mean\n",
    "        self.image_std = image_std\n",
    "    \n",
    "    def normalize(self, image):\n",
    "        \"\"\"标准化处理\"\"\"\n",
    "        dtype, device = image.dtype, image.device\n",
    "        mean = torch.as_tensor(self.image_mean, dtype=dtype, device=device)\n",
    "        std = torch.as_tensor(self.image_std, dtype=dtype, device=device)\n",
    "        # [:, None, None]: shape [3] -> [3, 1, 1]\n",
    "        return (image - mean[:, None, None]) / std[:, None, None]\n",
    "\n",
    "    def torch_choice(self, k):\n",
    "        index = int(torch.empty(1).uniform_(0., float(len(k))).item())\n",
    "        return k[index]\n",
    "\n",
    "    def resize(self, image, target):\n",
    "        \"\"\"将输入图像的短边缩放成网络最低输入尺寸或者长边缩放成最大尺寸，另一条边自适应。保持了图片原有的长宽比\n",
    "        输入：一张图片\n",
    "        \"\"\"\n",
    "        h, w = image.shape[-2:]\n",
    "        im_shape = torch.tensor(image.shape[-2:])\n",
    "        short_edge = float(torch.min(im_shape))   #获取长宽中的最小值\n",
    "        long_edge = float(torch.max(im_shape))   #获取长宽中的最大值\n",
    "\n",
    "        if self.training:\n",
    "            size = float(self.torch_choice(self.min_szie))  \n",
    "        else:\n",
    "            # FIXME assume for now that testing uses the largest scale\n",
    "            size = float(self.min_size[-1])    # 指定输入图片的最小边长,注意是self.min_size不是min_size\n",
    "\n",
    "        scale_factor = size / short_edge\n",
    "        if long_edge * scale_factor > self.max_size:    #如果将短边缩放成最小尺寸时，长边超出了\n",
    "            scale_factor = self.max_size / long_edge    #就将长边缩放成最大尺寸\n",
    "        image = torch.nn.functional.interpolate(\n",
    "            image[None], scale_factor=scale_factor, mode='bilinear', align_corners=False)[0]\n",
    "        \n",
    "        if target is None:          #验证模式\n",
    "            return image, target\n",
    "        # 训练模式，对标签的GT也进行缩放\n",
    "        bbox = target[\"bboxes\"]\n",
    "        bbox = resize_boxes(bbox, (h, w), image.shape[-2:])\n",
    "        target[\"boxes\"] = bbox\n",
    "\n",
    "        return image, target\n",
    "\n",
    "    def batch_images(self, images, size_divisible=32):\n",
    "        \"\"\"batch中的图片在保持原长宽比缩放至网络限定输入尺寸之内后，再填充为离32最近的整数倍\n",
    "        每张图片的大小可能不一样\n",
    "        \"\"\"\n",
    "        # TODO 为什么是32倍\n",
    "        max_size = self.max_by_axis([list(img.shape) for img in images])    #max_size中包含着一个batch图片中最大的channels,w,h\n",
    "\n",
    "        stride = float(size_divisible)\n",
    "\n",
    "        # max_size = list(max_size)\n",
    "        # 将height向上调整到stride的整数倍\n",
    "        max_size[1] = int(math.ceil(float(max_size[1]) / stride) * stride)\n",
    "        # 将width向上调整到stride的整数倍\n",
    "        max_size[2] = int(math.ceil(float(max_size[2]) / stride) * stride)\n",
    "        batch_shape = [len(images)] + max_size\n",
    "\n",
    "        batched_imgs = images[0].new_full(batch_shape, 0)\n",
    "        for img, pad_img in zip(images, batched_imgs):\n",
    "            # 将输入images中的每张图片复制到新的batched_imgs的每张图片中，对齐左上角，保证bboxes的坐标不变\n",
    "            # 这样保证输入到网络中一个batch的每张图片的shape相同\n",
    "            # copy_: Copies the elements from src into self tensor and returns self\n",
    "            pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img)\n",
    "\n",
    "        return batched_imgs\n",
    "\n",
    "\n",
    "    def max_by_axis(self, the_list):\n",
    "        # type: (List[List[int]]) -> List[int]\n",
    "\n",
    "        maxes = the_list[0]\n",
    "        for sublist in the_list[1:]:\n",
    "            for index, item in enumerate(sublist):\n",
    "                maxes[index] = max(maxes[index], item)\n",
    "        return maxes\n",
    "\n",
    "    def resize_boxes(self, boxes, original_size, new_size):\n",
    "        \"\"\"对GT的bbox进行和image一样的缩放\"\"\"\n",
    "        ratios = [\n",
    "            torch.tensor(s, dtype=torch.float32, device=boxes.device) /\n",
    "            torch.tensor(s_orig, dtype=torch.float32, device=boxes.device)\n",
    "            for s, s_orig in zip(new_size, original_size)\n",
    "        ]\n",
    "\n",
    "        ratios_height, ratios_width = ratios\n",
    "        # Removes a tensor dimension, boxes [minibatch, 4]\n",
    "        # Returns a tuple of all slices along a given dimension, already without it.\n",
    "        xmin, ymin, xmax, ymax = boxes.unbind(1)\n",
    "        xmin = xmin * ratios_width\n",
    "        xmax = xmax * ratios_width\n",
    "        ymin = ymin * ratios_height\n",
    "        ymax = ymax * ratios_height\n",
    "        return torch.stack((xmin, ymin, xmax, ymax), dim=1)\n",
    "\n",
    "    def forward(self, images, targets=None):\n",
    "        \"\"\"输入一个batch的图片，每张图片的大小可能不一样\n",
    "        \"\"\"\n",
    "        images = [img for img in images]\n",
    "        for i in range(len(images)):\n",
    "            image = images[i]\n",
    "            target_index = targets[i] if targets is not None else None\n",
    "            if image.dim() != 3:\n",
    "                raise ValueError(\"images is expected to be a list of 3d tensors \"\n",
    "                                 \"of shape [C, H, W], got {}\".format(image.shape))\n",
    "            image = self.normalize(image)                # 对图像进行标准化处理\n",
    "            image, target_index = self.resize(image, target_index)   # 对图像和对应的bboxes缩放到指定范围\n",
    "            images[i] = image\n",
    "            if targets is not None and target_index is not None:\n",
    "                targets[i] = target_index\n",
    "        \n",
    "        # 记录resize后，batch中每张图片的h和w\n",
    "        image_sizes = [img.shape[-2:] for img in images]\n",
    "        images = self.batch_images(images)  # 将images填充到同一尺寸再打包成一个batch\n",
    "        image_sizes_list = torch.jit.annotate(List[Tuple[int, int]], [])\n",
    "\n",
    "        for image_size in image_sizes:\n",
    "            assert len(image_size) == 2\n",
    "            image_sizes_list.append((image_size[0], image_size[1]))\n",
    "\n",
    "        image_list = ImageList(images, image_sizes_list)\n",
    "        # 保持长宽比的resize之后，填充前的h和w\n",
    "        return image_list, targets\n",
    "    def postprocess(self,\n",
    "                result,                     # type: List[Dict[str, Tensor]]\n",
    "                image_shapes,               # type: List[Tuple[int, int]]\n",
    "                original_image_sizes        # type: List[Tuple[int, int]]\n",
    "                ):\n",
    "        \"\"\"\n",
    "        Args:\n",
    "            result: list(dict), 网络的预测结果, len(result) == batch_size\n",
    "            image_shapes: list(torch.Size), 图像预处理缩放后填充前的尺寸, len(image_shapes) == batch_size\n",
    "            original_image_sizes: list(torch.Size), 图像的原始尺寸, len(original_image_sizes) == batch_size\n",
    "\n",
    "        Returns:\n",
    "        \"\"\"\n",
    "        if self.training:\n",
    "            return result\n",
    "        for i, (pred, im_s, o_im_s) in enumerate(zip(result, image_shapes, original_image_sizes)):\n",
    "            boxes = pred[\"boxes\"]\n",
    "            boxes = resize_boxes(boxes, im_s, o_im_s)\n",
    "            result[i][\"boxes\"] = boxes\n",
    "        return result\n",
    "\n"
   ]
  },
  {
   "cell_type": "markdown",
   "source": [
    "# Backbone"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "markdown",
   "source": [
    "## Feature Pyramid Network"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "outputs": [],
   "source": [
    "class FeaturePyramidNetwork(nn.Module):\n",
    "    \"\"\"\n",
    "    Module that adds a FPN from on top of a set of feature maps. This is based on\n",
    "    `\"Feature Pyramid Network for Object Detection\" <https://arxiv.org/abs/1612.03144>`_.\n",
    "    The feature maps are currently supposed to be in increasing depth\n",
    "    order.\n",
    "    The input to the model is expected to be an OrderedDict[Tensor], containing\n",
    "    the feature maps on top of which the FPN will be added.\n",
    "    Arguments:\n",
    "        in_channels_list (list[int]): number of channels for each feature map that\n",
    "            is passed to the module\n",
    "        out_channels (int): number of channels of the FPN representation\n",
    "        extra_blocks (ExtraFPNBlock or None): if provided, extra operations will\n",
    "            be performed. It is expected to take the fpn features, the original\n",
    "            features and the names of the original features as input, and returns\n",
    "            a new list of feature maps and their corresponding names\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, in_channels_list, out_channels, extra_blocks=None):\n",
    "        super(FeaturePyramidNetwork, self).__init__()\n",
    "        # 用来调整resnet特征矩阵(layer1,2,3,4)的channel（kernel_size=1）\n",
    "        self.inner_blocks = nn.ModuleList()\n",
    "        # 对调整后的特征矩阵使用3x3的卷积核来得到对应的预测特征矩阵\n",
    "        self.layer_blocks = nn.ModuleList()\n",
    "        for in_channels in in_channels_list:\n",
    "            if in_channels == 0:\n",
    "                continue\n",
    "            inner_block_module = nn.Conv2d(in_channels, out_channels, 1)\n",
    "            layer_block_module = nn.Conv2d(out_channels, out_channels, 3, padding=1)\n",
    "            self.inner_blocks.append(inner_block_module)\n",
    "            self.layer_blocks.append(layer_block_module)\n",
    "\n",
    "        # initialize parameters now to avoid modifying the initialization of top_blocks\n",
    "        for m in self.children():\n",
    "            if isinstance(m, nn.Conv2d):\n",
    "                nn.init.kaiming_uniform_(m.weight, a=1)\n",
    "                nn.init.constant_(m.bias, 0)\n",
    "\n",
    "        self.extra_blocks = extra_blocks\n",
    "\n",
    "    def get_result_from_inner_blocks(self, x, idx):\n",
    "        # type: (Tensor, int) -> Tensor\n",
    "        \"\"\"\n",
    "        This is equivalent to self.inner_blocks[idx](x),\n",
    "        but torchscript doesn't support this yet\n",
    "        \"\"\"\n",
    "        num_blocks = len(self.inner_blocks)\n",
    "        if idx < 0:\n",
    "            idx += num_blocks\n",
    "        i = 0\n",
    "        out = x\n",
    "        for module in self.inner_blocks:\n",
    "            if i == idx:\n",
    "                out = module(x)\n",
    "            i += 1\n",
    "        return out\n",
    "\n",
    "    def get_result_from_layer_blocks(self, x, idx):\n",
    "        # type: (Tensor, int) -> Tensor\n",
    "        \"\"\"\n",
    "        This is equivalent to self.layer_blocks[idx](x),\n",
    "        but torchscript doesn't support this yet\n",
    "        \"\"\"\n",
    "        num_blocks = len(self.layer_blocks)\n",
    "        if idx < 0:\n",
    "            idx += num_blocks\n",
    "        i = 0\n",
    "        out = x\n",
    "        for module in self.layer_blocks:\n",
    "            if i == idx:\n",
    "                out = module(x)\n",
    "            i += 1\n",
    "        return out\n",
    "\n",
    "    def forward(self, x):\n",
    "        # type: (Dict[str, Tensor]) -> Dict[str, Tensor]\n",
    "        \"\"\"\n",
    "        Computes the FPN for a set of feature maps.\n",
    "        Arguments:\n",
    "            x (OrderedDict[Tensor]): feature maps for each feature level.\n",
    "        Returns:\n",
    "            results (OrderedDict[Tensor]): feature maps after FPN layers.\n",
    "                They are ordered from highest resolution first.\n",
    "        \"\"\"\n",
    "        # unpack OrderedDict into two lists for easier handling\n",
    "        names = list(x.keys())\n",
    "        x = list(x.values())\n",
    "\n",
    "        # 将resnet layer4的channel调整到指定的out_channels\n",
    "        # last_inner = self.inner_blocks[-1](x[-1])\n",
    "        last_inner = self.get_result_from_inner_blocks(x[-1], -1)\n",
    "        # result中保存着每个预测特征层\n",
    "        results = []\n",
    "        # 将layer4调整channel后的特征矩阵，通过3x3卷积后得到对应的预测特征矩阵\n",
    "        # results.append(self.layer_blocks[-1](last_inner))\n",
    "        results.append(self.get_result_from_layer_blocks(last_inner, -1))\n",
    "\n",
    "        for idx in range(len(x) - 2, -1, -1):\n",
    "            inner_lateral = self.get_result_from_inner_blocks(x[idx], idx)\n",
    "            feat_shape = inner_lateral.shape[-2:]\n",
    "            inner_top_down = F.interpolate(last_inner, size=feat_shape, mode=\"nearest\")\n",
    "            last_inner = inner_lateral + inner_top_down\n",
    "            results.insert(0, self.get_result_from_layer_blocks(last_inner, idx))\n",
    "\n",
    "        # 在layer4对应的预测特征层基础上生成预测特征矩阵5\n",
    "        if self.extra_blocks is not None:\n",
    "            results, names = self.extra_blocks(results, x, names)\n",
    "\n",
    "        # make it back an OrderedDict\n",
    "        out = OrderedDict([(k, v) for k, v in zip(names, results)])\n",
    "\n",
    "        return out\n",
    "\n",
    "\n",
    "class LastLevelMaxPool(torch.nn.Module):\n",
    "    \"\"\"\n",
    "    Applies a max_pool2d on top of the last feature map\n",
    "    \"\"\"\n",
    "\n",
    "    def forward(self, x, y, names):\n",
    "        # type: (List[Tensor], List[Tensor], List[str]) -> Tuple[List[Tensor], List[str]]\n",
    "        names.append(\"pool\")\n",
    "        x.append(F.max_pool2d(x[-1], 1, 2, 0))  # input, kernel_size, stride, padding\n",
    "        return x, names"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "markdown",
   "source": [
    "## resnet50_fpn_backbone"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "outputs": [],
   "source": [
    "class Bottleneck(nn.Module):\n",
    "    expansion = 4\n",
    "\n",
    "    def __init__(self, in_channel, out_channel, stride=1, downsample=None, norm_layer=None):\n",
    "        super(Bottleneck, self).__init__()\n",
    "        if norm_layer is None:\n",
    "            norm_layer = nn.BatchNorm2d\n",
    "\n",
    "        self.conv1 = nn.Conv2d(in_channels=in_channel, out_channels=out_channel,\n",
    "                               kernel_size=1, stride=1, bias=False)  # squeeze channels\n",
    "        self.bn1 = norm_layer(out_channel)\n",
    "        # -----------------------------------------\n",
    "        self.conv2 = nn.Conv2d(in_channels=out_channel, out_channels=out_channel,\n",
    "                               kernel_size=3, stride=stride, bias=False, padding=1)\n",
    "        self.bn2 = norm_layer(out_channel)\n",
    "        # -----------------------------------------\n",
    "        self.conv3 = nn.Conv2d(in_channels=out_channel, out_channels=out_channel * self.expansion,\n",
    "                               kernel_size=1, stride=1, bias=False)  # unsqueeze channels\n",
    "        self.bn3 = norm_layer(out_channel * self.expansion)\n",
    "        self.relu = nn.ReLU(inplace=True)\n",
    "        self.downsample = downsample\n",
    "\n",
    "    def forward(self, x):\n",
    "        identity = x\n",
    "        if self.downsample is not None:\n",
    "            identity = self.downsample(x)\n",
    "\n",
    "        out = self.conv1(x)\n",
    "        out = self.bn1(out)\n",
    "        out = self.relu(out)\n",
    "\n",
    "        out = self.conv2(out)\n",
    "        out = self.bn2(out)\n",
    "        out = self.relu(out)\n",
    "\n",
    "        out = self.conv3(out)\n",
    "        out = self.bn3(out)\n",
    "\n",
    "        out += identity\n",
    "        out = self.relu(out)\n",
    "\n",
    "        return out\n",
    "\n",
    "\n",
    "class ResNet(nn.Module):\n",
    "\n",
    "    def __init__(self, block, blocks_num, num_classes=1000, include_top=True, norm_layer=None):\n",
    "        super(ResNet, self).__init__()\n",
    "        if norm_layer is None:\n",
    "            norm_layer = nn.BatchNorm2d\n",
    "        self._norm_layer = norm_layer\n",
    "\n",
    "        self.include_top = include_top\n",
    "        self.in_channel = 64\n",
    "\n",
    "        self.conv1 = nn.Conv2d(3, self.in_channel, kernel_size=7, stride=2,\n",
    "                               padding=3, bias=False)\n",
    "        self.bn1 = norm_layer(self.in_channel)\n",
    "        self.relu = nn.ReLU(inplace=True)\n",
    "        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n",
    "        self.layer1 = self._make_layer(block, 64, blocks_num[0])\n",
    "        self.layer2 = self._make_layer(block, 128, blocks_num[1], stride=2)\n",
    "        self.layer3 = self._make_layer(block, 256, blocks_num[2], stride=2)\n",
    "        self.layer4 = self._make_layer(block, 512, blocks_num[3], stride=2)\n",
    "        if self.include_top:\n",
    "            self.avgpool = nn.AdaptiveAvgPool2d((1, 1))  # output size = (1, 1)\n",
    "            self.fc = nn.Linear(512 * block.expansion, num_classes)\n",
    "\n",
    "        for m in self.modules():\n",
    "            if isinstance(m, nn.Conv2d):\n",
    "                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n",
    "\n",
    "    def _make_layer(self, block, channel, block_num, stride=1):\n",
    "        norm_layer = self._norm_layer\n",
    "        downsample = None\n",
    "        if stride != 1 or self.in_channel != channel * block.expansion:\n",
    "            downsample = nn.Sequential(\n",
    "                nn.Conv2d(self.in_channel, channel * block.expansion, kernel_size=1, stride=stride, bias=False),\n",
    "                norm_layer(channel * block.expansion))\n",
    "\n",
    "        layers = []\n",
    "        layers.append(block(self.in_channel, channel, downsample=downsample,\n",
    "                            stride=stride, norm_layer=norm_layer))\n",
    "        self.in_channel = channel * block.expansion\n",
    "\n",
    "        for _ in range(1, block_num):\n",
    "            layers.append(block(self.in_channel, channel, norm_layer=norm_layer))\n",
    "\n",
    "        return nn.Sequential(*layers)\n",
    "\n",
    "    def forward(self, x):\n",
    "        x = self.conv1(x)\n",
    "        x = self.bn1(x)\n",
    "        x = self.relu(x)\n",
    "        x = self.maxpool(x)\n",
    "\n",
    "        x = self.layer1(x)\n",
    "        x = self.layer2(x)\n",
    "        x = self.layer3(x)\n",
    "        x = self.layer4(x)\n",
    "\n",
    "        if self.include_top:\n",
    "            x = self.avgpool(x)\n",
    "            x = torch.flatten(x, 1)\n",
    "            x = self.fc(x)\n",
    "\n",
    "        return x\n",
    "\n",
    "\n",
    "def overwrite_eps(model, eps):\n",
    "    \"\"\"\n",
    "    This method overwrites the default eps values of all the\n",
    "    FrozenBatchNorm2d layers of the model with the provided value.\n",
    "    This is necessary to address the BC-breaking change introduced\n",
    "    by the bug-fix at pytorch/vision#2933. The overwrite is applied\n",
    "    only when the pretrained weights are loaded to maintain compatibility\n",
    "    with previous versions.\n",
    "\n",
    "    Args:\n",
    "        model (nn.Module): The model on which we perform the overwrite.\n",
    "        eps (float): The new value of eps.\n",
    "    \"\"\"\n",
    "    for module in model.modules():\n",
    "        if isinstance(module, FrozenBatchNorm2d):\n",
    "            module.eps = eps\n",
    "\n",
    "\n",
    "class IntermediateLayerGetter(nn.ModuleDict):\n",
    "    \"\"\"\n",
    "    Module wrapper that returns intermediate layers from a model\n",
    "    It has a strong assumption that the modules have been registered\n",
    "    into the model in the same order as they are used.\n",
    "    This means that one should **not** reuse the same nn.Module\n",
    "    twice in the forward if you want this to work.\n",
    "    Additionally, it is only able to query submodules that are directly\n",
    "    assigned to the model. So if `model` is passed, `model.feature1` can\n",
    "    be returned, but not `model.feature1.layer2`.\n",
    "    Arguments:\n",
    "        model (nn.Module): model on which we will extract the features\n",
    "        return_layers (Dict[name, new_name]): a dict containing the names\n",
    "            of the modules for which the activations will be returned as\n",
    "            the key of the dict, and the value of the dict is the name\n",
    "            of the returned activation (which the user can specify).\n",
    "    \"\"\"\n",
    "    __annotations__ = {\n",
    "        \"return_layers\": Dict[str, str],\n",
    "    }\n",
    "\n",
    "    def __init__(self, model, return_layers):\n",
    "        if not set(return_layers).issubset([name for name, _ in model.named_children()]):\n",
    "            raise ValueError(\"return_layers are not present in model\")\n",
    "\n",
    "        orig_return_layers = return_layers\n",
    "        return_layers = {str(k): str(v) for k, v in return_layers.items()}\n",
    "        layers = OrderedDict()\n",
    "\n",
    "        # 遍历模型子模块按顺序存入有序字典\n",
    "        # 只保存layer4及其之前的结构，舍去之后不用的结构\n",
    "        for name, module in model.named_children():\n",
    "            layers[name] = module\n",
    "            if name in return_layers:\n",
    "                del return_layers[name]\n",
    "            if not return_layers:\n",
    "                break\n",
    "\n",
    "        super(IntermediateLayerGetter, self).__init__(layers)\n",
    "        self.return_layers = orig_return_layers\n",
    "\n",
    "    def forward(self, x):\n",
    "        \"\"\"Propagate `x` through every stored child in order, capturing the\n",
    "        outputs of the requested layers (e.g. layer1..layer4) under their\n",
    "        user-supplied names.\n",
    "        \"\"\"\n",
    "        captured = OrderedDict()\n",
    "        for name, module in self.items():\n",
    "            x = module(x)\n",
    "            if name in self.return_layers:\n",
    "                captured[self.return_layers[name]] = x\n",
    "        return captured\n",
    "\n",
    "\n",
    "class BackboneWithFPN(nn.Module):\n",
    "    \"\"\"Glue a backbone to a Feature Pyramid Network.\n",
    "\n",
    "    The backbone is wrapped in IntermediateLayerGetter to expose the feature maps\n",
    "    named in `return_layers`; those maps are then fed to a FeaturePyramidNetwork.\n",
    "    The same limitations of IntermediateLayerGetter apply here.\n",
    "\n",
    "    Arguments:\n",
    "        backbone (nn.Module): feature extractor.\n",
    "        return_layers (Dict[name, new_name]): backbone child name -> output name.\n",
    "        in_channels_list (List[int]): channels of each returned feature map,\n",
    "            in the order they appear in the OrderedDict.\n",
    "        out_channels (int): channels of every FPN output map.\n",
    "        extra_blocks: optional ExtraFPNBlock appended after the top FPN level.\n",
    "\n",
    "    Attributes:\n",
    "        out_channels (int): the number of channels in the FPN.\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, backbone, return_layers, in_channels_list, out_channels, extra_blocks=None):\n",
    "        super(BackboneWithFPN, self).__init__()\n",
    "\n",
    "        # Default extra block: a max-pool level on top of the last FPN output.\n",
    "        if extra_blocks is None:\n",
    "            extra_blocks = LastLevelMaxPool()\n",
    "\n",
    "        self.body = IntermediateLayerGetter(backbone, return_layers=return_layers)\n",
    "        self.fpn = FeaturePyramidNetwork(\n",
    "            in_channels_list=in_channels_list,\n",
    "            out_channels=out_channels,\n",
    "            extra_blocks=extra_blocks,\n",
    "        )\n",
    "        self.out_channels = out_channels\n",
    "\n",
    "    def forward(self, x):\n",
    "        # Backbone features (OrderedDict) -> FPN outputs (OrderedDict).\n",
    "        return self.fpn(self.body(x))\n",
    "\n",
    "\n",
    "def resnet50_fpn_backbone(pretrain_path=\"\",\n",
    "                          norm_layer=FrozenBatchNorm2d,  # like BatchNorm2d, but its parameters never update\n",
    "                          trainable_layers=3,\n",
    "                          returned_layers=None,\n",
    "                          extra_blocks=None):\n",
    "    \"\"\"Build a ResNet-50 + FPN backbone.\n",
    "\n",
    "    Args:\n",
    "        pretrain_path: path to pretrained ResNet-50 weights; \"\" skips loading.\n",
    "        norm_layer: normalization layer class. Defaults to FrozenBatchNorm2d,\n",
    "            whose statistics and affine parameters are frozen (small batches make\n",
    "            regular BN harmful; pass nn.BatchNorm2d if you can use large batches,\n",
    "            see https://github.com/facebookresearch/maskrcnn-benchmark/issues/267).\n",
    "        trainable_layers: how many top ResNet stages stay trainable (0..5).\n",
    "        returned_layers: which of layer1..layer4 feed the FPN (default all four).\n",
    "        extra_blocks: extra module appended after the FPN outputs\n",
    "            (default LastLevelMaxPool).\n",
    "\n",
    "    Returns:\n",
    "        BackboneWithFPN whose forward yields an OrderedDict of FPN feature maps.\n",
    "    \"\"\"\n",
    "    resnet_backbone = ResNet(Bottleneck, [3, 4, 6, 3],\n",
    "                             include_top=False,\n",
    "                             norm_layer=norm_layer)\n",
    "\n",
    "    # BUGFIX: norm_layer is a *class*, so isinstance(norm_layer, FrozenBatchNorm2d)\n",
    "    # was always False and overwrite_eps never ran; compare the class itself.\n",
    "    if isinstance(norm_layer, type) and issubclass(norm_layer, FrozenBatchNorm2d):\n",
    "        overwrite_eps(resnet_backbone, 0.0)\n",
    "\n",
    "    if pretrain_path != \"\":\n",
    "        assert os.path.exists(pretrain_path), \"{} is not exist.\".format(pretrain_path)\n",
    "        # load pretrained weights; print reports missing/unexpected keys\n",
    "        print(resnet_backbone.load_state_dict(torch.load(pretrain_path), strict=False))\n",
    "\n",
    "    # select layers that won't be frozen\n",
    "    assert 0 <= trainable_layers <= 5\n",
    "    layers_to_train = ['layer4', 'layer3', 'layer2', 'layer1', 'conv1'][:trainable_layers]\n",
    "\n",
    "    # when training every stage, conv1's companion bn1 must be trainable too\n",
    "    if trainable_layers == 5:\n",
    "        layers_to_train.append(\"bn1\")\n",
    "\n",
    "    # freeze every parameter that does not belong to a trainable layer\n",
    "    for name, parameter in resnet_backbone.named_parameters():\n",
    "        if all([not name.startswith(layer) for layer in layers_to_train]):\n",
    "            parameter.requires_grad_(False)\n",
    "\n",
    "    if extra_blocks is None:\n",
    "        extra_blocks = LastLevelMaxPool()\n",
    "\n",
    "    if returned_layers is None:\n",
    "        returned_layers = [1, 2, 3, 4]\n",
    "    # returned levels must be a subset of layer1..layer4\n",
    "    assert min(returned_layers) > 0 and max(returned_layers) < 5\n",
    "\n",
    "    # e.g. {'layer1': '0', 'layer2': '1', 'layer3': '2', 'layer4': '3'}\n",
    "    return_layers = {f'layer{k}': str(v) for v, k in enumerate(returned_layers)}\n",
    "\n",
    "    # in_channel is layer4's output channels (2048); layer1 outputs 2048 // 8 = 256\n",
    "    in_channels_stage2 = resnet_backbone.in_channel // 8  # 256\n",
    "    # channels each selected ResNet stage feeds into the FPN\n",
    "    in_channels_list = [in_channels_stage2 * 2 ** (i - 1) for i in returned_layers]\n",
    "    # channels of every FPN output map\n",
    "    out_channels = 256\n",
    "    return BackboneWithFPN(resnet_backbone, return_layers, in_channels_list, out_channels, extra_blocks=extra_blocks)"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# RPN"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## RPN Head\n",
    "输入进来的特征层经过：\n",
    "1. 不改变尺寸和通道数的 3x3卷积+relu\n",
    "2. 1x1卷积将通道数变为num_anchors（每个anchor只预测一个概率，论文两个，Pytorch实现一个）\n",
    "3. 1x1卷积将通道数变为num_anchors*4（每个anchor四个坐标偏移量，中心坐标和长宽）"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "class RPNHead(nn.Module):\n",
    "    \"\"\"\n",
    "    计算分类和边框位置回归\n",
    "    Returns:\n",
    "        logits:List[Tensors0(batch_size, num_anchors, h, w), Tensor1(batch_size, num_anchors, h, w),...]:\n",
    "        bbox_reg:List[Tensors0(batch_size, num_anchors*4, h, w),....]:每个Tensor代表FPN的一个特征层\n",
    "    \"\"\"\n",
    "    def __init__(self, in_channels, num_anchors):\n",
    "        super(RPNHead).__init__()\n",
    "        # 3x3 滑动窗口（不改变尺寸和通道数,但是增加了每个像素点的感受野）\n",
    "        self.conv = nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1)\n",
    "        # 计算每个对应位置的anchor的分类概率（前景或背景）\n",
    "        self.cls_logits = nn.Conv2d(in_channels, num_anchors, kernel_size=1, stride=1)\n",
    "        # 回归每个对应位置的坐标\n",
    "        self.bbox_pred = nn.Conv2d(in_channels, 4*num_anchors, kernel_size=1, stride=1)\n",
    "\n",
    "        for layer in self.children():\n",
    "            if isinstance(layer, nn.Conv2d):\n",
    "                torch.nn.init.normal_(layer.weight, std=0.01)\n",
    "                torch.nn.init.constant_(layer.bias, 0)\n",
    "    \n",
    "    def forward(self, x):\n",
    "        # type: (List[Tensor])\n",
    "        #输入的是FPN之后的多个特征层List[Tensor0(batch_size, c, h, w),Tensor1(batch_size, c, h, w),...]\n",
    "        logits = []\n",
    "        bbox_reg = []\n",
    "        for i, feature in enumerate(x):\n",
    "            t = F.relu(self.conv(feature))\n",
    "            logits.append(self.cls_logits(t))\n",
    "            bbox_reg.append(self.bbox_pred(t))\n",
    "        return logits, bbox_reg"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Anchors Generator\n",
    "返回List[[image0的所有anchors], [image1的所有anchors],[],...]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "class AnchorsGenerator(nn.Module):\n",
    "    __annotations__ = {\n",
    "        \"cell_anchors\": Optional[List[torch.Tensor]],\n",
    "        \"_cache\": Dict[str, List[torch.Tensor]]\n",
    "    }\n",
    "    def __init__(self, sizes=(128, 256, 512), aspect_ratios=(0.5, 1.0, 2.0)):\n",
    "        \"\"\"\n",
    "\n",
    "        Args:\n",
    "            sizes: ((32, 64, 128, 256, 512), )\n",
    "            aspect_ratios: ((0.5, 1.0, 2.0),)\n",
    "        \"\"\"\n",
    "        super(AnchorsGenerator, self).__init__()\n",
    "\n",
    "        if not isinstance(sizes[0], (list, tuple)):\n",
    "            sizes = tuple((s, ) for s in sizes)\n",
    "        if not isinstance(aspect_ratios[0], (list, tuple)):\n",
    "            aspect_ratios = (aspect_ratios,) * len(sizes)\n",
    "        \n",
    "        assert len(sizes) == len(aspect_ratios)\n",
    "\n",
    "        self.sizes = sizes\n",
    "        self.aspect_ratios = aspect_ratios\n",
    "        self.cell_anchors = None\n",
    "        self._cache = {}\n",
    "\n",
    "    def generate_anchors(self, scales, aspect_ratios, dtype=torch.float32, device=\"cpu\"):\n",
    "        # type: (List[int], List[float], int, Device)\n",
    "        \"\"\"\n",
    "        h = 0.5w,area = h*w = 0.5w*w.每个面积生成三个anchors\n",
    "        Args:\n",
    "            scales: (32, 64, 128, 256, 512)，表示anchor的面积分别为32^2, 64^2, 128^2,...\n",
    "            aspect_ratios: (0.5, 1.0, 2.0)长宽比\n",
    "        Returns:\n",
    "            15行4列的Tensor，每3行是一个面积的anchor\n",
    "\n",
    "        \"\"\"\n",
    "        scales = torch.as_tensor(scales, dtype=dtype, device=device)\n",
    "        aspect_ratios = torch.as_tensor(aspect_ratios, dtype=dtype, device=device)\n",
    "        h_ratios = torch.sqrt(aspect_ratios)\n",
    "        w_ratios = 1.0 / h_ratios\n",
    "        # [r1, r2, r3]' * [s1, s2, s3]\n",
    "        # number of elements is len(ratios)*len(scales)\n",
    "        ws = (w_ratios[:, None] * scales[None, :]).view(-1)     #(3,1)矩阵*(1,5)矩阵得到(15)个宽度组成的矩阵，每三个属于一个面积\n",
    "        hs = (h_ratios[:, None] * scales[None, :]).view(-1)\n",
    "\n",
    "        # left-top, right-bottom coordinate relative to anchor center(0, 0)\n",
    "        # 生成的anchors模板都是以（0, 0）为中心的, shape [len(ratios)*len(scales), 4]\n",
    "        base_anchors = torch.stack([-ws, -hs, ws, hs], dim=1) / 2   #这个函数不懂\n",
    "\n",
    "        return base_anchors.round()  # round 四舍五入\n",
    "\n",
    "    def cached_grid_anchors(self, grid_sizes, strides):\n",
    "        # type: (List[List[int]], List[List[Tensor]])\n",
    "        key = str(grid_sizes) + str(strides)\n",
    "        if key in self._cache:\n",
    "            return self._cache[key]\n",
    "        anchors = self.grid_anchors(grid_sizes, strides)\n",
    "        self._cache[key] = anchors\n",
    "        return anchors\n",
    "\n",
    "    def grid_anchors(self, grid_sizes, strides):\n",
    "        # type: (List[List[int]], List[List[Tensor]])\n",
    "\n",
    "        anchors = []\n",
    "        cell_anchors = self.cell_anchors\n",
    "        assert cell_anchors is not None\n",
    "        # 每个循环处理一张PFN的特征图\n",
    "        for size, stride, base_anchors in zip(grid_sizes, strides, cell_anchors):\n",
    "            grid_height, grid_width = size\n",
    "            stride_height, stride_width = stride\n",
    "            device = base_anchors.device\n",
    "            # 每个cell的起始位置的像素点位置\n",
    "            shifts_x = torch.arange(0, grid_width, dtype=torch.float32, device=device) * stride_width\n",
    "            shifts_y = torch.arange(0, grid_height, dtype=torch.float32, device=device) * stride_height\n",
    "            # 得到两个维度为：grid_h x grid_w的张量，分别表示每个cell的左上角像素点坐标的width值和height值\n",
    "            # 假设插值填充后的图片被划分为grid_h x grid_w个格子\n",
    "            # shift_x表示每个各自的左上角在原图中的 width 方向的像素点位置\n",
    "            # shift_y表示每个各自的左上角在原图中的 height 方向的像素点位置\n",
    "            shift_y, shift_x = torch.meshgrid(shifts_y, shifts_x)\n",
    "            shifts = torch.stack([shift_x, shift_y, shift_x, shift_y], dim=1)\n",
    "            # base_anchor中是15个anchor：[[-ws/2, -hs/2, ws/2, hs/2],[],[],...]\n",
    "            # 也就是以base_anchor中心为原点（左x下y）\n",
    "            # 此处将各个anchor的坐标原点（中心）移动到了每个cell的左上角，然后求得了移动后的anchor坐标[-ws/2+x0,-hs/2+y0,]\n",
    "            shifts_anchor = shifts.view(-1, 1, 4) + base_anchors.view(1, -1, 4)     #维度为(grid_h x grid_w,15,4)\n",
    "            anchors.append(shifts_anchor.reshape(-1, 4))    #维度为(grid_h x grid_w x 15,4)\n",
    "\n",
    "        return anchors  # List[Tensor(all_num_anchors, 4)]\n",
    "\n",
    "\n",
    "\n",
    "    def forward(self, image_list, feature_maps):\n",
    "        # type: (ImageList, List[Tensor])\n",
    "        # 获取backbone-fpn提取的特征图的高度宽度，[[h1,w1],[h2,w2],...]\n",
    "        grid_sizes = list([feature_map.shape[-2:] for feature_map in feature_maps])\n",
    "        # 获取插值填充后的batch图片的尺寸\n",
    "        image_size = image_list.tensors.shape[-2:]\n",
    "\n",
    "         # 获取变量类型和设备类型\n",
    "        dtype, device = feature_maps[0].dtype, feature_maps[0].device\n",
    "        # one step in feature map equate n pixel stride in origin image\n",
    "        # 计算特征层上的一步等于原始图像上的步长（一个cell相当于多少像素）[[cell_size_h, cell_size_w],...]\n",
    "        strides = [[torch.tensor(image_size[0] // g[0], dtype=torch.int64, device=device),\n",
    "                    torch.tensor(image_size[1] // g[1], dtype=torch.int64, device=device)] for g in grid_sizes]\n",
    "        # 使得self.cell_anchors = [[], ...]第一个元素是一个15行4列的Tensor，代表15个anchor的不标准位置\n",
    "        # 每个RPN特征层都会让self.cell_anchors多一个元素\n",
    "        self.set_cell_anchors(dtype, device)\n",
    "        # 计算/读取所有anchors的坐标信息（这里的anchors信息是映射到原图上的所有anchors信息，不是anchors模板）\n",
    "        # 得到的是一个list列表，对应每张预测特征图映射回原图的anchors坐标信息\n",
    "        anchors_over_all_feature_maps = self.cached_grid_anchors(grid_sizes, strides)\n",
    "\n",
    "        anchors = torch.jit.annotate(List[List[torch.Tensor]], [])\n",
    "        # 每次循环处理batch中的一张图片\n",
    "        for i,(image_height, image_width) in enumerate(image_list.image_sizes):\n",
    "            anchors_in_image = []\n",
    "            for anchors_per_feature_map in anchors_over_all_feature_maps:\n",
    "                anchors_in_image.append(anchors_per_feature_map)\n",
    "            anchors.append(anchors_in_image)\n",
    "        # 将每一张图像的所有PFN预测特征层的anchors坐标信息拼接在一起\n",
    "        # anchors是个list，每个元素为一张图像的所有anchors信息\n",
    "        anchors = [torch.cat(anchors_per_image) for anchors_per_image in anchors]\n",
    "        # Clear the cache in case that memory leaks.\n",
    "        self._cache.clear()\n",
    "        return anchors\n",
    "\n",
    "\n",
    "    def set_cell_anchors(self, dtype, device):\n",
    "        # type: (int, Device) -> None\n",
    "        if self.cell_anchors is not None:\n",
    "            pass\n",
    "        # list中每个元素表示一个FPN的特征层的anchor，此处只有一个特征层\n",
    "        cell_anchors = [\n",
    "            self.generate_anchors(sizes, aspect_ratios, dtype, device)\n",
    "            for sizes, aspect_ratios in zip(self.sizes, self.aspect_ratios)\n",
    "        ]\n",
    "        self.cell_anchors = cell_anchors\n",
    "\n",
    "    def num_anchors_per_location(self):\n",
    "        \"\"\"\n",
    "        返回每个FPN特征层中，每个位置anchor数\n",
    "        :return: [,]\n",
    "        \"\"\"\n",
    "        return [len(s) * len(a) for s, a in zip(self.sizes, self.aspect_ratios)]\n"
   ]
  },
  {
   "cell_type": "markdown",
   "source": [
    "## Region Proposal Network"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "outputs": [],
   "source": [
    "class RegionProposalNetwork(torch.nn.Module):\n",
    "    \"\"\"Full RPN: runs the head on FPN features, decodes proposals, filters them,\n",
    "    and (in training) labels anchors and computes the two RPN losses.\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, anchor_generator, head,\n",
    "                 fg_iou_thresh, bg_iou_thresh,\n",
    "                 batch_size_per_image, positive_fraction,\n",
    "                 pre_nms_top_n, post_nms_top_n, nms_thresh, score_thresh=0.0):\n",
    "        super(RegionProposalNetwork, self).__init__()\n",
    "        self.anchor_generator = anchor_generator\n",
    "        self.head = head\n",
    "        self.box_coder = BoxCoder(weights=(1.0, 1.0, 1.0, 1.0))\n",
    "\n",
    "        # used during training: IoU between anchors and ground-truth boxes\n",
    "        self.box_similarity = box_iou\n",
    "\n",
    "        self.proposal_matcher = Matcher(\n",
    "            fg_iou_thresh,  # anchors with IoU > fg_iou_thresh (0.7) are positives\n",
    "            bg_iou_thresh,  # anchors with IoU < bg_iou_thresh (0.3) are negatives\n",
    "            allow_low_quality_matches=True\n",
    "        )\n",
    "\n",
    "        self.fg_bg_sampler = BalancedPositiveNegativeSampler(\n",
    "            batch_size_per_image, positive_fraction  # 256, 0.5\n",
    "        )\n",
    "\n",
    "        # pre_nms_top_n: proposals kept per FPN level before NMS\n",
    "        # post_nms_top_n: proposals kept per image after NMS\n",
    "        self._pre_nms_top_n = pre_nms_top_n     # e.g. 2000 (training) / 1000 (testing)\n",
    "        self._post_nms_top_n = post_nms_top_n\n",
    "        self.nms_thresh = nms_thresh\n",
    "        self.score_thresh = score_thresh\n",
    "        self.min_size = 1.\n",
    "\n",
    "    def pre_nms_top_n(self):\n",
    "        if self.training:\n",
    "            return self._pre_nms_top_n['training']\n",
    "        return self._pre_nms_top_n['testing']\n",
    "\n",
    "    def post_nms_top_n(self):\n",
    "        if self.training:\n",
    "            return self._post_nms_top_n['training']\n",
    "        return self._post_nms_top_n['testing']\n",
    "\n",
    "    # BUGFIX: was misspelled `foward`, so nn.Module.__call__ never dispatched to it.\n",
    "    def forward(self, images, features, targets=None):\n",
    "        # type: (ImageList, Dict[str, Tensor], Optional[List[Dict[str, Tensor]]])\n",
    "        \"\"\"\n",
    "        Args:\n",
    "            images: ImageList holding the padded batch and per-image sizes\n",
    "            features: FPN feature maps, Dict{'0': Tensor(BS, C, H, W), '1': ...}\n",
    "            targets: per-image ground truth (training only)\n",
    "\n",
    "        Returns:\n",
    "            boxes: filtered proposals, one tensor per image\n",
    "            losses: dict with objectness / box-regression losses (training only)\n",
    "        \"\"\"\n",
    "        features = list(features.values())  # drop the keys, keep the tensors\n",
    "\n",
    "        objectness, pred_bbox_deltas = self.head(features)\n",
    "        # anchors: List[Tensor(all anchors of image i, 4)]\n",
    "        anchors = self.anchor_generator(images, features)\n",
    "        # batch size\n",
    "        num_images = len(anchors)\n",
    "        # per-level head output shapes: [[A, grid_h, grid_w], ...]\n",
    "        num_anchors_per_level_shape_tensors = [o[0].shape for o in objectness]\n",
    "        # anchors per level: [A*H0*W0, A*H1*W1, ...]\n",
    "        num_anchors_per_level = [s[0] * s[1] * s[2] for s in num_anchors_per_level_shape_tensors]\n",
    "        # flatten and concatenate head outputs across levels and batch\n",
    "        objectness, pred_bbox_deltas = concat_box_prediction_layers(objectness,\n",
    "                                                                    pred_bbox_deltas)\n",
    "        # apply the regression deltas to the anchors; detach because Faster R-CNN\n",
    "        # does not backprop through the proposal coordinates\n",
    "        proposals = self.box_coder.decode(pred_bbox_deltas.detach(), anchors)\n",
    "        proposals = proposals.view(num_images, -1, 4)\n",
    "        # top-N per level, clip to image, drop tiny boxes, NMS, final top-N\n",
    "        boxes, scores = self.filter_proposals(proposals, objectness, images.image_sizes, num_anchors_per_level)\n",
    "\n",
    "        losses = {}\n",
    "        if self.training:\n",
    "            assert targets is not None\n",
    "            labels, matched_gt_boxes = self.assign_targets_to_anchors(anchors, targets)\n",
    "            # regression targets [dx, dy, dw, dh] for every anchor\n",
    "            regression_targets = self.box_coder.encode(matched_gt_boxes, anchors)\n",
    "            loss_objectness, loss_rpn_box_reg = self.compute_loss(\n",
    "                objectness, pred_bbox_deltas, labels, regression_targets\n",
    "            )\n",
    "            losses = {\n",
    "                \"loss_objectness\": loss_objectness,\n",
    "                \"loss_rpn_box_reg\": loss_rpn_box_reg\n",
    "            }\n",
    "        return boxes, losses\n",
    "\n",
    "    def compute_loss(self, objectness, pred_bbox_deltas, labels, regression_targets):\n",
    "        # type: (Tensor, Tensor, List[Tensor], List[Tensor]) -> Tuple[Tensor, Tensor]\n",
    "        \"\"\"Sample a balanced anchor set and compute the two RPN losses.\n",
    "\n",
    "        Args:\n",
    "            objectness: [BS * sum(A*Hi*Wi), 1] predicted scores\n",
    "            pred_bbox_deltas: [BS * sum(A*Hi*Wi), 4] predicted deltas\n",
    "            labels: per-image anchor labels (1 positive, 0 negative, -1 discard)\n",
    "            regression_targets: per-image [dx, dy, dw, dh] targets\n",
    "        \"\"\"\n",
    "        sampled_pos_inds, sampled_neg_inds = self.fg_bg_sampler(labels)\n",
    "        sampled_pos_inds = torch.nonzero(torch.cat(sampled_pos_inds, dim=0)).squeeze(1)\n",
    "        sampled_neg_inds = torch.nonzero(torch.cat(sampled_neg_inds, dim=0)).squeeze(1)\n",
    "        sampled_inds = torch.cat([sampled_pos_inds, sampled_neg_inds], dim=0)\n",
    "        objectness = objectness.flatten()\n",
    "        labels = torch.cat(labels, dim=0)\n",
    "        regression_targets = torch.cat(regression_targets, dim=0)\n",
    "        # box regression loss: positives only, normalized by the total sample count\n",
    "        box_loss = smooth_l1_loss(\n",
    "            pred_bbox_deltas[sampled_pos_inds],\n",
    "            regression_targets[sampled_pos_inds],\n",
    "            beta = 1 / 9,\n",
    "            size_average=False,\n",
    "        ) / (sampled_inds.numel())\n",
    "        # objectness loss (raw logits in; sigmoid applied internally).\n",
    "        # sampled_inds only contains labels 0/1 — discarded (-1) anchors never enter\n",
    "        objectness_loss = F.binary_cross_entropy_with_logits(\n",
    "            objectness[sampled_inds], labels[sampled_inds]\n",
    "        )\n",
    "\n",
    "        return objectness_loss, box_loss\n",
    "\n",
    "    def filter_proposals(self, proposals, objectness, image_shapes, num_anchors_per_level):\n",
    "        # type: (Tensor, Tensor, List[Tuple[int, int]], List[int]) -> Tuple[List[Tensor], List[Tensor]]\n",
    "        \"\"\"Reduce decoded proposals to the final per-image set, in five steps:\n",
    "        1. keep the top pre_nms_top_n proposals per FPN level per image\n",
    "        2. clip boxes to the (resized, pre-padding) image boundary\n",
    "        3. drop boxes whose width or height is below min_size\n",
    "        4. per-image NMS, run independently per FPN level\n",
    "        5. keep the top post_nms_top_n scoring boxes per image\n",
    "\n",
    "        Args:\n",
    "            proposals: [BS, sum(A*Hi*Wi), 4] decoded box coordinates\n",
    "            objectness: [BS * sum(A*Hi*Wi), 1] predicted scores\n",
    "            image_shapes: per-image (resized, pre-padding) sizes\n",
    "            num_anchors_per_level: anchors per level [A*H0*W0, A*H1*W1, ...]\n",
    "        \"\"\"\n",
    "        num_images = proposals.shape[0]\n",
    "        device = proposals.device\n",
    "        # scores are only used for ranking here; no gradient needed\n",
    "        objectness = objectness.detach()\n",
    "        objectness = objectness.reshape(num_images, -1)\n",
    "        # level id of every anchor: [0]*A*H0*W0 + [1]*A*H1*W1 + ...\n",
    "        levels = [torch.full((n, ), idx, dtype=torch.int64, device=device)\n",
    "                  for idx, n in enumerate(num_anchors_per_level)]\n",
    "        levels = torch.cat(levels, 0)\n",
    "        # broadcast to match objectness: [BS, sum(A*Hi*Wi)]\n",
    "        levels = levels.reshape(1, -1).expand_as(objectness)\n",
    "        # step 1: top-N per level per image\n",
    "        top_n_idx = self._get_top_n_idx(objectness, num_anchors_per_level)\n",
    "        image_range = torch.arange(num_images, device=device)\n",
    "        batch_idx = image_range[:, None]  # [batch_size, 1]\n",
    "        objectness = objectness[batch_idx, top_n_idx]\n",
    "        levels = levels[batch_idx, top_n_idx]\n",
    "        proposals = proposals[batch_idx, top_n_idx]\n",
    "        final_boxes = []\n",
    "        final_scores = []\n",
    "        # one iteration per image in the batch\n",
    "        for boxes, scores, lvl, img_shape in zip(proposals, objectness, levels, image_shapes):\n",
    "            # step 2: clip boxes to the image boundary\n",
    "            boxes = clip_boxes_to_image(boxes, img_shape)\n",
    "            # step 3: remove boxes smaller than min_size\n",
    "            keep = remove_small_boxes(boxes, self.min_size)\n",
    "            boxes, scores, lvl = boxes[keep], scores[keep], lvl[keep]\n",
    "            # step 4: per-level NMS (returned indices are sorted by score)\n",
    "            keep = batched_nms(boxes, scores, lvl, self.nms_thresh)\n",
    "            # step 5: final top-N\n",
    "            keep = keep[: self.post_nms_top_n()]\n",
    "            boxes, scores = boxes[keep], scores[keep]\n",
    "            final_boxes.append(boxes)\n",
    "            final_scores.append(scores)\n",
    "        return final_boxes, final_scores\n",
    "\n",
    "    def assign_targets_to_anchors(self, anchors, targets):\n",
    "        # type: (List[Tensor], List[Dict[str, Tensor]]) -> Tuple[List[Tensor], List[Tensor]]\n",
    "        \"\"\"Label every anchor of every image and pick its matched GT box.\n",
    "\n",
    "        Args:\n",
    "            anchors: List[Tensor(all anchors of image i, 4)]\n",
    "            targets: per-image dicts; targets[i][\"boxes\"] holds the GT boxes\n",
    "\n",
    "        Returns:\n",
    "            labels: per-image tensors of 1.0 (positive), 0.0 (negative), -1.0 (discard)\n",
    "            matched_gt_boxes: per-image GT box assigned to each anchor; negatives\n",
    "                and discarded anchors point at GT 0, but their labels keep them\n",
    "                out of the box-regression loss\n",
    "        \"\"\"\n",
    "        labels = []\n",
    "        matched_gt_boxes = []\n",
    "        for anchors_per_image, targets_per_image in zip(anchors, targets):\n",
    "            gt_boxes = targets_per_image[\"boxes\"]\n",
    "            if gt_boxes.numel() == 0:\n",
    "                # image without objects: every anchor is background\n",
    "                device = anchors_per_image.device\n",
    "                matched_gt_boxes_per_image = torch.zeros(anchors_per_image.shape,  dtype=torch.float32, device=device)\n",
    "                labels_per_image = torch.zeros((anchors_per_image.shape[0],), dtype=torch.float32, device=device)\n",
    "            else:\n",
    "                # IoU matrix: rows = GT boxes, cols = anchors\n",
    "                match_quality_matrix = box_iou(gt_boxes, anchors_per_image)\n",
    "                # matched_idxs[i] >= 0: index of the matched GT; -1: background; -2: discard\n",
    "                matched_idxs = self.proposal_matcher(match_quality_matrix)\n",
    "                # clamp maps -1/-2 to GT 0 so indexing works; labels below mask them out\n",
    "                matched_gt_boxes_per_image = gt_boxes[matched_idxs.clamp(min=0)]\n",
    "                labels_per_image = matched_idxs >= 0\n",
    "                labels_per_image = labels_per_image.to(dtype=torch.float32)\n",
    "\n",
    "                bg_indices = matched_idxs == self.proposal_matcher.BELOW_LOW_THRESHOLD\n",
    "                labels_per_image[bg_indices] = 0.0\n",
    "\n",
    "                inds_to_discard = matched_idxs == self.proposal_matcher.BETWEEN_THRESHOLDS\n",
    "                labels_per_image[inds_to_discard] = -1.0\n",
    "\n",
    "            labels.append(labels_per_image)\n",
    "            matched_gt_boxes.append(matched_gt_boxes_per_image)\n",
    "        # BUGFIX: return the accumulated list for the whole batch; the original\n",
    "        # returned matched_gt_boxes_per_image, i.e. only the *last* image's boxes.\n",
    "        return labels, matched_gt_boxes\n",
    "\n",
    "    def _get_top_n_idx(self, objectness, num_anchors_per_level):\n",
    "        # type: (Tensor, List[int])\n",
    "        \"\"\"Indices (into the flattened anchor axis) of the top-N scoring anchors\n",
    "        of every FPN level, per image.\n",
    "\n",
    "        Args:\n",
    "            objectness: [BS, sum(A*Hi*Wi)]\n",
    "            num_anchors_per_level: [A*H0*W0, A*H1*W1, ...]\n",
    "        \"\"\"\n",
    "        r = []\n",
    "        offset = 0\n",
    "        for ob in objectness.split(num_anchors_per_level, 1):  # ob: [BS, A*Hi*Wi]\n",
    "            if torchvision._is_tracing():\n",
    "                num_anchors, pre_nms_top_n = _onnx_get_num_anchors_and_pre_nms_top_n(ob, self.pre_nms_top_n())\n",
    "            else:\n",
    "                num_anchors = ob.shape[1]  # anchors predicted on this level\n",
    "                pre_nms_top_n = min(self.pre_nms_top_n(), num_anchors)\n",
    "\n",
    "            _, top_n_idx = ob.topk(pre_nms_top_n, dim=1)\n",
    "            r.append(top_n_idx + offset)  # make indices global across levels\n",
    "            offset += num_anchors\n",
    "        return torch.cat(r, dim=1)\n",
    "\n",
    "def concat_box_prediction_layers(box_cls, box_regression):\n",
    "    # type: (List[Tensor], List[Tensor])\n",
    "    \"\"\"Flatten the per-level RPN head outputs and concatenate them.\n",
    "\n",
    "    Args:\n",
    "        box_cls: per-level objectness, each (BS, A*1, H, W)\n",
    "        box_regression: per-level regression, each (BS, A*4, H, W)\n",
    "    Returns:\n",
    "        box_cls: (sum over levels of BS*H*W*A, 1)\n",
    "        box_regression: (sum over levels of BS*H*W*A, 4)\n",
    "    \"\"\"\n",
    "    flat_cls = []\n",
    "    flat_reg = []\n",
    "\n",
    "    # one iteration per FPN level\n",
    "    for cls_level, reg_level in zip(box_cls, box_regression):\n",
    "        batch, anchors_x_classes, height, width = cls_level.shape\n",
    "        anchors_x4 = reg_level.shape[1]\n",
    "        # anchors per grid cell (e.g. 15), derived from the regression channels\n",
    "        num_anchors = anchors_x4 // 4\n",
    "        # classes per anchor (1 here: single foreground score)\n",
    "        num_classes = anchors_x_classes // num_anchors\n",
    "        # (BS, A*C, H, W) -> (BS, H*W*A, C)\n",
    "        flat_cls.append(permute_and_flatten(cls_level, batch, num_anchors, num_classes, height, width))\n",
    "        # (BS, A*4, H, W) -> (BS, H*W*A, 4)\n",
    "        flat_reg.append(permute_and_flatten(reg_level, batch, num_anchors, 4, height, width))\n",
    "\n",
    "    # collapse (batch, anchors) into one leading axis: (total_anchors, 1) / (total_anchors, 4)\n",
    "    box_cls = torch.cat(flat_cls, dim=1).flatten(0, -2)\n",
    "    box_regression = torch.cat(flat_reg, dim=1).reshape(-1, 4)\n",
    "    return box_cls, box_regression\n",
    "\n",
    "\n",
    "def permute_and_flatten(layer, N, A, C, H, W):\n",
    "    # type: (Tensor, int, int, int, int, int)\n",
    "    \"\"\"\n",
    "    Reshape a prediction tensor from (N, A*C, H, W) to (N, H*W*A, C).\n",
    "    Args:\n",
    "        layer: prediction tensor of shape (N, A*C, H, W)\n",
    "        N: batch size\n",
    "        A: number of anchors assigned to each grid cell (e.g. 15)\n",
    "        C: number of predicted parameters per anchor (class scores or box offsets)\n",
    "        H, W: spatial size of the feature map\n",
    "    Returns:\n",
    "        Tensor of shape (N, H*W*A, C)\n",
    "    \"\"\"\n",
    "    # view and reshape do the same job: flatten all elements, then lay them out in the given shape.\n",
    "    # view only works on tensors stored contiguously in memory; permute makes a tensor\n",
    "    # non-contiguous, after which view can no longer be used.\n",
    "    # reshape does not require the target tensor to be contiguous.\n",
    "    layer = layer.view(N, -1, C, H, W)\n",
    "    # (N, A, C, H, W) -> (N, H, W, A, C)\n",
    "    layer = layer.permute(0, 3, 4, 1, 2)\n",
    "    layer = layer.reshape(N, -1, C)\n",
    "    return layer"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "markdown",
   "source": [
    "# det utils"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "markdown",
   "source": [
    "## Box Coder"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "outputs": [
    {
     "ename": "TypeError",
     "evalue": "<module '__main__'> is a built-in class",
     "output_type": "error",
     "traceback": [
      "\u001B[0;31m---------------------------------------------------------------------------\u001B[0m",
      "\u001B[0;31mTypeError\u001B[0m                                 Traceback (most recent call last)",
      "\u001B[0;32m<ipython-input-8-562dbc1f51c9>\u001B[0m in \u001B[0;36m<module>\u001B[0;34m\u001B[0m\n\u001B[1;32m      1\u001B[0m \u001B[0;34m@\u001B[0m\u001B[0mtorch\u001B[0m\u001B[0;34m.\u001B[0m\u001B[0mjit\u001B[0m\u001B[0;34m.\u001B[0m\u001B[0mscript\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[0;32m----> 2\u001B[0;31m \u001B[0;32mclass\u001B[0m \u001B[0mBoxCoder\u001B[0m\u001B[0;34m(\u001B[0m\u001B[0mobject\u001B[0m\u001B[0;34m)\u001B[0m\u001B[0;34m:\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[0m\u001B[1;32m      3\u001B[0m     \u001B[0;32mdef\u001B[0m \u001B[0m__init__\u001B[0m\u001B[0;34m(\u001B[0m\u001B[0mself\u001B[0m\u001B[0;34m,\u001B[0m \u001B[0mweights\u001B[0m\u001B[0;34m,\u001B[0m \u001B[0mbbox_xform_clip\u001B[0m\u001B[0;34m=\u001B[0m\u001B[0mmath\u001B[0m\u001B[0;34m.\u001B[0m\u001B[0mlog\u001B[0m\u001B[0;34m(\u001B[0m\u001B[0;36m1000.\u001B[0m \u001B[0;34m/\u001B[0m \u001B[0;36m16\u001B[0m\u001B[0;34m)\u001B[0m\u001B[0;34m)\u001B[0m\u001B[0;34m:\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[1;32m      4\u001B[0m         \u001B[0;31m# type: (Tuple[float, float, float, float], float) -> None\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[1;32m      5\u001B[0m         \u001B[0mself\u001B[0m\u001B[0;34m.\u001B[0m\u001B[0mweights\u001B[0m \u001B[0;34m=\u001B[0m \u001B[0mweights\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n",
      "\u001B[0;32m~/.local/lib/python3.6/site-packages/torch/jit/_script.py\u001B[0m in \u001B[0;36mscript\u001B[0;34m(obj, optimize, _frames_up, _rcb)\u001B[0m\n\u001B[1;32m   1126\u001B[0m         \u001B[0;32mif\u001B[0m \u001B[0m_rcb\u001B[0m \u001B[0;32mis\u001B[0m \u001B[0;32mNone\u001B[0m\u001B[0;34m:\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[1;32m   1127\u001B[0m             \u001B[0m_rcb\u001B[0m \u001B[0;34m=\u001B[0m \u001B[0m_jit_internal\u001B[0m\u001B[0;34m.\u001B[0m\u001B[0mcreateResolutionCallbackFromFrame\u001B[0m\u001B[0;34m(\u001B[0m\u001B[0m_frames_up\u001B[0m \u001B[0;34m+\u001B[0m \u001B[0;36m1\u001B[0m\u001B[0;34m)\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[0;32m-> 1128\u001B[0;31m         \u001B[0m_compile_and_register_class\u001B[0m\u001B[0;34m(\u001B[0m\u001B[0mobj\u001B[0m\u001B[0;34m,\u001B[0m \u001B[0m_rcb\u001B[0m\u001B[0;34m,\u001B[0m \u001B[0mqualified_name\u001B[0m\u001B[0;34m)\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[0m\u001B[1;32m   1129\u001B[0m         \u001B[0;32mreturn\u001B[0m \u001B[0mobj\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[1;32m   1130\u001B[0m     \u001B[0;32melse\u001B[0m\u001B[0;34m:\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n",
      "\u001B[0;32m~/.local/lib/python3.6/site-packages/torch/jit/_script.py\u001B[0m in \u001B[0;36m_compile_and_register_class\u001B[0;34m(obj, rcb, qualified_name)\u001B[0m\n\u001B[1;32m    134\u001B[0m \u001B[0;34m\u001B[0m\u001B[0m\n\u001B[1;32m    135\u001B[0m \u001B[0;32mdef\u001B[0m \u001B[0m_compile_and_register_class\u001B[0m\u001B[0;34m(\u001B[0m\u001B[0mobj\u001B[0m\u001B[0;34m,\u001B[0m \u001B[0mrcb\u001B[0m\u001B[0;34m,\u001B[0m \u001B[0mqualified_name\u001B[0m\u001B[0;34m)\u001B[0m\u001B[0;34m:\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[0;32m--> 136\u001B[0;31m     \u001B[0mast\u001B[0m \u001B[0;34m=\u001B[0m \u001B[0mget_jit_class_def\u001B[0m\u001B[0;34m(\u001B[0m\u001B[0mobj\u001B[0m\u001B[0;34m,\u001B[0m \u001B[0mobj\u001B[0m\u001B[0;34m.\u001B[0m\u001B[0m__name__\u001B[0m\u001B[0;34m)\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[0m\u001B[1;32m    137\u001B[0m     \u001B[0mdefaults\u001B[0m \u001B[0;34m=\u001B[0m \u001B[0mtorch\u001B[0m\u001B[0;34m.\u001B[0m\u001B[0mjit\u001B[0m\u001B[0;34m.\u001B[0m\u001B[0mfrontend\u001B[0m\u001B[0;34m.\u001B[0m\u001B[0mget_default_args_for_class\u001B[0m\u001B[0;34m(\u001B[0m\u001B[0mobj\u001B[0m\u001B[0;34m)\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[1;32m    138\u001B[0m     \u001B[0mscript_class\u001B[0m \u001B[0;34m=\u001B[0m \u001B[0mtorch\u001B[0m\u001B[0;34m.\u001B[0m\u001B[0m_C\u001B[0m\u001B[0;34m.\u001B[0m\u001B[0m_jit_script_class_compile\u001B[0m\u001B[0;34m(\u001B[0m\u001B[0mqualified_name\u001B[0m\u001B[0;34m,\u001B[0m \u001B[0mast\u001B[0m\u001B[0;34m,\u001B[0m \u001B[0mdefaults\u001B[0m\u001B[0;34m,\u001B[0m \u001B[0mrcb\u001B[0m\u001B[0;34m)\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n",
      "\u001B[0;32m~/.local/lib/python3.6/site-packages/torch/jit/frontend.py\u001B[0m in \u001B[0;36mget_jit_class_def\u001B[0;34m(cls, self_name)\u001B[0m\n\u001B[1;32m    203\u001B[0m     \u001B[0mproperties\u001B[0m \u001B[0;34m=\u001B[0m \u001B[0mget_class_properties\u001B[0m\u001B[0;34m(\u001B[0m\u001B[0mcls\u001B[0m\u001B[0;34m,\u001B[0m \u001B[0mself_name\u001B[0m\u001B[0;34m)\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[1;32m    204\u001B[0m \u001B[0;34m\u001B[0m\u001B[0m\n\u001B[0;32m--> 205\u001B[0;31m     \u001B[0msourcelines\u001B[0m\u001B[0;34m,\u001B[0m \u001B[0mfile_lineno\u001B[0m\u001B[0;34m,\u001B[0m \u001B[0mfilename\u001B[0m \u001B[0;34m=\u001B[0m \u001B[0mget_source_lines_and_file\u001B[0m\u001B[0;34m(\u001B[0m\u001B[0mcls\u001B[0m\u001B[0;34m,\u001B[0m \u001B[0mtorch\u001B[0m\u001B[0;34m.\u001B[0m\u001B[0m_C\u001B[0m\u001B[0;34m.\u001B[0m\u001B[0mErrorReport\u001B[0m\u001B[0;34m.\u001B[0m\u001B[0mcall_stack\u001B[0m\u001B[0;34m(\u001B[0m\u001B[0;34m)\u001B[0m\u001B[0;34m)\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[0m\u001B[1;32m    206\u001B[0m     \u001B[0msource\u001B[0m \u001B[0;34m=\u001B[0m \u001B[0;34m''\u001B[0m\u001B[0;34m.\u001B[0m\u001B[0mjoin\u001B[0m\u001B[0;34m(\u001B[0m\u001B[0msourcelines\u001B[0m\u001B[0;34m)\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[1;32m    207\u001B[0m     \u001B[0mdedent_src\u001B[0m \u001B[0;34m=\u001B[0m \u001B[0mdedent\u001B[0m\u001B[0;34m(\u001B[0m\u001B[0msource\u001B[0m\u001B[0;34m)\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n",
      "\u001B[0;32m~/.local/lib/python3.6/site-packages/torch/_utils_internal.py\u001B[0m in \u001B[0;36mget_source_lines_and_file\u001B[0;34m(obj, error_msg)\u001B[0m\n\u001B[1;32m     52\u001B[0m     \u001B[0mfilename\u001B[0m \u001B[0;34m=\u001B[0m \u001B[0;32mNone\u001B[0m  \u001B[0;31m# in case getsourcefile throws\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[1;32m     53\u001B[0m     \u001B[0;32mtry\u001B[0m\u001B[0;34m:\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[0;32m---> 54\u001B[0;31m         \u001B[0mfilename\u001B[0m \u001B[0;34m=\u001B[0m \u001B[0minspect\u001B[0m\u001B[0;34m.\u001B[0m\u001B[0mgetsourcefile\u001B[0m\u001B[0;34m(\u001B[0m\u001B[0mobj\u001B[0m\u001B[0;34m)\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[0m\u001B[1;32m     55\u001B[0m         \u001B[0msourcelines\u001B[0m\u001B[0;34m,\u001B[0m \u001B[0mfile_lineno\u001B[0m \u001B[0;34m=\u001B[0m \u001B[0minspect\u001B[0m\u001B[0;34m.\u001B[0m\u001B[0mgetsourcelines\u001B[0m\u001B[0;34m(\u001B[0m\u001B[0mobj\u001B[0m\u001B[0;34m)\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[1;32m     56\u001B[0m     \u001B[0;32mexcept\u001B[0m \u001B[0mOSError\u001B[0m \u001B[0;32mas\u001B[0m \u001B[0me\u001B[0m\u001B[0;34m:\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n",
      "\u001B[0;32m/usr/lib/python3.6/inspect.py\u001B[0m in \u001B[0;36mgetsourcefile\u001B[0;34m(object)\u001B[0m\n\u001B[1;32m    682\u001B[0m     \u001B[0mReturn\u001B[0m \u001B[0;32mNone\u001B[0m \u001B[0;32mif\u001B[0m \u001B[0mno\u001B[0m \u001B[0mway\u001B[0m \u001B[0mcan\u001B[0m \u001B[0mbe\u001B[0m \u001B[0midentified\u001B[0m \u001B[0mto\u001B[0m \u001B[0mget\u001B[0m \u001B[0mthe\u001B[0m \u001B[0msource\u001B[0m\u001B[0;34m.\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[1;32m    683\u001B[0m     \"\"\"\n\u001B[0;32m--> 684\u001B[0;31m     \u001B[0mfilename\u001B[0m \u001B[0;34m=\u001B[0m \u001B[0mgetfile\u001B[0m\u001B[0;34m(\u001B[0m\u001B[0mobject\u001B[0m\u001B[0;34m)\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[0m\u001B[1;32m    685\u001B[0m     \u001B[0mall_bytecode_suffixes\u001B[0m \u001B[0;34m=\u001B[0m \u001B[0mimportlib\u001B[0m\u001B[0;34m.\u001B[0m\u001B[0mmachinery\u001B[0m\u001B[0;34m.\u001B[0m\u001B[0mDEBUG_BYTECODE_SUFFIXES\u001B[0m\u001B[0;34m[\u001B[0m\u001B[0;34m:\u001B[0m\u001B[0;34m]\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[1;32m    686\u001B[0m     \u001B[0mall_bytecode_suffixes\u001B[0m \u001B[0;34m+=\u001B[0m \u001B[0mimportlib\u001B[0m\u001B[0;34m.\u001B[0m\u001B[0mmachinery\u001B[0m\u001B[0;34m.\u001B[0m\u001B[0mOPTIMIZED_BYTECODE_SUFFIXES\u001B[0m\u001B[0;34m[\u001B[0m\u001B[0;34m:\u001B[0m\u001B[0;34m]\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n",
      "\u001B[0;32m~/.local/lib/python3.6/site-packages/torch/package/package_importer.py\u001B[0m in \u001B[0;36mpatched_getfile\u001B[0;34m(object)\u001B[0m\n\u001B[1;32m    590\u001B[0m         \u001B[0;32mif\u001B[0m \u001B[0mobject\u001B[0m\u001B[0;34m.\u001B[0m\u001B[0m__module__\u001B[0m \u001B[0;32min\u001B[0m \u001B[0m_package_imported_modules\u001B[0m\u001B[0;34m:\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[1;32m    591\u001B[0m             \u001B[0;32mreturn\u001B[0m \u001B[0m_package_imported_modules\u001B[0m\u001B[0;34m[\u001B[0m\u001B[0mobject\u001B[0m\u001B[0;34m.\u001B[0m\u001B[0m__module__\u001B[0m\u001B[0;34m]\u001B[0m\u001B[0;34m.\u001B[0m\u001B[0m__file__\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[0;32m--> 592\u001B[0;31m     \u001B[0;32mreturn\u001B[0m \u001B[0m_orig_getfile\u001B[0m\u001B[0;34m(\u001B[0m\u001B[0mobject\u001B[0m\u001B[0;34m)\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[0m\u001B[1;32m    593\u001B[0m \u001B[0;34m\u001B[0m\u001B[0m\n\u001B[1;32m    594\u001B[0m \u001B[0;34m\u001B[0m\u001B[0m\n",
      "\u001B[0;32m/usr/lib/python3.6/inspect.py\u001B[0m in \u001B[0;36mgetfile\u001B[0;34m(object)\u001B[0m\n\u001B[1;32m    652\u001B[0m             \u001B[0;32mif\u001B[0m \u001B[0mhasattr\u001B[0m\u001B[0;34m(\u001B[0m\u001B[0mobject\u001B[0m\u001B[0;34m,\u001B[0m \u001B[0;34m'__file__'\u001B[0m\u001B[0;34m)\u001B[0m\u001B[0;34m:\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[1;32m    653\u001B[0m                 \u001B[0;32mreturn\u001B[0m \u001B[0mobject\u001B[0m\u001B[0;34m.\u001B[0m\u001B[0m__file__\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[0;32m--> 654\u001B[0;31m         \u001B[0;32mraise\u001B[0m \u001B[0mTypeError\u001B[0m\u001B[0;34m(\u001B[0m\u001B[0;34m'{!r} is a built-in class'\u001B[0m\u001B[0;34m.\u001B[0m\u001B[0mformat\u001B[0m\u001B[0;34m(\u001B[0m\u001B[0mobject\u001B[0m\u001B[0;34m)\u001B[0m\u001B[0;34m)\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[0m\u001B[1;32m    655\u001B[0m     \u001B[0;32mif\u001B[0m \u001B[0mismethod\u001B[0m\u001B[0;34m(\u001B[0m\u001B[0mobject\u001B[0m\u001B[0;34m)\u001B[0m\u001B[0;34m:\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[1;32m    656\u001B[0m         \u001B[0mobject\u001B[0m \u001B[0;34m=\u001B[0m \u001B[0mobject\u001B[0m\u001B[0;34m.\u001B[0m\u001B[0m__func__\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n",
      "\u001B[0;31mTypeError\u001B[0m: <module '__main__'> is a built-in class"
     ]
    }
   ],
   "source": [
    "@torch.jit.script\n",
    "class BoxCoder(object):\n",
    "    def __init__(self, weights, bbox_xform_clip=math.log(1000. / 16)):\n",
    "        # type: (Tuple[float, float, float, float], float) -> None\n",
    "        self.weights = weights\n",
    "        self.bbox_xform_clip = bbox_xform_clip\n",
    "\n",
    "    def encode(self, reference_boxes, proposals):\n",
    "        # type: (List[Tensor], List[Tensor]) -> List[Tensor]\n",
    "        \"\"\"\n",
    "        以reference_boxes为target，计算reference_boxes和proposals之间的target_dx,dy,dw,dh\n",
    "        :param reference_boxes:[[xmin, ymin, xmax,ymax], [], ...]\n",
    "        :param proposals:每个anchors\n",
    "        :return:[image0[[dx,dy,dw,dh], [dx,dy,dw,dh],...], image1[], ...]\n",
    "        \"\"\"\n",
    "        boxes_per_image = [len(b) for b in reference_boxes]\n",
    "        reference_boxes = torch.cat(reference_boxes, dim=0)\n",
    "        proposals = torch.cat(proposals, dim=0)\n",
    "\n",
    "        targets = self.encode_single(reference_boxes, proposals)\n",
    "        return targets.split(boxes_per_image, 0)\n",
    "\n",
    "    def encode_single(self, reference_boxes, proposals):\n",
    "        dtype = reference_boxes.dtype\n",
    "        device = reference_boxes.device\n",
    "        weights = torch.as_tensor(self.weights, dtype=dtype, device=device)\n",
    "        targets = encode_boxes(reference_boxes, proposals, weights)\n",
    "        return targets\n",
    "\n",
    "    def decode(self, rel_codes, boxes):\n",
    "        # type: (Tensor, List[Tensor])\n",
    "        \"\"\"\n",
    "        输入偏移量和proposlas，得到偏移后的结果:\n",
    "        1. 将boxes中的xmin,ymin,xmax,ymax转换成ctr_x,ctr_y,w,h\n",
    "        2. 提取出rel_codes中的dx,dy,dw,dh\n",
    "        3. 将dx,dy,dw,dh施加到ctr_x,ctr_y,w,h得到新的ctr_x,ctr_y,w,h\n",
    "        4. 转换成新的xmin,ymin,xmax,ymax\n",
    "        Args:\n",
    "            rel_codes: [FPN特征层数 x BS x H x W x 15, 4]，预测的所有anchor的偏移量\n",
    "            boxes: List[Tensor0(image0的所有anchors), Tensor1(image1的所有anchors),...]，batch图像上所有的base anchor\n",
    "        Returns:\n",
    "            pred_boxes: [FPN特征层数 x BS x H x W x 15, 4]，所有偏移anchor过后得到的bbox\n",
    "        \"\"\"\n",
    "        assert isinstance(boxes, (list, tuple))\n",
    "        assert isinstance(rel_codes, torch.Tensor)\n",
    "        boxes_per_image = [b.size(0) for b in boxes]\n",
    "        concat_boxes = torch.cat(boxes, dim=0)\n",
    "\n",
    "        box_sum = 0\n",
    "        for val in boxes_per_image:\n",
    "            box_sum += val\n",
    "        pred_boxes = self.decode_single(rel_codes, concat_boxes)\n",
    "    \n",
    "    def decode_single(self, rel_codes, boxes):\n",
    "        boxes = boxes.to(rel_codes.dtype)\n",
    "        widths = boxes[:, 2] - boxes[:, 0]      # base_anchor的宽度\n",
    "        heights = boxes[:, 3] - boxes[:, 1]     # base_anchor的高度\n",
    "        ctr_x = boxes[:, 0] + 0.5 * widths      # anchor中心x坐标（就是cell左上角坐标）\n",
    "        ctr_y = boxes[:, 1] + 0.5 * heights     # anchor中心x坐标\n",
    "        \n",
    "        wx, wy, ww, wh = self.weights       # RPN中是[1,1,1,1],fastrcnn中是[10,10,5,5]\n",
    "        dx = rel_codes[:, 0::4] / wx   # 预测anchors/proposals的中心坐标x回归参数，采用::切片使得结果的维度为[n ,1]否则就是[n]\n",
    "        dy = rel_codes[:, 1::4] / wy   # 预测anchors/proposals的中心坐标y回归参数\n",
    "        dw = rel_codes[:, 2::4] / ww   # 预测anchors/proposals的宽度回归参数\n",
    "        dh = rel_codes[:, 3::4] / wh   # 预测anchors/proposals的高度回归参数\n",
    "\n",
    "        # limit max value, prevent sending too large values into torch.exp()\n",
    "        # self.bbox_xform_clip=math.log(1000. / 16)   4.135\n",
    "        dw = torch.clamp(dw, max=self.bbox_xform_clip)\n",
    "        dh = torch.clamp(dh, max=self.bbox_xform_clip)\n",
    "\n",
    "        pred_ctr_x = dx * widths[:, None] + ctr_x[:, None]\n",
    "        pred_ctr_y = dy * heights[:, None] + ctr_y[:, None]\n",
    "        pred_w = torch.exp(dw) * widths[:, None]\n",
    "        pred_h = torch.exp(dh) * widths[:, None]\n",
    "\n",
    "        # xmin\n",
    "        pred_boxes1 = pred_ctr_x - torch.tensor(0.5, dtype=pred_ctr_x.dtype, device=pred_w.device) * pred_w\n",
    "        # ymin\n",
    "        pred_boxes2 = pred_ctr_y - torch.tensor(0.5, dtype=pred_ctr_y.dtype, device=pred_h.device) * pred_h\n",
    "        # xmax\n",
    "        pred_boxes3 = pred_ctr_x + torch.tensor(0.5, dtype=pred_ctr_x.dtype, device=pred_w.device) * pred_w\n",
    "        # ymax\n",
    "        pred_boxes4 = pred_ctr_y + torch.tensor(0.5, dtype=pred_ctr_y.dtype, device=pred_h.device) * pred_h\n",
    "\n",
    "        pred_boxes = torch.stack((pred_boxes1, pred_boxes2, pred_boxes3, pred_boxes4), dim=2).flatten(1)\n",
    "        return pred_boxes\n",
    "\n",
    "@torch.jit._script_if_tracing\n",
    "def encode_boxes(reference_boxes, proposals, weights):\n",
    "    \"\"\"\n",
    "    以reference_boxes为target，计算reference_boxes和proposals之间的target_dx,dy,dw,dh\n",
    "    :param reference_boxes:[x1,y1,x2,y2]\n",
    "    :param proposals:每个anchor的坐标\n",
    "    :param weights: 每个坐标的权重，默认为1\n",
    "    :return:\n",
    "    \"\"\"\n",
    "    wx = weights[0]\n",
    "    wy = weights[1]\n",
    "    ww = weights[2]\n",
    "    wh = weights[3]\n",
    "\n",
    "    proposals_x1 = proposals[:, 0].unsqueeze(1)\n",
    "    proposals_y1 = proposals[:, 1].unsqueeze(1)\n",
    "    proposals_x2 = proposals[:, 2].unsqueeze(1)\n",
    "    proposals_y2 = proposals[:, 3].unsqueeze(1)\n",
    "\n",
    "    reference_boxes_x1 = reference_boxes[:, 0].unsqueeze(1)\n",
    "    reference_boxes_y1 = reference_boxes[:, 1].unsqueeze(1)\n",
    "    reference_boxes_x2 = reference_boxes[:, 2].unsqueeze(1)\n",
    "    reference_boxes_y2 = reference_boxes[:, 3].unsqueeze(1)\n",
    "\n",
    "    ex_widths = proposals_x2 - proposals_x1\n",
    "    ex_heights = proposals_y2 - proposals_y1\n",
    "    ex_ctr_x = proposals_x2 - 0.5 * ex_widths\n",
    "    ex_ctr_y = proposals_y2 - 0.5 * ex_heights\n",
    "\n",
    "    gt_widths = reference_boxes_x2 - reference_boxes_x1\n",
    "    gt_heights = reference_boxes_y2 - reference_boxes_y1\n",
    "    gt_ctr_x = reference_boxes_x1 + 0.5 * gt_widths\n",
    "    gt_ctr_y = reference_boxes_y1 + 0.5 * gt_heights\n",
    "\n",
    "    targets_dx = wx * (gt_ctr_x - ex_ctr_x) / ex_widths\n",
    "    targets_dy = wy * (gt_ctr_y - ex_ctr_y) / ex_heights\n",
    "    targets_dw = ww * torch.log(gt_widths / ex_widths)\n",
    "    targets_dh = wh * torch.log(gt_heights / ex_heights)\n",
    "\n",
    "    targets = torch.cat((targets_dx, targets_dy, targets_dw, targets_dh), dim=1)\n",
    "    return targets\n",
    "\n",
    "class Matcher(object):\n",
    "    \"\"\"\n",
    "    判断正负anchor，再为每个正样本anchor分配一个GT\n",
    "    匹配规则：\n",
    "    1. 输入一个IoU矩阵，行为GT，列为anchor，在每列里求最大值\n",
    "    2. 获得了每个anchor的最大IoU的GT（给每个anchor分配了一个与之IoU最大的GT）\n",
    "    3. 当这个IoU<0.3时为负样本，当0.3<IoU<0.7时丢弃样本\n",
    "    4. 当有GT没有分配给任何anchor正样本时（它分配的样本都是负的<0.7），则allow_low_quality，选取与该GT匹配的IoU值最大的anchor\n",
    "    :returns:[1,2,-1,-2,5,...]返回每个anchors的情况，-1负样本，-2丢弃样本\n",
    "    \"\"\"\n",
    "    def __init__(self, high_threshold, low_threshold, allow_low_quality_matches=False):\n",
    "        self.BELOW_LOW_THRESHOLD = -1\n",
    "        self.BETWEEN_THRESHOLDS = -2\n",
    "        assert low_threshold <= high_threshold\n",
    "        self.high_threshold = high_threshold  # 0.7\n",
    "        self.low_threshold = low_threshold    # 0.3\n",
    "        self.allow_low_quality_matches = allow_low_quality_matches\n",
    "    def __call__(self, match_quality_matrix):\n",
    "        if match_quality_matrix.numel() == 0:\n",
    "            # empty targets or proposals not supported during training\n",
    "            if match_quality_matrix.shape[0] == 0:\n",
    "                raise ValueError(\n",
    "                    \"No ground-truth boxes available for one of the images \"\n",
    "                    \"during training\")\n",
    "            else:\n",
    "                raise ValueError(\n",
    "                    \"No proposal boxes available for one of the images \"\n",
    "                    \"during training\")\n",
    "        # 获得每个anchor的 与之有最大IoU的GT：在每列里求最大值\n",
    "        # matches:[2,1,0,1,...]，matches[i]表示和第i个anchor的IoU最大的GT索引\n",
    "        # matched_vals:[0.9, 0.6, 0.5, 0.3, ...] matched_vals[i]表示第i个anchor与GT最大的IoU\n",
    "        matched_vals, matches = match_quality_matrix.max(dim=0)  # the dimension to reduce.\n",
    "        if self.allow_low_quality_matches:\n",
    "            all_matches = matches.clone()\n",
    "        else:\n",
    "            all_matches = None\n",
    "\n",
    "        below_low_threshold = matched_vals < self.low_threshold\n",
    "        between_threshold = (matched_vals >= self.low_threshold) & (matched_vals < self.high_threshold)\n",
    "        matches[below_low_threshold] = self.BELOW_LOW_THRESHOLD # -1 负样本anchors\n",
    "        matches[between_threshold] = self.BETWEEN_THRESHOLDS    # -2 丢弃的anchors\n",
    "\n",
    "        if self.allow_low_quality_matches:\n",
    "            assert all_matches is not None\n",
    "            self.set_low_quality_matches_(matches, all_matches, match_quality_matrix)\n",
    "\n",
    "        return matches\n",
    "    def set_low_quality_matches_(self, matches, all_matches, match_quality_matrix):\n",
    "        highest_quality_foreach_gt, _ = match_quality_matrix.max(dim=1)  # the dimension to reduce.\n",
    "        gt_pred_pairs_of_highest_quality = torch.where(\n",
    "            match_quality_matrix == highest_quality_foreach_gt[:, None]\n",
    "        )\n",
    "        pre_inds_to_update = gt_pred_pairs_of_highest_quality[:, 1]\n",
    "        matches[pre_inds_to_update] = all_matches[pre_inds_to_update]\n",
    "\n",
    "class BalancedPositiveNegativeSampler(object):\n",
    "    def __init__(self, batch_size_per_image, positive_fraction):\n",
    "        # type: (int, float) -> None\n",
    "        \"\"\"\n",
    "        输入\n",
    "        :param batch_size_per_image:每张图片选取的样本数量\n",
    "        :param positive_fraction:样本数量中正样本最高占比\n",
    "        \"\"\"\n",
    "        self.batch_size_per_image = batch_size_per_image\n",
    "        self.positive_fraction = positive_fraction\n",
    "\n",
    "    def __call__(self, matched_idxs):\n",
    "        # type: (List[Tensor]) -> Tuple[List[Tensor], List[Tensor]]\n",
    "        \"\"\"\n",
    "        对batch里每张图片的anchor进行随机选取，每张图片中选取一定数量的正样本和负样本\n",
    "        :param matched_idxs(List[[1,0,-1,1,...], [], [],...]): 记录了batch里每张图片的每个anchor的正、负、被丢弃的情况\n",
    "        :return: 返回两个分别记录了选取的正样本和负样本的索引\n",
    "        \"\"\"\n",
    "        pos_idx = []\n",
    "        neg_idx = []\n",
    "\n",
    "        for matched_idxs_per_image in matched_idxs:\n",
    "            positive = torch.nonzero(matched_idxs_per_image >= 1).squeeze(1)\n",
    "            negative = torch.nonzero(matched_idxs_per_image == 0).squeeze(1)\n",
    "\n",
    "            num_pos = int(self.batch_size_per_image * self.positive_fraction)\n",
    "            num_pos = min(positive.numel(), num_pos)\n",
    "\n",
    "            num_neg = self.batch_size_per_image - num_pos\n",
    "            num_neg = min(negative.numel(), num_neg)\n",
    "\n",
    "            perm1 = torch.randperm(positive.numel(), device=positive.device)[:num_pos]\n",
    "            perm2 = torch.randperm(negative.numel(), device=negative.device)[:num_neg]\n",
    "\n",
    "            pos_idx_per_image = positive[perm1]\n",
    "            neg_idx_per_image = negative[perm2]\n",
    "\n",
    "            pos_idx_per_image_mask = torch.zeros_like(\n",
    "                matched_idxs_per_image, dtype=torch.uint8\n",
    "            )\n",
    "            neg_idx_per_image_mask = torch.zeros_like(\n",
    "                matched_idxs_per_image, dtype=torch.uint8\n",
    "            )\n",
    "\n",
    "            pos_idx_per_image_mask[pos_idx_per_image] = 1\n",
    "            neg_idx_per_image_mask[neg_idx_per_image] = 1\n",
    "\n",
    "            pos_idx.append(pos_idx_per_image_mask)\n",
    "            neg_idx.append(neg_idx_per_image_mask)\n",
    "\n",
    "        return pos_idx, neg_idx\n",
    "\n",
    "def smooth_l1_loss(input, target, beta: float = 1. / 9, size_average: bool = True):\n",
    "    \"\"\"\n",
    "    L_reg = smooth(|input - target|)\n",
    "    if || < beta:0.5||^2/beta, else:|| - 0.5*beta\n",
    "    :param input:[num_pos, 4]所有正样本的预测偏离：dx,dy,dw,dh\n",
    "    :param target:[num_pos, 4]所有正样本的目标偏离\n",
    "    :param beta:smooth的参数\n",
    "    :param size_average:\n",
    "    :return:\n",
    "    \"\"\"\n",
    "    n = torch.abs(input - target)\n",
    "    cond = torch.lt(n, beta)\n",
    "    loss = torch.where(cond, 0.5 * n ** 2 / beta, n - 0.5 * beta)\n",
    "    if size_average:\n",
    "        return loss.mean()\n",
    "    return loss.sum()\n",
    "\n"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "markdown",
   "source": [
    "# box operations"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "outputs": [],
   "source": [
    "def clip_boxes_to_image(boxes, size):\n",
    "    # type: (Tensor, Tuple[int, int]) -> Tensor\n",
    "    \"\"\"\n",
    "    Clamp box coordinates so that every box lies inside an image of the given size.\n",
    "    :param boxes: [..., 4] boxes in (xmin, ymin, xmax, ymax) format\n",
    "    :param size: (height, width) of the image\n",
    "    :return: clipped boxes with the same shape as the input\n",
    "    \"\"\"\n",
    "    dim = boxes.dim()\n",
    "    height, width = size\n",
    "    xs = boxes[..., 0::2]   # xmin and xmax columns\n",
    "    ys = boxes[..., 1::2]   # ymin and ymax columns\n",
    "\n",
    "    if torchvision._is_tracing():\n",
    "        # ONNX tracing cannot handle clamp with Python scalars; use tensor min/max instead\n",
    "        zero = torch.tensor(0, dtype=boxes.dtype, device=boxes.device)\n",
    "        xs = torch.min(torch.max(xs, zero), torch.tensor(width, dtype=boxes.dtype, device=boxes.device))\n",
    "        ys = torch.min(torch.max(ys, zero), torch.tensor(height, dtype=boxes.dtype, device=boxes.device))\n",
    "    else:\n",
    "        xs = xs.clamp(min=0, max=width)   # keep x within [0, width]\n",
    "        ys = ys.clamp(min=0, max=height)  # keep y within [0, height]\n",
    "\n",
    "    return torch.stack((xs, ys), dim=dim).reshape(boxes.shape)\n",
    "\n",
    "def remove_small_boxes(boxes, min_size):\n",
    "    # type: (Tensor, float) -> Tensor\n",
    "    ws, hs = boxes[:, 2] - boxes[:, 0], boxes[:, 3] - boxes[:, 1]\n",
    "    #keep = (ws >= min_size) & (hs >= min_size)\n",
    "    keep = torch.logical_and(torch.gt(ws, min_size), torch.ge(hs, min_size))\n",
    "    keep = keep .nonzero().squeeze(1)\n",
    "    return keep\n",
    "\n",
    "def batched_nms(boxes, scores, idxs, iou_threshold):\n",
    "    # type: (Tensor, Tensor, Tensor, float) -> Tensor\n",
    "    \"\"\"\n",
    "    Perform NMS independently per group (FPN level or class) in a single call.\n",
    "    Boxes with different idxs are shifted by a per-index offset larger than any\n",
    "    coordinate, so boxes from different groups can never overlap and one NMS pass\n",
    "    suppresses only within each group.\n",
    "    Args:\n",
    "        boxes: [n, 4] boxes from all FPN levels of one image\n",
    "        scores: [n] confidence score of each box\n",
    "        idxs: [n] group label of each box (feature level in the RPN, class id in Fast R-CNN)\n",
    "        iou_threshold: IoU threshold handed to NMS\n",
    "    Returns:\n",
    "        indices of the boxes kept by NMS\n",
    "    \"\"\"\n",
    "    if boxes.numel() == 0:\n",
    "        return torch.empty((0,), dtype=torch.int64, device=boxes.device)\n",
    "    # shift each group's boxes far away from every other group's boxes\n",
    "    max_coordinate = boxes.max()\n",
    "    offsets = idxs.to(boxes) * (max_coordinate + 1)\n",
    "    return nms(boxes + offsets[:, None], scores, iou_threshold)\n",
    "\n",
    "def nms(boxes, scores, iou_threshold):\n",
    "    # type: (Tensor, Tensor, float) -> Tensor\n",
    "    \"\"\"\n",
    "    Performs non-maximum suppression (NMS) on the boxes according\n",
    "    to their intersection-over-union (IoU).\n",
    "\n",
    "    NMS iteratively removes lower scoring boxes which have an\n",
    "    IoU greater than iou_threshold with another (higher scoring)\n",
    "    box.\n",
    "\n",
    "    Parameters\n",
    "    ----------\n",
    "    boxes : Tensor[N, 4])\n",
    "        boxes to perform NMS on. They\n",
    "        are expected to be in (x1, y1, x2, y2) format\n",
    "    scores : Tensor[N]\n",
    "        scores for each one of the boxes\n",
    "    iou_threshold : float\n",
    "        discards all overlapping\n",
    "        boxes with IoU > iou_threshold\n",
    "\n",
    "    Returns\n",
    "    -------\n",
    "    keep : Tensor\n",
    "        int64 tensor with the indices\n",
    "        of the elements that have been kept\n",
    "        by NMS, sorted in decreasing order of scores\n",
    "    \"\"\"\n",
    "    # delegate to torchvision's compiled NMS kernel\n",
    "    return torch.ops.torchvision.nms(boxes, scores, iou_threshold)\n",
    "\n",
    "def box_iou(boxes1, boxes2):\n",
    "    \"\"\"\n",
    "    Pairwise IoU matrix between two sets of boxes (rows: boxes1, cols: boxes2).\n",
    "    Args:\n",
    "        boxes1 (Tensor[N, 4])\n",
    "        boxes2 (Tensor[M, 4])\n",
    "    Returns:\n",
    "        iou (Tensor[N, M]): IoU of every box in boxes1 with every box in boxes2\n",
    "    \"\"\"\n",
    "    area1 = box_area(boxes1)\n",
    "    area2 = box_area(boxes2)\n",
    "\n",
    "    # intersection corners via broadcasting: [N, 1, 2] against [M, 2] -> [N, M, 2]\n",
    "    lt = torch.max(boxes1[:, None, :2], boxes2[:, :2])  # left-top\n",
    "    rb = torch.min(boxes1[:, None, 2:], boxes2[:, 2:])  # right-bottom\n",
    "\n",
    "    wh = (rb - lt).clamp(min=0)        # clamp zeroes out non-overlapping pairs\n",
    "    inter = wh[:, :, 0] * wh[:, :, 1]  # [N, M] intersection areas\n",
    "\n",
    "    # IoU = intersection / union\n",
    "    return inter / (area1[:, None] + area2 - inter)\n",
    "\n",
    "\n",
    "def box_area(boxes):\n",
    "    return (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 整体框架\n",
    "\n"
   ]
  },
  {
   "cell_type": "markdown",
   "source": [
    "## MLP Head"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "outputs": [],
   "source": [
    "class TwoMLPHead(nn.Module):\n",
    "    def __init__(self, in_channels, representation_size):\n",
    "        \"\"\"\n",
    "        RPN从原图中提取的候选框映射到backbone输出的特征图之后，再经过RoIAlign层，得到的固定尺寸的输出\n",
    "        再送入TwoMLPHead\n",
    "        :param in_channels: backbone.outchannels * (7*7)\n",
    "        :param representation_size:第一个全连接层的输出和第二个全连接层的输入和输出\n",
    "        \"\"\"\n",
    "        super(TwoMLPHead, self).__init__()\n",
    "\n",
    "        self.fc6 = nn.Linear(in_channels, representation_size)\n",
    "        self.fc7 = nn.Linear(representation_size, representation_size)\n",
    "\n",
    "    def forward(self, x):\n",
    "        \"\"\"\n",
    "        此处输入的x并不是一个RoI，而是整个batch图片提取的RoI经过RoIAlign之后所有固定长度的\n",
    "        RoI特征，可进行并行化计算\n",
    "        :param x: [all_roi, 256, 7, 7]\n",
    "        :return:\n",
    "        \"\"\"\n",
    "        x = x.flatten(start_dim=1)      #进行fc之前必须展平\n",
    "\n",
    "        x = F.relu(self.fc6(x))\n",
    "        x = F.relu(self.fc7(x))\n",
    "\n",
    "        return x"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "markdown",
   "source": [
    "## Fast-RCNN Predictor"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "outputs": [],
   "source": [
    "class FastRCNNPredictor(nn.Module):\n",
    "    def __init__(self, in_channels, num_classes):\n",
    "        super(FastRCNNPredictor, self).__init__()\n",
    "        self.cls_score = nn.Linear(in_channels, num_classes)\n",
    "        self.bbox_pred = nn.Linear(in_channels, num_classes * 4)\n",
    "\n",
    "    def forward(self, x):\n",
    "        \"\"\"\n",
    "\n",
    "        :param x:[nums_roi, representation]\n",
    "        :return: [nums_roi, num_classes], [nums_roi, num_classes*4]\n",
    "        \"\"\"\n",
    "        if x.dim() == 4:\n",
    "            assert list(x.shape[2:]) == [1,1]\n",
    "        x = x.flatten(start_dim=1)  #不起作用\n",
    "        scores = self.cls_score(x)\n",
    "        bbox_deltas = self.bbox_pred(x)\n",
    "\n",
    "        return scores, bbox_deltas"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "markdown",
   "source": [
    "## fast-rcnn loss"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "outputs": [],
   "source": [
    "def fastrcnn_loss(class_logits, box_regression, labels, regression_targets):\n",
    "    # type: (Tensor, Tensor, List[Tensor], List[Tensor]) -> Tuple[Tensor, Tensor]\n",
    "    \"\"\"\n",
    "\n",
    "    :param class_logits: 经过IoU矩阵选取采样过后的样本的预测结果\n",
    "    :param box_regression:经过IoU矩阵选取采样过后的样本的预测结果\n",
    "    :param labels: 经过IoU矩阵选取采样过后的样本的标签\n",
    "    :param regression_targets: 经过IoU矩阵选取采样过后的样本和它对应的GT的target_dx,dw,dy,dh\n",
    "    :return:\n",
    "    \"\"\"\n",
    "    labels = torch.cat(labels, dim=0)\n",
    "    regression_targets = torch.cat(regression_targets, dim=0)\n",
    "\n",
    "    # 计算类别分类损失\n",
    "    classification_loss = F.cross_entropy(class_logits, labels)\n",
    "\n",
    "    sampled_pos_inds_subset = torch.where(torch.gt(labels, 0))[0]\n",
    "\n",
    "    labels_pos = labels[sampled_pos_inds_subset]\n",
    "\n",
    "    N, num_classes = class_logits.shape\n",
    "    # [N, 21, 4]\n",
    "    box_regression = box_regression.reshape(N, -1, 4)\n",
    "    # proposals每个类别都预测了一个位置偏移，求损失时，只求该proposals对应的GT的类别的位置偏移损失\n",
    "    box_loss = smooth_l1_loss(\n",
    "        box_regression[sampled_pos_inds_subset, labels_pos],\n",
    "        regression_targets[sampled_pos_inds_subset],\n",
    "        beta= 1 / 9,\n",
    "        size_average=False\n",
    "    ) / labels.numel()\n",
    "\n",
    "    return classification_loss, box_loss\n"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "markdown",
   "source": [
    "## RoI Head"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "outputs": [],
   "source": [
    "class RoIHeads(nn.Module):\n",
    "    \"\"\"Fast R-CNN head: matches proposals to ground truth, samples them, pools RoI\n",
    "    features, runs the box head/predictor and post-processes the raw outputs.\"\"\"\n",
    "\n",
    "    def __init__(self,\n",
    "                 box_roi_pool,\n",
    "                 box_head,\n",
    "                 box_predictor,\n",
    "                 # Faster R-CNN training\n",
    "                 fg_iou_thresh, bg_iou_thresh,  # default: 0.5, 0.5\n",
    "                 batch_size_per_image, positive_fraction,  # default: 512, 0.25\n",
    "                 bbox_reg_weights,  # None\n",
    "                 # Faster R-CNN inference\n",
    "                 score_thresh,        # default: 0.05\n",
    "                 nms_thresh,          # default: 0.5\n",
    "                 detection_per_img):  # default: 100\n",
    "        \"\"\"\n",
    "        :param box_roi_pool: RoIAlign-style module pooling per-proposal features (e.g. MultiScaleRoIAlign)\n",
    "        :param box_head: maps pooled features to a representation vector (e.g. TwoMLPHead)\n",
    "        :param box_predictor: yields class logits and box deltas (e.g. FastRCNNPredictor)\n",
    "        :param fg_iou_thresh: IoU at/above which a proposal counts as foreground in training\n",
    "        :param bg_iou_thresh: IoU below which a proposal counts as background in training\n",
    "        :param batch_size_per_image: proposals sampled per image for the loss\n",
    "        :param positive_fraction: fraction of sampled proposals that should be positive\n",
    "        :param bbox_reg_weights: BoxCoder weights; defaults to (10., 10., 5., 5.) when None\n",
    "        :param score_thresh: inference score threshold\n",
    "        :param nms_thresh: inference NMS IoU threshold\n",
    "        :param detection_per_img: maximum detections kept per image at inference\n",
    "        \"\"\"\n",
    "        super(RoIHeads, self).__init__()\n",
    "\n",
    "        self.box_similarity = box_iou\n",
    "        self.proposal_matcher = Matcher(\n",
    "            fg_iou_thresh,\n",
    "            bg_iou_thresh,\n",
    "            allow_low_quality_matches=False\n",
    "        )\n",
    "        self.fg_bg_sampler = BalancedPositiveNegativeSampler(\n",
    "            batch_size_per_image,   # 512\n",
    "            positive_fraction       # 0.25\n",
    "        )\n",
    "\n",
    "        if bbox_reg_weights is None:\n",
    "            bbox_reg_weights = (10., 10., 5., 5.)\n",
    "\n",
    "        self.box_coder = BoxCoder(bbox_reg_weights)\n",
    "        self.box_roi_pool = box_roi_pool\n",
    "        self.box_head = box_head\n",
    "        self.box_predictor = box_predictor\n",
    "        self.score_thresh = score_thresh    # 0.05\n",
    "        self.nms_thresh = nms_thresh        # 0.5\n",
    "        self.detection_per_img = detection_per_img  # 100\n",
    "\n",
    "    def forward(self,\n",
    "                features,       # type: Dict[str, Tensor]\n",
    "                proposals,      # type: List[Tensor]\n",
    "                image_shapes,   # type: List[Tuple[int, int]]\n",
    "                targets=None    # type: Optional[List[Dict[str, Tensor]]]\n",
    "                ):\n",
    "        \"\"\"Run the RoI heads on one batch.\n",
    "\n",
    "        :param features: feature maps produced by the backbone, keyed by level name\n",
    "        :param image_shapes: image sizes after resizing but before padding\n",
    "        :param proposals: per-image RPN boxes, already filtered by the RPN's 5-step filter_proposals\n",
    "        :param targets: per-image ground-truth dicts; required in training\n",
    "        :return: (result, losses) — per-image detections at inference, loss dict in training\n",
    "        \"\"\"\n",
    "        if targets is not None:\n",
    "            for t in targets:\n",
    "                floating_point_types = (torch.float, torch.double, torch.half)\n",
    "                assert t[\"boxes\"].dtype in floating_point_types\n",
    "                assert t[\"labels\"].dtype == torch.int64\n",
    "\n",
    "        if self.training:\n",
    "            proposals, matched_idx, labels, regression_targets = self.select_training_samples(proposals, targets)\n",
    "        else:\n",
    "            labels = None\n",
    "            regression_targets = None\n",
    "            matched_idx = None\n",
    "\n",
    "        # [num_proposals, 256, 7, 7]; in training these are the sampled proposals\n",
    "        # (512 per image by default), at inference the ~1000 RPN-filtered ones\n",
    "        box_features = self.box_roi_pool(features, proposals, image_shapes)\n",
    "        # [num_proposals, representation_size]\n",
    "        box_features = self.box_head(box_features)\n",
    "        # [num_proposals, num_classes], [num_proposals, num_classes * 4]\n",
    "        class_logits, box_regression = self.box_predictor(box_features)\n",
    "\n",
    "        result = torch.jit.annotate(List[Dict[str, torch.Tensor]], [])\n",
    "        losses = {}\n",
    "        if self.training:\n",
    "            assert labels is not None and regression_targets is not None\n",
    "            loss_classifier, loss_box_reg = fastrcnn_loss(\n",
    "                class_logits, box_regression, labels, regression_targets)\n",
    "            losses = {\n",
    "                \"loss_classifier\": loss_classifier,\n",
    "                \"loss_box_reg\": loss_box_reg\n",
    "            }\n",
    "        else:\n",
    "            boxes, scores, labels = self.postprocess_detections(class_logits, box_regression, proposals, image_shapes)\n",
    "            num_images = len(boxes)\n",
    "            for i in range(num_images):\n",
    "                result.append(\n",
    "                    {\n",
    "                        \"boxes\": boxes[i],\n",
    "                        \"labels\": labels[i],\n",
    "                        \"scores\": scores[i],\n",
    "                    }\n",
    "                )\n",
    "\n",
    "        return result, losses\n",
    "\n",
    "\n",
    "    def add_gt_proposals(self, proposals, gt_boxes):\n",
    "        # type: (List[Tensor], List[Tensor]) -> List[Tensor]\n",
    "        \"\"\"\n",
    "        将gt_boxes拼接到proposal后面\n",
    "        Args:\n",
    "            proposals: 一个batch中每张图像rpn预测的boxes\n",
    "            gt_boxes:  一个batch中每张图像对应的真实目标边界框\n",
    "\n",
    "        Returns:\n",
    "\n",
    "        \"\"\"\n",
    "        proposals = [\n",
    "            torch.cat((proposal, gt_box))\n",
    "            for proposal, gt_box in zip(proposals, gt_boxes)\n",
    "        ]\n",
    "        return proposals\n",
    "\n",
    "    def check_targets(self, targets):\n",
    "        # type: (Optional[List[Dict[str, Tensor]]]) -> None\n",
    "        assert targets is not None\n",
    "        assert all([\"boxes\" in t for t in targets])\n",
    "        assert all([\"labels\" in t for t in targets])\n",
    "\n",
    "    def select_training_samples(self,\n",
    "                               proposals,   # type: List[Tensor]\n",
    "                               targets      # type: Optional[List[Dict[str, Tensor]]]\n",
    "                               ):\n",
    "        \"\"\"\n",
    "        1. 通过IoU矩阵为RPN输入进来的每个proposals分配一个GT，并根据二者之间的IoU判断正负样本，将第0个GT分配给负样本*\n",
    "        2. 在batch中每张图片选取相同数量的proposals，其中正负样本占比固定（正样本不够则负样本补充）\n",
    "        3. 计算选取的proposals和它所属的GT之间的target_dx,dy,dw,dh\n",
    "        4. labels记录了每个proposals对应的类别，负样本(背景)为0\n",
    "        :param proposals:\n",
    "        :param targets:\n",
    "        :return:\n",
    "        \"\"\"\n",
    "        self.check_targets(targets)\n",
    "        assert targets is not None\n",
    "\n",
    "        dtype = proposals[0].dtype\n",
    "        device = proposals[0].device\n",
    "        # 获取标注好的boxes以及labels信息\n",
    "        gt_boxes = [t[\"boxes\"].to(dtype) for t in targets]\n",
    "        gt_labels = [t[\"labels\"] for t in targets]\n",
    "        proposals = self.add_gt_proposals(proposals, gt_boxes)\n",
    "        matched_idxs, labels = self.assign_targets_to_proposals(proposals, gt_boxes, gt_labels)\n",
    "        sampled_inds = self.subsample(labels)\n",
    "        matched_gt_boxes = []\n",
    "        num_images = len(proposals)\n",
    "\n",
    "        for img_id in range(num_images):\n",
    "            img_sampled_inds = sampled_inds[img_id]\n",
    "            proposals[img_id] = proposals[img_id][img_sampled_inds]\n",
    "\n",
    "            labels[img_id] = labels[img_id][img_sampled_inds]\n",
    "            matched_idxs[img_id] = matched_idxs[img_id][img_sampled_inds]\n",
    "            gt_boxes_in_image  =gt_boxes[img_id]\n",
    "            if gt_boxes_in_image.numel() == 0:\n",
    "                gt_boxes_in_image = torch.zeros((1, 4), dtype=dtype, device=device)\n",
    "            matched_gt_boxes.append(gt_boxes_in_image[matched_idxs[img_id]])\n",
    "        # 计算采样的一定数量的RPN输入进来的每个proposals和它匹配的targets之间的target_dx,dy,dw,dh\n",
    "        regression_targets = self.box_coder.encode(matched_gt_boxes, proposals)\n",
    "        return proposals, labels, regression_targets\n",
    "\n",
    "\n",
    "    def assign_targets_to_proposals(self, proposals, gt_boxes, gt_labels):\n",
    "        # type: (List[Tensor], List[Tensor], List[Tensor]) -> Tuple[List[Tensor], List[Tensor]]\n",
    "        \"\"\"Assign one GT box to every filtered RPN proposal and derive its label.\n",
    "\n",
    "        Positive/negative status is decided from the IoU with the assigned GT.\n",
    "        Args:\n",
    "            proposals: per-image proposal boxes\n",
    "            gt_boxes: per-image ground-truth boxes\n",
    "            gt_labels: per-image ground-truth class labels\n",
    "        Returns:\n",
    "            matched_idxs: e.g. [image0 [0, 1, 2, 5, ...], image1 [...]]; each entry is an index\n",
    "                into that image's target['boxes']; note negatives are clamped to index 0\n",
    "            labels: e.g. [image0 [cat, dog, 0, bird, 0, ...], image1 [...], ...] (0 = background)\n",
    "        \"\"\"\n",
    "        matched_idxs = []\n",
    "        labels = []\n",
    "        for proposals_in_image, gt_boxes_in_image, gt_labels_in_image in zip(proposals, gt_boxes, gt_labels):\n",
    "            if gt_boxes_in_image.numel() == 0:  # no GT boxes in this image: all background\n",
    "                # background image\n",
    "                device = proposals_in_image.device\n",
    "                clamped_matched_idxs_in_image = torch.zeros(\n",
    "                    (proposals_in_image.shape[0],), dtype=torch.int64, device=device\n",
    "                )\n",
    "                labels_in_image = torch.zeros(\n",
    "                    (proposals_in_image.shape[0],), dtype=torch.int64, device=device\n",
    "                )\n",
    "            else:\n",
    "                #  set to self.box_similarity when https://github.com/pytorch/pytorch/issues/27495 lands\n",
    "                # IoU between every GT box and every proposal\n",
    "                match_quality_matrix = box_iou(gt_boxes_in_image, proposals_in_image)\n",
    "                # with fg_iou_thresh == bg_iou_thresh there are only positives and\n",
    "                # negatives, no discarded (between-thresholds) samples\n",
    "                # e.g. [-2, -1, 0, 5, 3, ...]: index of the max-IoU GT per proposal\n",
    "                matched_idxs_in_image = self.proposal_matcher(match_quality_matrix)\n",
    "                clamped_matched_idxs_in_image = matched_idxs_in_image.clamp(min=0)\n",
    "                # matched_idxs_in_image indexes into the per-image GT lists\n",
    "                # (gt_labels for classes, gt_boxes for coordinates).\n",
    "                # First pretend negatives/discards match GT 0 (a real class, not 0),\n",
    "                # then overwrite negatives with label 0 (background) below.\n",
    "                labels_in_image = gt_labels_in_image[clamped_matched_idxs_in_image]\n",
    "                labels_in_image = labels_in_image.to(dtype=torch.int64)\n",
    "                bg_inds = matched_idxs_in_image == self.proposal_matcher.BELOW_LOW_THRESHOLD\n",
    "                labels_in_image[bg_inds] = 0\n",
    "                ignore_inds = matched_idxs_in_image == self.proposal_matcher.BETWEEN_THRESHOLDS\n",
    "                labels_in_image[ignore_inds] = -1\n",
    "            matched_idxs.append(clamped_matched_idxs_in_image)\n",
    "            labels.append(labels_in_image)\n",
    "\n",
    "        return matched_idxs, labels\n",
    "\n",
    "    def subsample(self, labels):\n",
    "        # type: (List[Tensor]) -> List[Tensor]\n",
    "        \"\"\"\n",
    "        随机从每个image中选取相同数量的样本，其中正样本所占比例固定，不够的话就用负样本补齐。\n",
    "        :param labels:[image0[cat, 0, dog, 0,...], image1[], ...]\n",
    "        :return:\n",
    "        \"\"\"\n",
    "        sampled_pos_inds, sampled_neg_inds = self.fg_bg_sampler(labels)\n",
    "        sampled_inds = []\n",
    "        for img_idx , (pos_inds_img, neg_inds_img) in enumerate(zip(sampled_pos_inds, sampled_neg_inds)):\n",
    "            img_sampled_inds = torch.where(pos_inds_img | neg_inds_img)[0]\n",
    "            sampled_inds.append(img_sampled_inds)\n",
    "        return sampled_inds\n",
    "\n",
    "    def postprocess_detections(self,\n",
    "                               class_logits,    # type: Tensor\n",
    "                               box_regression,  # type: Tensor\n",
    "                               proposals,       # type: List[Tensor]\n",
    "                               image_shapes     # type: List[Tuple[int, int]]\n",
    "                               ):\n",
    "        \"\"\"Turn raw head outputs into final per-image detections.\n",
    "\n",
    "        Decodes the box deltas, drops the background column, filters by score and\n",
    "        box size, applies per-class NMS and keeps the top detection_per_img results.\n",
    "        Args:\n",
    "            class_logits: [num_proposals, num_classes] raw logits\n",
    "            box_regression: [num_proposals, num_classes * 4] predicted deltas\n",
    "            proposals: per-image proposal boxes\n",
    "            image_shapes: per-image (h, w) after resizing, before padding\n",
    "        Returns:\n",
    "            (all_boxes, all_scores, all_labels) per-image lists\n",
    "        \"\"\"\n",
    "        device = class_logits.device\n",
    "        num_classes = class_logits.shape[-1]\n",
    "        # number of proposals belonging to each image\n",
    "        boxes_per_image = [boxes_in_image.shape[0] for boxes_in_image in proposals]\n",
    "        # apply the predicted (dx, dy, dw, dh) deltas to the proposals\n",
    "        pred_boxes = self.box_coder.decode(box_regression, proposals)\n",
    "        pred_scores = F.softmax(class_logits, -1)\n",
    "\n",
    "        pred_boxes_list = pred_boxes.split(boxes_per_image, 0)\n",
    "        pred_scores_list = pred_scores.split(boxes_per_image, 0)\n",
    "\n",
    "        all_boxes = []\n",
    "        all_scores = []\n",
    "        all_labels = []\n",
    "\n",
    "        for boxes, scores, image_shape in zip(pred_boxes_list, pred_scores_list, image_shapes):\n",
    "            # clamp the predicted boxes to the resized (pre-padding) image bounds\n",
    "            boxes = clip_boxes_to_image(boxes, image_shape)\n",
    "\n",
    "            labels = torch.arange(num_classes, device=device)\n",
    "            labels = labels.view(1, -1).expand_as(scores)\n",
    "\n",
    "            # drop the background column (class index 0)\n",
    "            boxes = boxes[:, 1:]    # [num_proposals, num_classes - 1, 4]\n",
    "            scores = scores[:, 1:]  # [num_proposals, num_classes - 1]\n",
    "            labels = labels[:, 1:]\n",
    "\n",
    "            boxes = boxes.reshape(-1, 4)\n",
    "            scores = scores.reshape(-1)\n",
    "            labels = labels.reshape(-1)\n",
    "            # 1. drop detections scoring below self.score_thresh\n",
    "            inds = torch.where(torch.gt(scores, self.score_thresh))[0]\n",
    "            boxes, scores, labels = boxes[inds], scores[inds], labels[inds]\n",
    "            # 2. drop tiny boxes\n",
    "            keep = remove_small_boxes(boxes, min_size=1)\n",
    "            boxes, scores, labels = boxes[keep], scores[keep], labels[keep]\n",
    "            # 3. per-class NMS (keep is ordered by decreasing score)\n",
    "            keep = batched_nms(boxes, scores, labels, self.nms_thresh)\n",
    "            # 4. keep only the top detection_per_img detections\n",
    "            keep = keep[:self.detection_per_img]\n",
    "            # BUG FIX: index with `keep` from the NMS/top-k steps; the original reused\n",
    "            # the stale `inds` from step 1, discarding NMS and top-k filtering entirely\n",
    "            boxes, scores, labels = boxes[keep], scores[keep], labels[keep]\n",
    "            all_boxes.append(boxes)\n",
    "            all_scores.append(scores)\n",
    "            all_labels.append(labels)\n",
    "\n",
    "        return all_boxes, all_scores, all_labels\n",
    "\n"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "markdown",
   "source": [
    "## Faster RCNN Base"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "outputs": [],
   "source": [
    "class FasterRCNNBase(nn.Module):\n",
    "    \"\"\"\n",
    "    Inputs:\n",
    "        images:[Tensor[Tensor]]：一个batch的未经处理的image\n",
    "    \"\"\"\n",
    "    def __init__(self, backbone, rpn, roi_heads, transform):\n",
    "        super(FasterRCNNBase).__init__()\n",
    "        self.transform = transform\n",
    "        self.backbone = backbone\n",
    "        self.rpn = rpn\n",
    "        self.roi_heads = roi_heads\n",
    "        # used only on torchscript mode\n",
    "        self._has_warned = False\n",
    "    def eager_outputs(self, losses, detections):\n",
    "        # type: (Dict[str, Tensor], List[Dict[str, Tensor]]) -> Union[Dict[str, Tensor], List[Dict[str, Tensor]]]\n",
    "        if self.training:\n",
    "            return losses\n",
    "\n",
    "        return detections\n",
    "    def forward(self, images, targets=None):\n",
    "        if self.training and targets is None:\n",
    "            raise ValueError(\"In train mode but don't have targets\")\n",
    "        # 检查batch_target中每个target字典中的boxes是否是1x4维度的Tensor\n",
    "        if self.training:\n",
    "            assert targets is not None\n",
    "            for target in targets:\n",
    "                boxes = target[\"boxes\"]\n",
    "                if isinstance(boxes, torch.Tensor):\n",
    "                    if len(boxes.shape) != 2 or boxes.shape[-1] != 4:\n",
    "                        raise ValueError(\"标签中boxes应该是1x4维的tensor\")\n",
    "                else:\n",
    "                    raise ValueError(\"标签中boxes应该是Tensor类型\")\n",
    "        #记录未经过任何处理的batch images的原始尺寸\n",
    "        original_image_sizes: List[Tuple[int, int]] = []\n",
    "        for img in images:\n",
    "            val = img.shape[-2:]    #pytorch中图片[channels, height, weight]\n",
    "            assert len(val) == 2\n",
    "            original_image_sizes.append((val[0], val[1]))\n",
    "        # 对原始batch_images进插值和填充成一样尺寸\n",
    "        images, targets = self.transform(images, targets)\n",
    "        # 特征提取\n",
    "        features = self.backbone(images.tensors)\n",
    "\n",
    "        if isinstance(features, torch.Tensor):          #如果没有使用FPN\n",
    "            features = OrderedDict([('0', features)])\n",
    "        # 将特征层以及标注target信息传入rpn中\n",
    "        # proposals: List[Tensor], Tensor_shape: [num_proposals, 4],\n",
    "        # 每个proposals是绝对坐标，且为(x1, y1, x2, y2)格式,经过filter_proposals的结果\n",
    "        proposals, proposal_losses = self.rpn(images, features, targets)\n",
    "\n",
    "        # 将rpn生成的数据以及标注target信息传入fast rcnn后半部分\n",
    "        detections, detector_losses = self.roi_heads(features, proposals, images.image_sizes, targets)\n",
    "\n",
    "        # 对网络的预测结果进行后处理（主要将bboxes还原到原图像尺度上）\n",
    "        detections = self.transform.postprocess(detections, images.image_sizes, original_image_sizes)\n",
    "\n",
    "        losses = {}\n",
    "        losses.update(detector_losses)\n",
    "        losses.update(proposal_losses)\n",
    "\n",
    "        if torch.jit.is_scripting():\n",
    "            if not self._has_warned:\n",
    "                warnings.warn(\"RCNN always returns a (Losses, Detections) tuple in scripting\")\n",
    "                self._has_warned = True\n",
    "            return losses, detections\n",
    "        else:\n",
    "            return self.eager_outputs(losses, detections)"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "markdown",
   "source": [
    "## Faster RCNN"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "outputs": [],
   "source": [
    "class FasterRCNN(FasterRCNNBase):\n",
    "    \"\"\"Assemble the complete Faster R-CNN: transform, RPN and RoI heads.\n",
    "\n",
    "    Note the distinction between rpn_fg_iou_thresh and box_fg_iou_thresh.\n",
    "    \"\"\"\n",
    "    def __init__(self, backbone, num_classes=None,  # includes the background class\n",
    "                 # transform parameter\n",
    "                 min_size=800, max_size=1333,      # min/max sizes enforced by the resize preprocessing\n",
    "                 image_mean=None, image_std=None,  # mean/std used by the normalize preprocessing\n",
    "                 # RPN parameters\n",
    "                 rpn_anchor_generator=None, rpn_head=None,\n",
    "                 rpn_pre_nms_top_n_train=2000, rpn_pre_nms_top_n_test=1000,    # proposals kept per FPN level before NMS (by score)\n",
    "                 rpn_post_nms_top_n_train=2000, rpn_post_nms_top_n_test=1000,  # total proposals kept after NMS\n",
    "                 rpn_nms_thresh=0.7,  # IoU threshold used by the RPN NMS\n",
    "                 rpn_fg_iou_thresh=0.7, rpn_bg_iou_thresh=0.3,  # IoU thresholds for pos/neg sampling in the RPN loss\n",
    "                 rpn_batch_size_per_image=256, rpn_positive_fraction=0.5,  # samples per image for the RPN loss, and its positive fraction\n",
    "                 rpn_score_thresh=0.0,\n",
    "                 # Box parameters\n",
    "                 box_roi_pool=None, box_head=None, box_predictor=None,\n",
    "                 # low-score filter      fast rcnn NMS threshold      keep top-100 detections by score\n",
    "                 box_score_thresh=0.05, box_nms_thresh=0.5, box_detections_per_img=100,\n",
    "                 box_fg_iou_thresh=0.5, box_bg_iou_thresh=0.5,   # IoU thresholds for pos/neg sampling in the fast rcnn loss\n",
    "                 box_batch_size_per_image=512, box_positive_fraction=0.25,  # samples per image for the fast rcnn loss, and its positive fraction\n",
    "                 bbox_reg_weights=None):\n",
    "\n",
    "        if not hasattr(backbone, \"out_channels\"):\n",
    "            raise ValueError(\n",
    "                \"backbone需要有out_channels参数\"\n",
    "                \"RPN所有输出层次的特征的out_channels都应该相同\"\n",
    "            )\n",
    "        assert isinstance(rpn_anchor_generator, (AnchorsGenerator, type(None)))\n",
    "        assert isinstance(box_roi_pool, (MultiScaleRoIAlign, type(None)))\n",
    "\n",
    "        if num_classes is not None:\n",
    "            if box_predictor is not None:\n",
    "                raise ValueError(\"num_classes should be None when box_predictor \"\n",
    "                                 \"is specified\")\n",
    "        else:\n",
    "            if box_predictor is None:\n",
    "                raise ValueError(\"num_classes should not be None when box_predictor \"\n",
    "                                 \"is not specified\")\n",
    "        out_channels = backbone.out_channels\n",
    "\n",
    "        if rpn_anchor_generator is None:\n",
    "            anchor_sizes = ((32,), (64,), (128,), (256,), (512,))   # (32,) keeps each size a tuple; bare (32) would be an int\n",
    "            aspect_ratios = ((0.5, 1.0, 2.0),) * len(anchor_sizes)\n",
    "            rpn_anchor_generator = AnchorsGenerator(\n",
    "                anchor_sizes, aspect_ratios\n",
    "            )\n",
    "        if rpn_head is None:\n",
    "            rpn_head = RPNHead(\n",
    "                out_channels, rpn_anchor_generator.num_anchors_per_location()[0]\n",
    "            )\n",
    "        rpn_pre_nms_top_n = dict(training=rpn_pre_nms_top_n_train, testing=rpn_pre_nms_top_n_test)\n",
    "        rpn_post_nms_top_n = dict(training=rpn_post_nms_top_n_train, testing=rpn_post_nms_top_n_test)\n",
    "\n",
    "        rpn = RegionProposalNetwork(\n",
    "            rpn_anchor_generator, rpn_head,\n",
    "            rpn_fg_iou_thresh, rpn_bg_iou_thresh,\n",
    "            rpn_batch_size_per_image, rpn_positive_fraction,\n",
    "            rpn_pre_nms_top_n, rpn_post_nms_top_n, rpn_nms_thresh,\n",
    "            score_thresh=rpn_score_thresh\n",
    "        )\n",
    "\n",
    "        if box_roi_pool is None:\n",
    "            box_roi_pool = MultiScaleRoIAlign(\n",
    "                featmap_names=['0', '1', '2', '3'],\n",
    "                output_size=[7, 7],\n",
    "                sampling_ratio=2\n",
    "            )\n",
    "        if box_head is None:\n",
    "            resolution = box_roi_pool.output_size[0]    # defaults to 7\n",
    "            representation_size = 1024\n",
    "            box_head = TwoMLPHead(\n",
    "                out_channels * resolution ** 2,\n",
    "                representation_size\n",
    "            )\n",
    "        if box_predictor is None:\n",
    "            representation_size = 1024\n",
    "            box_predictor = FastRCNNPredictor(\n",
    "                representation_size,\n",
    "                num_classes\n",
    "            )\n",
    "\n",
    "        roi_heads = RoIHeads(\n",
    "            box_roi_pool, box_head, box_predictor,\n",
    "            box_fg_iou_thresh, box_bg_iou_thresh,   # 0.5 0.5\n",
    "            box_batch_size_per_image, box_positive_fraction,\n",
    "            bbox_reg_weights,\n",
    "            box_score_thresh, box_nms_thresh, box_detections_per_img\n",
    "        )\n",
    "\n",
    "        if image_mean is None:\n",
    "            image_mean = [0.485, 0.456, 0.406]\n",
    "        if image_std is None:\n",
    "            image_std = [0.229, 0.224, 0.225]\n",
    "\n",
    "        # normalization, resizing and batching of the input images\n",
    "        transform = GeneralizedRCNNTransform(min_size, max_size, image_mean, image_std)\n",
    "\n",
    "        super(FasterRCNN, self).__init__(backbone, rpn, roi_heads, transform)"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "markdown",
   "source": [
    "# 预测"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "markdown",
   "source": [
    "## fpn"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "outputs": [],
   "source": [
    "def create_model(num_classes):\n",
    "    \"\"\"Build a Faster R-CNN with a ResNet-50 FPN backbone.\n",
    "\n",
    "    :param num_classes: number of classes including background\n",
    "    \"\"\"\n",
    "    backbone = resnet50_fpn_backbone(norm_layer=torch.nn.BatchNorm2d)\n",
    "    # NOTE(review): rpn_score_thresh=0.5 is unusually high for RPN filtering — confirm intent\n",
    "    return FasterRCNN(backbone=backbone, num_classes=num_classes, rpn_score_thresh=0.5)"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "markdown",
   "source": [
    "## vgg16"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "outputs": [],
   "source": [
    "def create_model(num_classes):\n",
    "    \"\"\"WIP: Faster R-CNN on a VGG16-BN backbone.\n",
    "\n",
    "    TODO(review): this cell is unfinished — it only builds the torchvision backbone\n",
    "    and implicitly returns None. It also shadows the FPN create_model defined above;\n",
    "    keep only one definition per kernel session.\n",
    "    \"\"\"\n",
    "    # BUG FIX: the import was truncated to `create_`; the public API is\n",
    "    # torchvision.models.feature_extraction.create_feature_extractor\n",
    "    from torchvision.models.feature_extraction import create_feature_extractor\n",
    "    backbone = torchvision.models.vgg16_bn(pretrained=True)"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "outputs": [],
   "source": [
    "\n",
    "\n",
    "def time_synchronized():\n",
    "    torch.cuda.synchronize() if torch.cuda.is_available() else None\n",
    "    return time.time()\n",
    "\n",
    "def main():\n",
    "    \"\"\"Load the trained Faster R-CNN weights and prepare the model for inference.\n",
    "\n",
    "    NOTE(review): this cell appears truncated — it stops after resolving the\n",
    "    class-index json path and never runs a prediction.\n",
    "    \"\"\"\n",
    "    device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n",
    "    print(\"using {} device\".format(device))\n",
    "\n",
    "    model = create_model(num_classes=21)  # 20 VOC classes + background\n",
    "    # load the trained model weights (checkpoint stores them under the \"model\" key)\n",
    "    train_weight = \"./save_weights/model.pth\"\n",
    "    assert os.path.exists(train_weight), \"{} file dose not exist\".format(train_weight)\n",
    "    model.load_state_dict(torch.load(train_weight, map_location=device)[\"model\"])\n",
    "    model.to(device)\n",
    "    # mapping from class indices to VOC category names\n",
    "    label_json_path = './pascal_voc_classes.json'"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "markdown",
   "source": [
    "# 训练"
   ],
   "metadata": {
    "collapsed": false
   }
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "VGG(\n",
      "  (features): Sequential(\n",
      "    (0): Conv2d(3, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
      "    (1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "    (2): ReLU(inplace=True)\n",
      "    (3): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
      "    (4): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "    (5): ReLU(inplace=True)\n",
      "    (6): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)\n",
      "    (7): Conv2d(64, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
      "    (8): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "    (9): ReLU(inplace=True)\n",
      "    (10): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
      "    (11): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "    (12): ReLU(inplace=True)\n",
      "    (13): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)\n",
      "    (14): Conv2d(128, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
      "    (15): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "    (16): ReLU(inplace=True)\n",
      "    (17): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
      "    (18): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "    (19): ReLU(inplace=True)\n",
      "    (20): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
      "    (21): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "    (22): ReLU(inplace=True)\n",
      "    (23): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)\n",
      "    (24): Conv2d(256, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
      "    (25): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "    (26): ReLU(inplace=True)\n",
      "    (27): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
      "    (28): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "    (29): ReLU(inplace=True)\n",
      "    (30): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
      "    (31): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "    (32): ReLU(inplace=True)\n",
      "    (33): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)\n",
      "    (34): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
      "    (35): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "    (36): ReLU(inplace=True)\n",
      "    (37): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
      "    (38): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "    (39): ReLU(inplace=True)\n",
      "    (40): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
      "    (41): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
      "    (42): ReLU(inplace=True)\n",
      "    (43): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)\n",
      "  )\n",
      "  (avgpool): AdaptiveAvgPool2d(output_size=(7, 7))\n",
      "  (classifier): Sequential(\n",
      "    (0): Linear(in_features=25088, out_features=4096, bias=True)\n",
      "    (1): ReLU(inplace=True)\n",
      "    (2): Dropout(p=0.5, inplace=False)\n",
      "    (3): Linear(in_features=4096, out_features=4096, bias=True)\n",
      "    (4): ReLU(inplace=True)\n",
      "    (5): Dropout(p=0.5, inplace=False)\n",
      "    (6): Linear(in_features=4096, out_features=1000, bias=True)\n",
      "  )\n",
      ")\n"
     ]
    }
   ],
   "source": [
    "backbone = torchvision.models.vgg16_bn(pretrained=False)\n",
    "print(backbone)\n",
    "backbone = "
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  }
 ],
 "metadata": {
  "interpreter": {
   "hash": "31f2aee4e71d21fbe5cf8b01ff0e069b9275f58929596ceb00d14d90e3e16cd6"
  },
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.9"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}