{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch.nn as nn\n",
    "import torch.nn.functional as F\n",
    "import numpy as np\n",
    "\n",
    "class FasterRCNN(nn.Module):\n",
    "    \"\"\"Faster R-CNN detector head: RPN proposals -> RoI pooling -> fc6/fc7\n",
    "    -> per-class score and per-class box regression.\"\"\"\n",
    "\n",
    "    # Defaults: 20 Pascal VOC categories plus '__background__'.\n",
    "    n_classes = 21\n",
    "    classes = np.asarray(['__background__',\n",
    "                       'aeroplane', 'bicycle', 'bird', 'boat',\n",
    "                       'bottle', 'bus', 'car', 'cat', 'chair',\n",
    "                       'cow', 'diningtable', 'dog', 'horse',\n",
    "                       'motorbike', 'person', 'pottedplant',\n",
    "                       'sheep', 'sofa', 'train', 'tvmonitor'])\n",
    "    PIXEL_MEANS = np.array([[[102.9801, 115.9465, 122.7717]]])  # per-channel means (BGR, Caffe-style)\n",
    "    SCALES = (600,)     # target length of the image's shorter side\n",
    "    MAX_SIZE = 1000     # cap on the longer side after rescaling\n",
    "\n",
    "    def __init__(self, classes=None, debug=False):\n",
    "        super(FasterRCNN, self).__init__()\n",
    "\n",
    "        if classes is not None:\n",
    "            self.classes = classes\n",
    "            self.n_classes = len(classes)\n",
    "\n",
    "        self.rpn = RPN()\n",
    "        # 7x7 RoI pooling at 1/16 spatial scale (conv feature stride).\n",
    "        self.roi_pool = RoIPool(7, 7, 1.0/16)\n",
    "        self.fc6 = FC(512 * 7 * 7, 4096)\n",
    "        self.fc7 = FC(4096, 4096)\n",
    "        self.score_fc = FC(4096, self.n_classes, relu=False)\n",
    "        self.bbox_fc = FC(4096, self.n_classes * 4, relu=False)\n",
    "\n",
    "        # losses, filled in by build_loss() during training\n",
    "        self.cross_entropy = None\n",
    "        self.loss_box = None\n",
    "\n",
    "    @property\n",
    "    def loss(self):\n",
    "        # Total loss: classification plus weighted box-regression term.\n",
    "        return self.cross_entropy + self.loss_box * 10\n",
    "\n",
    "    def forward(self, im_data, im_info, gt_boxes=None, gt_ishard=None, dontcare_areas=None):\n",
    "        features, rois = self.rpn(im_data, im_info, gt_boxes, gt_ishard, dontcare_areas)\n",
    "\n",
    "        if self.training:\n",
    "            # NOTE(review): proposal_target_layer is not defined in this class;\n",
    "            # presumably it is attached elsewhere -- confirm before training.\n",
    "            roi_data = self.proposal_target_layer(rois, gt_boxes, gt_ishard, dontcare_areas, self.n_classes)\n",
    "            rois = roi_data[0]\n",
    "\n",
    "        # RoI pooling, then the fc6/fc7 head with dropout (active in training only).\n",
    "        pooled_features = self.roi_pool(features, rois)\n",
    "        x = pooled_features.view(pooled_features.size()[0], -1)\n",
    "        x = self.fc6(x)\n",
    "        x = F.dropout(x, training=self.training)\n",
    "        x = self.fc7(x)\n",
    "        x = F.dropout(x, training=self.training)\n",
    "\n",
    "        cls_score = self.score_fc(x)\n",
    "        # dim=1: softmax over classes for each RoI (implicit dim is deprecated).\n",
    "        cls_prob = F.softmax(cls_score, dim=1)\n",
    "        bbox_pred = self.bbox_fc(x)\n",
    "\n",
    "        if self.training:\n",
    "            self.cross_entropy, self.loss_box = self.build_loss(cls_score, bbox_pred, roi_data)\n",
    "\n",
    "        return cls_prob, bbox_pred, rois"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "class Dataset:\n",
    "    \"\"\"Training dataset: reads VOC samples and applies the train-time transform.\"\"\"\n",
    "\n",
    "    def __init__(self, opt):\n",
    "        self.opt = opt\n",
    "        # Raw data source; for custom data, swap in any reader exposing get_example().\n",
    "        self.db = VOCBboxDataset(opt.voc_data_dir)\n",
    "        self.tsf = Transform(opt.min_size, opt.max_size)\n",
    "\n",
    "    def __getitem__(self, idx):\n",
    "        raw_img, raw_bbox, raw_label = self.db.get_example(idx)\n",
    "        # Rescale + random flip applied to the raw sample.\n",
    "        img, bbox, label, scale = self.tsf((raw_img, raw_bbox, raw_label))\n",
    "        # TODO: find which array carries a negative stride and fix only that one;\n",
    "        # until then copy everything so downstream tensor conversion works.\n",
    "        return img.copy(), bbox.copy(), label.copy(), scale\n",
    "\n",
    "    def __len__(self):\n",
    "        return len(self.db)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "class Transform(object):\n",
    "    \"\"\"Train-time transform: rescale the image and boxes, then a random horizontal flip.\"\"\"\n",
    "\n",
    "    def __init__(self, min_size=600, max_size=1000):\n",
    "        self.min_size = min_size\n",
    "        self.max_size = max_size\n",
    "\n",
    "    def __call__(self, in_data):\n",
    "        img, bbox, label = in_data\n",
    "        _, in_h, in_w = img.shape\n",
    "        img = preprocess(img, self.min_size, self.max_size)\n",
    "        _, out_h, out_w = img.shape\n",
    "        scale = out_h / in_h   # isotropic rescale factor actually applied\n",
    "        bbox = util.resize_bbox(bbox, (in_h, in_w), (out_h, out_w))\n",
    "\n",
    "        # Random horizontal flip; mirror the boxes to match the image.\n",
    "        img, flip_params = util.random_flip(\n",
    "            img, x_random=True, return_param=True)\n",
    "        bbox = util.flip_bbox(\n",
    "            bbox, (out_h, out_w), x_flip=flip_params['x_flip'])\n",
    "\n",
    "        return img, bbox, label, scale"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "class BasicBlock(nn.Module):\n",
    "    \"\"\"Two 3x3 convs plus a residual shortcut (ResNet-18/34 building block).\"\"\"\n",
    "\n",
    "    expansion = 1  # output channels == planes * expansion\n",
    "\n",
    "    def __init__(self, inplanes, planes, stride=1, downsample=None):\n",
    "        super(BasicBlock, self).__init__()\n",
    "        self.conv1 = conv3x3(inplanes, planes, stride)\n",
    "        self.bn1 = nn.BatchNorm2d(planes)\n",
    "        self.relu = nn.ReLU(inplace=True)\n",
    "        self.conv2 = conv3x3(planes, planes)\n",
    "        self.bn2 = nn.BatchNorm2d(planes)\n",
    "        self.downsample = downsample\n",
    "        self.stride = stride\n",
    "\n",
    "    def forward(self, x):\n",
    "        # Shortcut branch: projected when a downsample module was supplied.\n",
    "        shortcut = x if self.downsample is None else self.downsample(x)\n",
    "\n",
    "        y = self.relu(self.bn1(self.conv1(x)))\n",
    "        y = self.bn2(self.conv2(y))\n",
    "\n",
    "        y += shortcut\n",
    "        return self.relu(y)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "class Bottleneck(nn.Module):\n",
    "    \"\"\"1x1 -> 3x3 -> 1x1 bottleneck with a residual shortcut (ResNet-50+ block).\"\"\"\n",
    "\n",
    "    expansion = 4  # the final 1x1 conv widens channels by this factor\n",
    "\n",
    "    def __init__(self, inplanes, planes, stride=1, downsample=None):\n",
    "        super(Bottleneck, self).__init__()\n",
    "        self.conv1 = conv1x1(inplanes, planes)\n",
    "        self.bn1 = nn.BatchNorm2d(planes)\n",
    "        self.conv2 = conv3x3(planes, planes, stride)\n",
    "        self.bn2 = nn.BatchNorm2d(planes)\n",
    "        self.conv3 = conv1x1(planes, planes * self.expansion)\n",
    "        self.bn3 = nn.BatchNorm2d(planes * self.expansion)\n",
    "        self.relu = nn.ReLU(inplace=True)\n",
    "        self.downsample = downsample\n",
    "        self.stride = stride\n",
    "\n",
    "    def forward(self, x):\n",
    "        # Shortcut branch: projected when a downsample module was supplied.\n",
    "        shortcut = x if self.downsample is None else self.downsample(x)\n",
    "\n",
    "        y = self.relu(self.bn1(self.conv1(x)))   # reduce channels\n",
    "        y = self.relu(self.bn2(self.conv2(y)))   # 3x3 spatial conv (may stride)\n",
    "        y = self.bn3(self.conv3(y))              # expand channels back\n",
    "\n",
    "        y += shortcut\n",
    "        return self.relu(y)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
    "class ResNet(nn.Module):\n",
    "\n",
    "    def __init__(self, block, layers, num_classes=1000):\n",
    "        self.inplanes = 64\n",
    "        super(ResNet, self).__init__()\n",
    "        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,\n",
    "                               bias=False)\n",
    "        self.bn1 = nn.BatchNorm2d(64)\n",
    "        self.relu = nn.ReLU(inplace=True)\n",
    "        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n",
    "        \n",
    "        self.layer1 = self._make_layer(block, 64, layers[0])\n",
    "        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)\n",
    "        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)\n",
    "        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)\n",
    "        \n",
    "        self.avgpool = nn.AvgPool2d(7, stride=1)\n",
    "        self.fc = nn.Linear(512 * block.expansion, num_classes)\n",
    "\n",
    "        for m in self.modules():\n",
    "            if isinstance(m, nn.Conv2d):\n",
    "                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n",
    "                m.weight.data.normal_(0, math.sqrt(2. / n))\n",
    "            elif isinstance(m, nn.BatchNorm2d):\n",
    "                m.weight.data.fill_(1)\n",
    "                m.bias.data.zero_()\n",
    "\n",
    "    def _make_layer(self, block, planes, blocks, stride=1):\n",
    "        downsample = None\n",
    "        if stride != 1 or self.inplanes != planes * block.expansion:\n",
    "            downsample = nn.Sequential(\n",
    "                nn.Conv2d(self.inplanes, planes * block.expansion,\n",
    "                          kernel_size=1, stride=stride, bias=False),\n",
    "                nn.BatchNorm2d(planes * block.expansion),\n",
    "            )\n",
    "\n",
    "        layers = []\n",
    "        layers.append(block(self.inplanes, planes, stride, downsample))\n",
    "        self.inplanes = planes * block.expansion\n",
    "        for i in range(1, blocks):\n",
    "            layers.append(block(self.inplanes, planes))\n",
    "\n",
    "        return nn.Sequential(*layers)\n",
    "\n",
    "    def forward(self, x):\n",
    "        x = self.conv1(x)\n",
    "        x = self.bn1(x)\n",
    "        x = self.relu(x)\n",
    "        x = self.maxpool(x)\n",
    "\n",
    "        x = self.layer1(x)\n",
    "        x = self.layer2(x)\n",
    "        x = self.layer3(x)\n",
    "        x = self.layer4(x)\n",
    "\n",
    "        x = self.avgpool(x)\n",
    "        x = x.view(x.size(0), -1)\n",
    "        x = self.fc(x)\n",
    "\n",
    "        return x"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [],
   "source": [
    "def proposal_layer(rpn_cls_prob_reshape, rpn_bbox_pred, im_info, cfg_key, _feat_stride=[16, ],\n",
    "                   anchor_scales=[8, 16, 32]):\n",
    "    \"\"\"\n",
    "    Parameters\n",
    "    ----------\n",
    "    rpn_cls_prob_reshape: (1 , H , W , Ax2) outputs of RPN, prob of bg or fg\n",
    "                         NOTICE: the old version is ordered by (1, H, W, 2, A) !!!!\n",
    "    rpn_bbox_pred: (1 , H , W , Ax4), rgs boxes output of RPN\n",
    "    im_info: a list of [image_height, image_width, scale_ratios]\n",
    "    cfg_key: 'TRAIN' or 'TEST'\n",
    "    _feat_stride: the downsampling ratio of feature map to the original input image\n",
    "    anchor_scales: the scales to the basic_anchor (basic anchor is [16, 16])\n",
    "    ----------\n",
    "    Returns\n",
    "    ----------\n",
    "    rpn_rois : (1 x H x W x A, 5) e.g. [0, x1, y1, x2, y2]\n",
    "\n",
    "    \"\"\"\n",
    "    \n",
    "    # NOTE: the base (prior) anchors are generated right here.\n",
    "    _anchors = generate_anchors(scales=np.array(anchor_scales))\n",
    "    \n",
    "    _num_anchors = _anchors.shape[0]\n",
    "    # rpn_cls_prob_reshape = np.transpose(rpn_cls_prob_reshape,[0,3,1,2]) #-> (1 , 2xA, H , W)\n",
    "    # rpn_bbox_pred = np.transpose(rpn_bbox_pred,[0,3,1,2])              # -> (1 , Ax4, H , W)\n",
    "\n",
    "    # rpn_cls_prob_reshape = np.transpose(np.reshape(rpn_cls_prob_reshape,[1,rpn_cls_prob_reshape.shape[0],rpn_cls_prob_reshape.shape[1],rpn_cls_prob_reshape.shape[2]]),[0,3,2,1])\n",
    "    # rpn_bbox_pred = np.transpose(rpn_bbox_pred,[0,3,2,1])\n",
    "    im_info = im_info[0]\n",
    "\n",
    "    assert rpn_cls_prob_reshape.shape[0] == 1, \\\n",
    "        'Only single item batches are supported'\n",
    "    # cfg_key = str(self.phase) # either 'TRAIN' or 'TEST'\n",
    "    # cfg_key = 'TEST'\n",
    "    pre_nms_topN = cfg[cfg_key].RPN_PRE_NMS_TOP_N\n",
    "    post_nms_topN = cfg[cfg_key].RPN_POST_NMS_TOP_N\n",
    "    \n",
    "    # NMS IoU threshold -- this value strongly affects proposal quality.\n",
    "    nms_thresh = cfg[cfg_key].RPN_NMS_THRESH\n",
    "    \n",
    "    \n",
    "    min_size = cfg[cfg_key].RPN_MIN_SIZE\n",
    "\n",
    "    # the first set of _num_anchors channels are bg probs\n",
    "    # the second set are the fg probs, which we want\n",
    "    scores = rpn_cls_prob_reshape[:, _num_anchors:, :, :]\n",
    "    bbox_deltas = rpn_bbox_pred\n",
    "    # im_info = bottom[2].data[0, :]\n",
    "\n",
    "    # 1. Generate proposals from bbox deltas and shifted anchors\n",
    "    height, width = scores.shape[-2:]\n",
    "\n",
    "\n",
    "    # Enumerate all shifts\n",
    "    shift_x = np.arange(0, width) * _feat_stride\n",
    "    shift_y = np.arange(0, height) * _feat_stride\n",
    "    shift_x, shift_y = np.meshgrid(shift_x, shift_y)\n",
    "    shifts = np.vstack((shift_x.ravel(), shift_y.ravel(),\n",
    "                        shift_x.ravel(), shift_y.ravel())).transpose()\n",
    "\n",
    "    # Enumerate all shifted anchors:\n",
    "    #\n",
    "    # add A anchors (1, A, 4) to\n",
    "    # cell K shifts (K, 1, 4) to get\n",
    "    # shift anchors (K, A, 4)\n",
    "    # reshape to (K*A, 4) shifted anchors\n",
    "    A = _num_anchors\n",
    "    K = shifts.shape[0]\n",
    "    anchors = _anchors.reshape((1, A, 4)) + \\\n",
    "              shifts.reshape((1, K, 4)).transpose((1, 0, 2))\n",
    "    anchors = anchors.reshape((K * A, 4))\n",
    "\n",
    "    # Transpose and reshape predicted bbox transformations to get them\n",
    "    # into the same order as the anchors:\n",
    "    #\n",
    "    # bbox deltas will be (1, 4 * A, H, W) format\n",
    "    # transpose to (1, H, W, 4 * A)\n",
    "    # reshape to (1 * H * W * A, 4) where rows are ordered by (h, w, a)\n",
    "    # in slowest to fastest order\n",
    "    bbox_deltas = bbox_deltas.transpose((0, 2, 3, 1)).reshape((-1, 4))\n",
    "\n",
    "    # Same story for the scores:\n",
    "    #\n",
    "    # scores are (1, A, H, W) format\n",
    "    # transpose to (1, H, W, A)\n",
    "    # reshape to (1 * H * W * A, 1) where rows are ordered by (h, w, a)\n",
    "    scores = scores.transpose((0, 2, 3, 1)).reshape((-1, 1))\n",
    "\n",
    "    # Convert anchors into proposals via bbox transformations\n",
    "    proposals = bbox_transform_inv(anchors, bbox_deltas)\n",
    "\n",
    "    # 2. clip predicted boxes to image\n",
    "    proposals = clip_boxes(proposals, im_info[:2])\n",
    "\n",
    "    # 3. remove predicted boxes with either height or width < threshold\n",
    "    # (NOTE: convert min_size to input image scale stored in im_info[2])\n",
    "    keep = _filter_boxes(proposals, min_size * im_info[2])\n",
    "    proposals = proposals[keep, :]\n",
    "    scores = scores[keep]\n",
    "\n",
    "    # # remove irregular boxes, too fat too tall\n",
    "    # keep = _filter_irregular_boxes(proposals)\n",
    "    # proposals = proposals[keep, :]\n",
    "    # scores = scores[keep]\n",
    "\n",
    "    # 4. sort all (proposal, score) pairs by score from highest to lowest\n",
    "    # 5. take top pre_nms_topN (e.g. 6000)\n",
    "    order = scores.ravel().argsort()[::-1]\n",
    "    if pre_nms_topN > 0:\n",
    "        order = order[:pre_nms_topN]\n",
    "    proposals = proposals[order, :]\n",
    "    scores = scores[order]\n",
    "\n",
    "    # 6. apply nms (e.g. threshold = 0.7)\n",
    "    # 7. take after_nms_topN (e.g. 300)\n",
    "    # 8. return the top proposals (-> RoIs top)\n",
    "    # NOTE(review): expects a Caffe-style nms(dets, thresh) taking stacked\n",
    "    # [x1, y1, x2, y2, score] rows -- not the torch nms defined later in this\n",
    "    # notebook, whose signature differs. Confirm which nms is in scope.\n",
    "    keep = nms(np.hstack((proposals, scores)), nms_thresh)\n",
    "\n",
    "\n",
    "    if post_nms_topN > 0:\n",
    "        keep = keep[:post_nms_topN]\n",
    "    proposals = proposals[keep, :]\n",
    "    scores = scores[keep]\n",
    "\n",
    "    # Output rois blob\n",
    "    # Our RPN implementation only supports a single input image, so all\n",
    "    # batch inds are 0\n",
    "    batch_inds = np.zeros((proposals.shape[0], 1), dtype=np.float32)\n",
    "    blob = np.hstack((batch_inds, proposals.astype(np.float32, copy=False)))\n",
    "    return blob\n",
    "    # top[0].reshape(*(blob.shape))\n",
    "    # top[0].data[...] = blob\n",
    "\n",
    "    # [Optional] output scores blob\n",
    "    # if len(top) > 1:\n",
    "    #    top[1].reshape(*(scores.shape))\n",
    "    #    top[1].data[...] = scores"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [],
   "source": [
    "#torch.numel() 表示一个张量总元素的个数\n",
    "#torch.clamp(min, max) 设置上下限\n",
    "#tensor.item() 把tensor元素取出作为numpy数字\n",
    "\n",
    "def nms(self, bboxes, scores, threshold=0.5):\n",
    "        x1 = bboxes[:,0]\n",
    "        y1 = bboxes[:,1]\n",
    "        x2 = bboxes[:,2]\n",
    "        y2 = bboxes[:,3]\n",
    "        areas = (x2-x1)*(y2-y1)   # [N,] 每个bbox的面积\n",
    "        _, order = scores.sort(0, descending=True)    # 降序排列\n",
    "\n",
    "        keep = []\n",
    "        while order.numel() > 0:       # torch.numel()返回张量元素个数\n",
    "            if order.numel() == 1:     # 保留框只剩一个\n",
    "                i = order.item()\n",
    "                keep.append(i)\n",
    "                break\n",
    "            else:\n",
    "                i = order[0].item()    # 保留scores最大的那个框box[i]\n",
    "                keep.append(i)\n",
    "\n",
    "            # 计算box[i]与其余各框的IOU(思路很好)\n",
    "            xx1 = x1[order[1:]].clamp(min=x1[i])   # [N-1,]\n",
    "            yy1 = y1[order[1:]].clamp(min=y1[i])\n",
    "            xx2 = x2[order[1:]].clamp(max=x2[i])\n",
    "            yy2 = y2[order[1:]].clamp(max=y2[i])\n",
    "            inter = (xx2-xx1).clamp(min=0) * (yy2-yy1).clamp(min=0)   # [N-1,]\n",
    "\n",
    "            iou = inter / (areas[i]+areas[order[1:]]-inter)  # [N-1,]\n",
    "            idx = (iou <= threshold).nonzero().squeeze() # 注意此时idx为[N-1,] 而order为[N,]\n",
    "            if idx.numel() == 0:\n",
    "                break\n",
    "            order = order[idx+1]  # 修补索引之间的差值\n",
    "        return torch.LongTensor(keep)   # Pytorch的索引值为LongTensor"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.5"
  },
  "tianchi_metadata": {
   "competitions": [],
   "datasets": [],
   "description": "",
   "notebookId": "60370",
   "source": "ailab"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
