{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# ps detection\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "import copy\n",
    "import cv2\n",
    "import numpy as np\n",
    "\n",
    "import torch\n",
    "import torch.nn.functional as F\n",
    "import torchvision\n",
    "\n",
    "from torch import nn\n",
    "from torchvision.ops import MultiScaleRoIAlign\n",
    "from typing import List, Tuple, Dict\n",
    "\n",
    "from my_py_toolkit.file.file_toolkit import *\n",
    "from torch.optim import Adam\n",
    "from torch.utils.data import DataLoader, Dataset, TensorDataset\n",
    "from torchvision.models.detection._utils import overwrite_eps\n",
    "from torchvision._internally_replaced_utils import load_state_dict_from_url\n",
    "\n",
    "from torchvision.models.detection.anchor_utils import AnchorGenerator\n",
    "from torchvision.models.detection.generalized_rcnn import GeneralizedRCNN\n",
    "from torchvision.models.detection.rpn import RPNHead, RegionProposalNetwork\n",
    "# from torchvision.models.detection.roi_heads import RoIHeads\n",
    "from torchvision.models.detection.transform import GeneralizedRCNNTransform\n",
    "from torchvision.models.detection.backbone_utils import resnet_fpn_backbone, _validate_trainable_layers, mobilenet_backbone\n",
    "from torchvision.models.detection import _utils as det_utils\n",
    "from torchvision.models.detection.roi_heads import fastrcnn_loss, maskrcnn_loss, maskrcnn_inference, keypointrcnn_loss, keypointrcnn_inference\n",
    "from torchvision.ops import boxes as box_ops\n",
    "from tqdm import tqdm"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "def SRM(imgs):\n",
    "    # 第一层滤波器\n",
    "    # 定义三个滤波器,滤波器大小为5x5\n",
    "    # filter1: egde3*3\n",
    "    filter2 = [[0, 0, 0, 0, 0],\n",
    "               [0, -1, 2, -1, 0],\n",
    "               [0, 2, -4, 2, 0],\n",
    "               [0, -1, 2, -1, 0],\n",
    "               [0, 0, 0, 0, 0]]\n",
    "    # filter2：egde5*5\n",
    "    filter1 = [[-1, 2, -2, 2, -1],\n",
    "               [2, -6, 8, -6, 2],\n",
    "               [-2, 8, -12, 8, -2],\n",
    "               [2, -6, 8, -6, 2],\n",
    "               [-1, 2, -2, 2, -1]]\n",
    "    # filter3：一阶线性\n",
    "    filter3 = [[0, 0, 0, 0, 0],\n",
    "               [0, 0, 1, 0, 0],\n",
    "               [0, 0,-2, 0, 0],\n",
    "               [0, 0, 1, 0, 0],\n",
    "               [0, 0, 0, 0, 0]]\n",
    "    # 定义q，将三个滤波器归一化\n",
    "    q = [4.0, 12.0, 2.0]\n",
    "    filter1 = np.asarray(filter1, dtype=float) / 4\n",
    "    filter2 = np.asarray(filter2, dtype=float) / 12\n",
    "    filter3 = np.asarray(filter3, dtype=float) / 2\n",
    "    # 将不同类的滤波器堆叠、处理，得到新滤波器\n",
    "    filters = [[filter1, filter1, filter1], [filter2, filter2, filter2], [filter3, filter3, filter3]]# (3,3,5,5)\n",
    "    #print(np.array(filters).shape)\n",
    "    #filters = np.einsum('klij->klij', filters)  # new_filter(i,j,l,k) = origin_filter(k,l,i,j) # (5,5,3,3)\n",
    "    filters = torch.FloatTensor(filters)    # (3,3,5,5)\n",
    "    # imgs = np.array(imgs, dtype=float)  # (375,500,3)\n",
    "    # #imgs = imgs[:, :, np.newaxis, :]\n",
    "    # #print(\"img shape\", imgs.shape)\n",
    "    # imgs = np.einsum('klij->kjli', imgs)\n",
    "    # #print(\"img shape\", imgs.shape)\n",
    "    # input = torch.tensor(imgs, dtype=torch.float32)\n",
    "    # 未标出的卷积参数：use_cudnn_on_gpu=True, data_format=\"NHWC\", dilations=[1, 1, 1, 1], name=None\n",
    "    # 得到第一层输出：op\n",
    "    #op = tf.nn.conv2d(input, filters, strides=[1, 1, 1, 1], padding='SAME')\n",
    "    # [B, C, H, W], [out, in, H, W]\n",
    "    op1 = F.conv2d(imgs, filters, stride=1, padding=2)\n",
    "    \n",
    "    return op1"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "class TwoMLPHead(nn.Module):\n",
    "    \"\"\"\n",
    "    Standard heads for FPN-based models\n",
    "\n",
    "\n",
    "    Args:\n",
    "        in_channels (int): number of input channels\n",
    "        representation_size (int): size of the intermediate representation\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, in_channels, representation_size):\n",
    "        super(TwoMLPHead, self).__init__()\n",
    "\n",
    "        self.fc6 = nn.Linear(in_channels, representation_size)\n",
    "        self.fc7 = nn.Linear(representation_size, representation_size)\n",
    "\n",
    "    def forward(self, x):\n",
    "        x = x.flatten(start_dim=1)\n",
    "\n",
    "        x = F.relu(self.fc6(x))\n",
    "        x = F.relu(self.fc7(x))\n",
    "\n",
    "        return x\n",
    "\n",
    "\n",
    "class FastRCNNPredictor(nn.Module):\n",
    "    \"\"\"\n",
    "    Standard classification + bounding box regression layers\n",
    "    for Fast R-CNN.\n",
    "\n",
    "    Args:\n",
    "        in_channels (int): number of input channels\n",
    "        num_classes (int): number of output classes (including background)\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, in_channels, num_classes):\n",
    "        super(FastRCNNPredictor, self).__init__()\n",
    "        self.cls_score = nn.Linear(in_channels, num_classes)\n",
    "        self.bbox_pred = nn.Linear(in_channels, num_classes * 4)\n",
    "\n",
    "    def forward(self, x, noise):\n",
    "        if x.dim() == 4:\n",
    "            assert list(x.shape[2:]) == [1, 1]\n",
    "        # x = x.flatten(start_dim=1)\n",
    "        bbox_deltas = self.bbox_pred(x.flatten(start_dim=1))\n",
    "        \n",
    "        # bilinear  pooling\n",
    "        bl = x.transpose(-1, -2) @ noise\n",
    "        bl = bl.abs().sqrt() * bl.sign()\n",
    "        # todo: 这里看下该 normal 哪个维度\n",
    "        b1 = F.normalize(b1, dim=1)\n",
    "        bl = bl.flatten(start_dim=1)\n",
    "        scores = self.cls_score(bl)\n",
    "\n",
    "        return scores, bbox_deltas"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "class RoIHeads(nn.Module):\n",
    "    __annotations__ = {\n",
    "        'box_coder': det_utils.BoxCoder,\n",
    "        'proposal_matcher': det_utils.Matcher,\n",
    "        'fg_bg_sampler': det_utils.BalancedPositiveNegativeSampler,\n",
    "    }\n",
    "\n",
    "    def __init__(self,\n",
    "                 box_roi_pool,\n",
    "                 box_head,\n",
    "                 box_predictor,\n",
    "                 # Faster R-CNN training\n",
    "                 fg_iou_thresh, bg_iou_thresh,\n",
    "                 batch_size_per_image, positive_fraction,\n",
    "                 bbox_reg_weights,\n",
    "                 # Faster R-CNN inference\n",
    "                 score_thresh,\n",
    "                 nms_thresh,\n",
    "                 detections_per_img,\n",
    "                 # Mask\n",
    "                 mask_roi_pool=None,\n",
    "                 mask_head=None,\n",
    "                 mask_predictor=None,\n",
    "                 keypoint_roi_pool=None,\n",
    "                 keypoint_head=None,\n",
    "                 keypoint_predictor=None,\n",
    "                 ):\n",
    "        super(RoIHeads, self).__init__()\n",
    "\n",
    "        self.box_similarity = box_ops.box_iou\n",
    "        # assign ground-truth boxes for each proposal\n",
    "        self.proposal_matcher = det_utils.Matcher(\n",
    "            fg_iou_thresh,\n",
    "            bg_iou_thresh,\n",
    "            allow_low_quality_matches=False)\n",
    "\n",
    "        self.fg_bg_sampler = det_utils.BalancedPositiveNegativeSampler(\n",
    "            batch_size_per_image,\n",
    "            positive_fraction)\n",
    "\n",
    "        if bbox_reg_weights is None:\n",
    "            bbox_reg_weights = (10., 10., 5., 5.)\n",
    "        self.box_coder = det_utils.BoxCoder(bbox_reg_weights)\n",
    "\n",
    "        self.box_roi_pool = box_roi_pool\n",
    "        self.box_head = box_head\n",
    "        self.box_predictor = box_predictor\n",
    "\n",
    "        self.score_thresh = score_thresh\n",
    "        self.nms_thresh = nms_thresh\n",
    "        self.detections_per_img = detections_per_img\n",
    "\n",
    "        self.mask_roi_pool = mask_roi_pool\n",
    "        self.mask_head = mask_head\n",
    "        self.mask_predictor = mask_predictor\n",
    "\n",
    "        self.keypoint_roi_pool = keypoint_roi_pool\n",
    "        self.keypoint_head = keypoint_head\n",
    "        self.keypoint_predictor = keypoint_predictor\n",
    "\n",
    "    def has_mask(self):\n",
    "        if self.mask_roi_pool is None:\n",
    "            return False\n",
    "        if self.mask_head is None:\n",
    "            return False\n",
    "        if self.mask_predictor is None:\n",
    "            return False\n",
    "        return True\n",
    "\n",
    "    def has_keypoint(self):\n",
    "        if self.keypoint_roi_pool is None:\n",
    "            return False\n",
    "        if self.keypoint_head is None:\n",
    "            return False\n",
    "        if self.keypoint_predictor is None:\n",
    "            return False\n",
    "        return True\n",
    "\n",
    "    def assign_targets_to_proposals(self, proposals, gt_boxes, gt_labels):\n",
    "        # type: (List[Tensor], List[Tensor], List[Tensor]) -> Tuple[List[Tensor], List[Tensor]]\n",
    "        matched_idxs = []\n",
    "        labels = []\n",
    "        for proposals_in_image, gt_boxes_in_image, gt_labels_in_image in zip(proposals, gt_boxes, gt_labels):\n",
    "\n",
    "            if gt_boxes_in_image.numel() == 0:\n",
    "                # Background image\n",
    "                device = proposals_in_image.device\n",
    "                clamped_matched_idxs_in_image = torch.zeros(\n",
    "                    (proposals_in_image.shape[0],), dtype=torch.int64, device=device\n",
    "                )\n",
    "                labels_in_image = torch.zeros(\n",
    "                    (proposals_in_image.shape[0],), dtype=torch.int64, device=device\n",
    "                )\n",
    "            else:\n",
    "                #  set to self.box_similarity when https://github.com/pytorch/pytorch/issues/27495 lands\n",
    "                match_quality_matrix = box_ops.box_iou(gt_boxes_in_image, proposals_in_image)\n",
    "                matched_idxs_in_image = self.proposal_matcher(match_quality_matrix)\n",
    "\n",
    "                clamped_matched_idxs_in_image = matched_idxs_in_image.clamp(min=0)\n",
    "\n",
    "                labels_in_image = gt_labels_in_image[clamped_matched_idxs_in_image]\n",
    "                labels_in_image = labels_in_image.to(dtype=torch.int64)\n",
    "\n",
    "                # Label background (below the low threshold)\n",
    "                bg_inds = matched_idxs_in_image == self.proposal_matcher.BELOW_LOW_THRESHOLD\n",
    "                labels_in_image[bg_inds] = 0\n",
    "\n",
    "                # Label ignore proposals (between low and high thresholds)\n",
    "                ignore_inds = matched_idxs_in_image == self.proposal_matcher.BETWEEN_THRESHOLDS\n",
    "                labels_in_image[ignore_inds] = -1  # -1 is ignored by sampler\n",
    "\n",
    "            matched_idxs.append(clamped_matched_idxs_in_image)\n",
    "            labels.append(labels_in_image)\n",
    "        return matched_idxs, labels\n",
    "\n",
    "    def subsample(self, labels):\n",
    "        # type: (List[Tensor]) -> List[Tensor]\n",
    "        sampled_pos_inds, sampled_neg_inds = self.fg_bg_sampler(labels)\n",
    "        sampled_inds = []\n",
    "        for img_idx, (pos_inds_img, neg_inds_img) in enumerate(\n",
    "            zip(sampled_pos_inds, sampled_neg_inds)\n",
    "        ):\n",
    "            img_sampled_inds = torch.where(pos_inds_img | neg_inds_img)[0]\n",
    "            sampled_inds.append(img_sampled_inds)\n",
    "        return sampled_inds\n",
    "\n",
    "    def add_gt_proposals(self, proposals, gt_boxes):\n",
    "        # type: (List[Tensor], List[Tensor]) -> List[Tensor]\n",
    "        proposals = [\n",
    "            torch.cat((proposal, gt_box))\n",
    "            for proposal, gt_box in zip(proposals, gt_boxes)\n",
    "        ]\n",
    "\n",
    "        return proposals\n",
    "\n",
    "    def check_targets(self, targets):\n",
    "        # type: (Optional[List[Dict[str, Tensor]]]) -> None\n",
    "        assert targets is not None\n",
    "        assert all([\"boxes\" in t for t in targets])\n",
    "        assert all([\"labels\" in t for t in targets])\n",
    "        if self.has_mask():\n",
    "            assert all([\"masks\" in t for t in targets])\n",
    "\n",
    "    def select_training_samples(self,\n",
    "                                proposals,  # type: List[Tensor]\n",
    "                                targets     # type: Optional[List[Dict[str, Tensor]]]\n",
    "                                ):\n",
    "        # type: (...) -> Tuple[List[Tensor], List[Tensor], List[Tensor], List[Tensor]]\n",
    "        self.check_targets(targets)\n",
    "        assert targets is not None\n",
    "        dtype = proposals[0].dtype\n",
    "        device = proposals[0].device\n",
    "\n",
    "        gt_boxes = [t[\"boxes\"].to(dtype) for t in targets]\n",
    "        gt_labels = [t[\"labels\"] for t in targets]\n",
    "\n",
    "        # append ground-truth bboxes to propos\n",
    "        proposals = self.add_gt_proposals(proposals, gt_boxes)\n",
    "\n",
    "        # get matching gt indices for each proposal\n",
    "        matched_idxs, labels = self.assign_targets_to_proposals(proposals, gt_boxes, gt_labels)\n",
    "        # sample a fixed proportion of positive-negative proposals\n",
    "        sampled_inds = self.subsample(labels)\n",
    "        matched_gt_boxes = []\n",
    "        num_images = len(proposals)\n",
    "        for img_id in range(num_images):\n",
    "            img_sampled_inds = sampled_inds[img_id]\n",
    "            proposals[img_id] = proposals[img_id][img_sampled_inds]\n",
    "            labels[img_id] = labels[img_id][img_sampled_inds]\n",
    "            matched_idxs[img_id] = matched_idxs[img_id][img_sampled_inds]\n",
    "\n",
    "            gt_boxes_in_image = gt_boxes[img_id]\n",
    "            if gt_boxes_in_image.numel() == 0:\n",
    "                gt_boxes_in_image = torch.zeros((1, 4), dtype=dtype, device=device)\n",
    "            matched_gt_boxes.append(gt_boxes_in_image[matched_idxs[img_id]])\n",
    "\n",
    "        regression_targets = self.box_coder.encode(matched_gt_boxes, proposals)\n",
    "        return proposals, matched_idxs, labels, regression_targets\n",
    "\n",
    "    def postprocess_detections(self,\n",
    "                               class_logits,    # type: Tensor\n",
    "                               box_regression,  # type: Tensor\n",
    "                               proposals,       # type: List[Tensor]\n",
    "                               image_shapes     # type: List[Tuple[int, int]]\n",
    "                               ):\n",
    "        # type: (...) -> Tuple[List[Tensor], List[Tensor], List[Tensor]]\n",
    "        device = class_logits.device\n",
    "        num_classes = class_logits.shape[-1]\n",
    "\n",
    "        boxes_per_image = [boxes_in_image.shape[0] for boxes_in_image in proposals]\n",
    "        pred_boxes = self.box_coder.decode(box_regression, proposals)\n",
    "\n",
    "        pred_scores = F.softmax(class_logits, -1)\n",
    "\n",
    "        pred_boxes_list = pred_boxes.split(boxes_per_image, 0)\n",
    "        pred_scores_list = pred_scores.split(boxes_per_image, 0)\n",
    "\n",
    "        all_boxes = []\n",
    "        all_scores = []\n",
    "        all_labels = []\n",
    "        for boxes, scores, image_shape in zip(pred_boxes_list, pred_scores_list, image_shapes):\n",
    "            boxes = box_ops.clip_boxes_to_image(boxes, image_shape)\n",
    "\n",
    "            # create labels for each prediction\n",
    "            labels = torch.arange(num_classes, device=device)\n",
    "            labels = labels.view(1, -1).expand_as(scores)\n",
    "\n",
    "            # remove predictions with the background label\n",
    "            boxes = boxes[:, 1:]\n",
    "            scores = scores[:, 1:]\n",
    "            labels = labels[:, 1:]\n",
    "\n",
    "            # batch everything, by making every class prediction be a separate instance\n",
    "            boxes = boxes.reshape(-1, 4)\n",
    "            scores = scores.reshape(-1)\n",
    "            labels = labels.reshape(-1)\n",
    "\n",
    "            # remove low scoring boxes\n",
    "            inds = torch.where(scores > self.score_thresh)[0]\n",
    "            boxes, scores, labels = boxes[inds], scores[inds], labels[inds]\n",
    "\n",
    "            # remove empty boxes\n",
    "            keep = box_ops.remove_small_boxes(boxes, min_size=1e-2)\n",
    "            boxes, scores, labels = boxes[keep], scores[keep], labels[keep]\n",
    "\n",
    "            # non-maximum suppression, independently done per class\n",
    "            keep = box_ops.batched_nms(boxes, scores, labels, self.nms_thresh)\n",
    "            # keep only topk scoring predictions\n",
    "            keep = keep[:self.detections_per_img]\n",
    "            boxes, scores, labels = boxes[keep], scores[keep], labels[keep]\n",
    "\n",
    "            all_boxes.append(boxes)\n",
    "            all_scores.append(scores)\n",
    "            all_labels.append(labels)\n",
    "\n",
    "        return all_boxes, all_scores, all_labels\n",
    "\n",
    "    def forward(self,\n",
    "                features,      # type: Dict[str, Tensor]\n",
    "                noise_features,\n",
    "                proposals,     # type: List[Tensor]\n",
    "                image_shapes,  # type: List[Tuple[int, int]]\n",
    "                targets=None   # type: Optional[List[Dict[str, Tensor]]]\n",
    "                ):\n",
    "        # type: (...) -> Tuple[List[Dict[str, Tensor]], Dict[str, Tensor]]\n",
    "        \"\"\"\n",
    "        Args:\n",
    "            features (List[Tensor])\n",
    "            proposals (List[Tensor[N, 4]])\n",
    "            image_shapes (List[Tuple[H, W]])\n",
    "            targets (List[Dict])\n",
    "        \"\"\"\n",
    "        if targets is not None:\n",
    "            for t in targets:\n",
    "                # TODO: https://github.com/pytorch/pytorch/issues/26731\n",
    "                floating_point_types = (torch.float, torch.double, torch.half)\n",
    "                assert t[\"boxes\"].dtype in floating_point_types, 'target boxes must of float type'\n",
    "                assert t[\"labels\"].dtype == torch.int64, 'target labels must of int64 type'\n",
    "                if self.has_keypoint():\n",
    "                    assert t[\"keypoints\"].dtype == torch.float32, 'target keypoints must of float type'\n",
    "\n",
    "        if self.training:\n",
    "            proposals, matched_idxs, labels, regression_targets = self.select_training_samples(proposals, targets)\n",
    "        else:\n",
    "            labels = None\n",
    "            regression_targets = None\n",
    "            matched_idxs = None\n",
    "\n",
    "        box_features = self.box_roi_pool(features, proposals, image_shapes)\n",
    "        box_features = self.box_head(box_features)\n",
    "        box_features_noise = self.box_roi_pool(noise_features, proposals, image_shapes)\n",
    "        box_features_noise = self.box_head(box_features_noise)\n",
    "        class_logits, box_regression = self.box_predictor(box_features, box_features_noise)\n",
    "        \n",
    "        \n",
    "\n",
    "        result: List[Dict[str, torch.Tensor]] = []\n",
    "        losses = {}\n",
    "        if self.training:\n",
    "            assert labels is not None and regression_targets is not None\n",
    "            loss_classifier, loss_box_reg = fastrcnn_loss(\n",
    "                class_logits, box_regression, labels, regression_targets)\n",
    "            losses = {\n",
    "                \"loss_classifier\": loss_classifier,\n",
    "                \"loss_box_reg\": loss_box_reg\n",
    "            }\n",
    "        else:\n",
    "            boxes, scores, labels = self.postprocess_detections(class_logits, box_regression, proposals, image_shapes)\n",
    "            num_images = len(boxes)\n",
    "            for i in range(num_images):\n",
    "                result.append(\n",
    "                    {\n",
    "                        \"boxes\": boxes[i],\n",
    "                        \"labels\": labels[i],\n",
    "                        \"scores\": scores[i],\n",
    "                    }\n",
    "                )\n",
    "\n",
    "        if self.has_mask():\n",
    "            mask_proposals = [p[\"boxes\"] for p in result]\n",
    "            if self.training:\n",
    "                assert matched_idxs is not None\n",
    "                # during training, only focus on positive boxes\n",
    "                num_images = len(proposals)\n",
    "                mask_proposals = []\n",
    "                pos_matched_idxs = []\n",
    "                for img_id in range(num_images):\n",
    "                    pos = torch.where(labels[img_id] > 0)[0]\n",
    "                    mask_proposals.append(proposals[img_id][pos])\n",
    "                    pos_matched_idxs.append(matched_idxs[img_id][pos])\n",
    "            else:\n",
    "                pos_matched_idxs = None\n",
    "\n",
    "            if self.mask_roi_pool is not None:\n",
    "                mask_features = self.mask_roi_pool(features, mask_proposals, image_shapes)\n",
    "                mask_features = self.mask_head(mask_features)\n",
    "                mask_logits = self.mask_predictor(mask_features)\n",
    "            else:\n",
    "                raise Exception(\"Expected mask_roi_pool to be not None\")\n",
    "\n",
    "            loss_mask = {}\n",
    "            if self.training:\n",
    "                assert targets is not None\n",
    "                assert pos_matched_idxs is not None\n",
    "                assert mask_logits is not None\n",
    "\n",
    "                gt_masks = [t[\"masks\"] for t in targets]\n",
    "                gt_labels = [t[\"labels\"] for t in targets]\n",
    "                rcnn_loss_mask = maskrcnn_loss(\n",
    "                    mask_logits, mask_proposals,\n",
    "                    gt_masks, gt_labels, pos_matched_idxs)\n",
    "                loss_mask = {\n",
    "                    \"loss_mask\": rcnn_loss_mask\n",
    "                }\n",
    "            else:\n",
    "                labels = [r[\"labels\"] for r in result]\n",
    "                masks_probs = maskrcnn_inference(mask_logits, labels)\n",
    "                for mask_prob, r in zip(masks_probs, result):\n",
    "                    r[\"masks\"] = mask_prob\n",
    "\n",
    "            losses.update(loss_mask)\n",
    "\n",
    "        # keep none checks in if conditional so torchscript will conditionally\n",
    "        # compile each branch\n",
    "        if self.keypoint_roi_pool is not None and self.keypoint_head is not None \\\n",
    "                and self.keypoint_predictor is not None:\n",
    "            keypoint_proposals = [p[\"boxes\"] for p in result]\n",
    "            if self.training:\n",
    "                # during training, only focus on positive boxes\n",
    "                num_images = len(proposals)\n",
    "                keypoint_proposals = []\n",
    "                pos_matched_idxs = []\n",
    "                assert matched_idxs is not None\n",
    "                for img_id in range(num_images):\n",
    "                    pos = torch.where(labels[img_id] > 0)[0]\n",
    "                    keypoint_proposals.append(proposals[img_id][pos])\n",
    "                    pos_matched_idxs.append(matched_idxs[img_id][pos])\n",
    "            else:\n",
    "                pos_matched_idxs = None\n",
    "\n",
    "            keypoint_features = self.keypoint_roi_pool(features, keypoint_proposals, image_shapes)\n",
    "            keypoint_features = self.keypoint_head(keypoint_features)\n",
    "            keypoint_logits = self.keypoint_predictor(keypoint_features)\n",
    "\n",
    "            loss_keypoint = {}\n",
    "            if self.training:\n",
    "                assert targets is not None\n",
    "                assert pos_matched_idxs is not None\n",
    "\n",
    "                gt_keypoints = [t[\"keypoints\"] for t in targets]\n",
    "                rcnn_loss_keypoint = keypointrcnn_loss(\n",
    "                    keypoint_logits, keypoint_proposals,\n",
    "                    gt_keypoints, pos_matched_idxs)\n",
    "                loss_keypoint = {\n",
    "                    \"loss_keypoint\": rcnn_loss_keypoint\n",
    "                }\n",
    "            else:\n",
    "                assert keypoint_logits is not None\n",
    "                assert keypoint_proposals is not None\n",
    "\n",
    "                keypoints_probs, kp_scores = keypointrcnn_inference(keypoint_logits, keypoint_proposals)\n",
    "                for keypoint_prob, kps, r in zip(keypoints_probs, kp_scores, result):\n",
    "                    r[\"keypoints\"] = keypoint_prob\n",
    "                    r[\"keypoints_scores\"] = kps\n",
    "\n",
    "            losses.update(loss_keypoint)\n",
    "\n",
    "        return result, losses\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "class PSDetector(nn.Module):\n",
    "    def __init__(self, backbone, out_channels=None, num_classes=None,\n",
    "                 # transform parameters\n",
    "                 min_size=800, max_size=1333,\n",
    "                 image_mean=None, image_std=None,\n",
    "                 # RPN parameters\n",
    "                 rpn_anchor_generator=None, rpn_head=None,\n",
    "                 rpn_pre_nms_top_n_train=2000, rpn_pre_nms_top_n_test=1000,\n",
    "                 rpn_post_nms_top_n_train=2000, rpn_post_nms_top_n_test=1000,\n",
    "                 rpn_nms_thresh=0.7,\n",
    "                 rpn_fg_iou_thresh=0.7, rpn_bg_iou_thresh=0.3,\n",
    "                 rpn_batch_size_per_image=256, rpn_positive_fraction=0.5,\n",
    "                 rpn_score_thresh=0.0,\n",
    "                 # Box parameters\n",
    "                 box_roi_pool=None, box_head=None, box_predictor=None,\n",
    "                 box_score_thresh=0.05, box_nms_thresh=0.5, box_detections_per_img=100,\n",
    "                 box_fg_iou_thresh=0.5, box_bg_iou_thresh=0.5,\n",
    "                 box_batch_size_per_image=512, box_positive_fraction=0.25,\n",
    "                 bbox_reg_weights=None):\n",
    "        super().__init__()\n",
    "        # rgb\n",
    "        self.reg_backbone = backbone\n",
    "        if rpn_anchor_generator is None:\n",
    "            anchor_sizes = ((32,), (64,), (128,), (256,), (512,))\n",
    "            aspect_ratios = ((0.5, 1.0, 2.0),) * len(anchor_sizes)\n",
    "            rpn_anchor_generator = AnchorGenerator(\n",
    "                anchor_sizes, aspect_ratios\n",
    "            )\n",
    "        if rpn_head is None: \n",
    "            rpn_head = RPNHead(\n",
    "                out_channels, rpn_anchor_generator.num_anchors_per_location()[0]\n",
    "            )\n",
    "        \n",
    "        \n",
    "        rpn_pre_nms_top_n = dict(training=rpn_pre_nms_top_n_train, testing=rpn_pre_nms_top_n_test)\n",
    "        rpn_post_nms_top_n = dict(training=rpn_post_nms_top_n_train, testing=rpn_post_nms_top_n_test)\n",
    "\n",
    "        self.rpn = RegionProposalNetwork(\n",
    "            rpn_anchor_generator, rpn_head,\n",
    "            rpn_fg_iou_thresh, rpn_bg_iou_thresh,\n",
    "            rpn_batch_size_per_image, rpn_positive_fraction,\n",
    "            rpn_pre_nms_top_n, rpn_post_nms_top_n, rpn_nms_thresh,\n",
    "            score_thresh=rpn_score_thresh)\n",
    "\n",
    "        if box_roi_pool is None:\n",
    "            box_roi_pool = MultiScaleRoIAlign(\n",
    "                featmap_names=['0', '1', '2', '3'],\n",
    "                output_size=7,\n",
    "                sampling_ratio=2)\n",
    "\n",
    "        if box_head is None:\n",
    "            resolution = box_roi_pool.output_size[0]\n",
    "            representation_size = 1024\n",
    "            box_head = TwoMLPHead(\n",
    "                out_channels * resolution ** 2,\n",
    "                representation_size)\n",
    "\n",
    "        if box_predictor is None:\n",
    "            representation_size = 1024\n",
    "            box_predictor = FastRCNNPredictor(\n",
    "                representation_size,\n",
    "                num_classes)\n",
    "\n",
    "        self.roi_heads = RoIHeads(\n",
    "            # Box\n",
    "            box_roi_pool, box_head, box_predictor,\n",
    "            box_fg_iou_thresh, box_bg_iou_thresh,\n",
    "            box_batch_size_per_image, box_positive_fraction,\n",
    "            bbox_reg_weights,\n",
    "            box_score_thresh, box_nms_thresh, box_detections_per_img)\n",
    "\n",
    "        if image_mean is None:\n",
    "            image_mean = [0.485, 0.456, 0.406]\n",
    "        if image_std is None:\n",
    "            image_std = [0.229, 0.224, 0.225]\n",
    "        self.transform = GeneralizedRCNNTransform(min_size, max_size, image_mean, image_std)\n",
    "        \n",
    "        \n",
    "        # noise\n",
    "        \n",
    "        self.srm = SRM\n",
    "        self.noise_backbone = copy.deepcopy(backbone)\n",
    "        \n",
    "        \n",
    "        \n",
    "    def forward(self, images, targets=None):\n",
    "\n",
    "        original_image_sizes: List[Tuple[int, int]] = []\n",
    "        for img in images:\n",
    "            val = img.shape[-2:]\n",
    "            assert len(val) == 2\n",
    "            original_image_sizes.append((val[0], val[1]))\n",
    "\n",
    "        images, targets = self.transform(images, targets)\n",
    "\n",
    "        reg_features = self.reg_backbone(images.tensors)\n",
    "        noise_features = self.noise_backbone(self.srm(images.tensors))\n",
    "        \n",
    "        proposals, proposal_losses = self.rpn(images, reg_features, targets) \n",
    "        reg_detections, detector_losses = self.roi_heads(reg_features, noise_features, proposals, images.image_sizes, targets)\n",
    "        \n",
    "        if not self.training:\n",
    "            return reg_detections\n",
    "        else: \n",
    "            losses = {\n",
    "                'proposal_losses': proposal_losses,\n",
    "                'detector_losses': detector_losses\n",
    "            }\n",
    "            return losses\n",
    "         \n",
    "\n",
    "        \n",
    "    \n",
    "        "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "# 超参数\n",
    "lr = 1e-3\n",
    "batch_size = 2\n",
    "data_dir = 'F:/VOS/ubutu-vm/resources/datasets/work/train'\n",
    "data_dir = 'F:/Work/aeye/train'\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Downloading: \"https://download.pytorch.org/models/resnet50-0676ba61.pth\" to C:\\Users\\Administrator/.cache\\torch\\hub\\checkpoints\\resnet50-0676ba61.pth\n",
      "100%|██████████| 97.8M/97.8M [00:39<00:00, 2.60MB/s]\n"
     ]
    }
   ],
   "source": [
    "# Build the two-stream detector: ResNet-50 FPN backbone, pretrained on\n",
    "# ImageNet, with the last 3 layer groups trainable.\n",
    "# NOTE(review): the two positional args are presumably the feature channel\n",
    "# count (256) and number of classes (2) — confirm against PSDetector.__init__.\n",
    "model = PSDetector(resnet_fpn_backbone('resnet50', True, trainable_layers=3),\n",
    "                   256,\n",
    "                   2)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "# Optimise only the trainable parameters (frozen backbone layers excluded).\n",
    "params = list(filter(lambda p: p.requires_grad, model.parameters()))\n",
    "opt = Adam(params, lr=lr)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "# Dataloader: preprocess images and box annotations into tensors.\n",
    "images = []\n",
    "boxes = []\n",
    "labels = []\n",
    "paths = []\n",
    "for name in ['gen_train', 'some_true']:\n",
    "    paths.extend(get_file_paths(f'{data_dir}/{name}'))\n",
    "boxes_info = readjson(f'{data_dir}/fake_box.json')\n",
    "for p in paths:\n",
    "    img = cv2.imread(p) / 255\n",
    "    img = cv2.resize(img, (256, 256))\n",
    "    img = torch.tensor(img).permute(2, 0, 1).to(torch.float32)\n",
    "    # Default annotation: degenerate unit box with background label 0.\n",
    "    b_info = boxes_info.get(get_file_name(p), [0, 1, 0, 1, 0])\n",
    "    # Reorder to [x1, y1, x2, y2] (annotation appears to be stored as\n",
    "    # [x1, x2, y1, y2] — TODO confirm against fake_box.json). Build a new\n",
    "    # list instead of the original in-place slice assignment, which\n",
    "    # silently mutated the entries of boxes_info.\n",
    "    box = torch.tensor([b_info[0], b_info[2], b_info[1], b_info[3]])\n",
    "    label = torch.tensor(b_info[4])  # fixed 'lable' typo\n",
    "    images.append(img)\n",
    "    boxes.append(box)\n",
    "    labels.append(label)\n",
    "dataloader = DataLoader(TensorDataset(torch.stack(images), torch.stack(boxes), torch.stack(labels)), batch_size=batch_size)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 55,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "  0%|          | 0/98 [02:24<?, ?it/s]\n",
      "epoch 0 loss 1.1348621845245361:   1%|          | 1/98 [00:50<1:22:05, 50.78s/it]\n",
      "  0%|          | 0/98 [00:00<?, ?it/s]"
     ]
    },
    {
     "ename": "KeyboardInterrupt",
     "evalue": "",
     "output_type": "error",
     "traceback": [
      "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[1;31mKeyboardInterrupt\u001b[0m                         Traceback (most recent call last)",
      "\u001b[1;32m<ipython-input-55-3e9bf9d52f02>\u001b[0m in \u001b[0;36m<module>\u001b[1;34m\u001b[0m\n\u001b[0;32m      6\u001b[0m         \u001b[0mloss\u001b[0m \u001b[1;33m=\u001b[0m \u001b[1;36m0\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m      7\u001b[0m         output = model(imgs, [{'boxes': boxes[i][None], 'labels': labels[i][None]} \n\u001b[1;32m----> 8\u001b[1;33m                       for i in range(batch_size)])\n\u001b[0m\u001b[0;32m      9\u001b[0m         \u001b[1;32mfor\u001b[0m \u001b[0mk\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mitems\u001b[0m \u001b[1;32min\u001b[0m \u001b[0moutput\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mitems\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m     10\u001b[0m             \u001b[1;32mfor\u001b[0m \u001b[0m_\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mv\u001b[0m \u001b[1;32min\u001b[0m \u001b[0mitems\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mitems\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32mD:\\Users\\EDY\\anaconda3\\envs\\faceparsing\\lib\\site-packages\\torch\\nn\\modules\\module.py\u001b[0m in \u001b[0;36m_call_impl\u001b[1;34m(self, *input, **kwargs)\u001b[0m\n\u001b[0;32m   1100\u001b[0m         if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks\n\u001b[0;32m   1101\u001b[0m                 or _global_forward_hooks or _global_forward_pre_hooks):\n\u001b[1;32m-> 1102\u001b[1;33m             \u001b[1;32mreturn\u001b[0m \u001b[0mforward_call\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0minput\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m   1103\u001b[0m         \u001b[1;31m# Do not call functions when jit is used\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m   1104\u001b[0m         \u001b[0mfull_backward_hooks\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mnon_full_backward_hooks\u001b[0m \u001b[1;33m=\u001b[0m \u001b[1;33m[\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m[\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32m<ipython-input-49-a2be990e7d5b>\u001b[0m in \u001b[0;36mforward\u001b[1;34m(self, images, targets)\u001b[0m\n\u001b[0;32m     95\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m     96\u001b[0m         \u001b[0mreg_features\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mreg_backbone\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mimages\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mtensors\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 97\u001b[1;33m         \u001b[0mnoise_features\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mnoise_backbone\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0msrm\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mimages\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mtensors\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m     98\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m     99\u001b[0m         \u001b[0mproposals\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mproposal_losses\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mrpn\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mimages\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mreg_features\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mtargets\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32mD:\\Users\\EDY\\anaconda3\\envs\\faceparsing\\lib\\site-packages\\torch\\nn\\modules\\module.py\u001b[0m in \u001b[0;36m_call_impl\u001b[1;34m(self, *input, **kwargs)\u001b[0m\n\u001b[0;32m   1100\u001b[0m         if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks\n\u001b[0;32m   1101\u001b[0m                 or _global_forward_hooks or _global_forward_pre_hooks):\n\u001b[1;32m-> 1102\u001b[1;33m             \u001b[1;32mreturn\u001b[0m \u001b[0mforward_call\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0minput\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m   1103\u001b[0m         \u001b[1;31m# Do not call functions when jit is used\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m   1104\u001b[0m         \u001b[0mfull_backward_hooks\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mnon_full_backward_hooks\u001b[0m \u001b[1;33m=\u001b[0m \u001b[1;33m[\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m[\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32mD:\\Users\\EDY\\anaconda3\\envs\\faceparsing\\lib\\site-packages\\torchvision\\models\\detection\\backbone_utils.py\u001b[0m in \u001b[0;36mforward\u001b[1;34m(self, x)\u001b[0m\n\u001b[0;32m     43\u001b[0m     \u001b[1;32mdef\u001b[0m \u001b[0mforward\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mx\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m     44\u001b[0m         \u001b[0mx\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mbody\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mx\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 45\u001b[1;33m         \u001b[0mx\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mfpn\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mx\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m     46\u001b[0m         \u001b[1;32mreturn\u001b[0m \u001b[0mx\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m     47\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32mD:\\Users\\EDY\\anaconda3\\envs\\faceparsing\\lib\\site-packages\\torch\\nn\\modules\\module.py\u001b[0m in \u001b[0;36m_call_impl\u001b[1;34m(self, *input, **kwargs)\u001b[0m\n\u001b[0;32m   1100\u001b[0m         if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks\n\u001b[0;32m   1101\u001b[0m                 or _global_forward_hooks or _global_forward_pre_hooks):\n\u001b[1;32m-> 1102\u001b[1;33m             \u001b[1;32mreturn\u001b[0m \u001b[0mforward_call\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0minput\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m   1103\u001b[0m         \u001b[1;31m# Do not call functions when jit is used\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m   1104\u001b[0m         \u001b[0mfull_backward_hooks\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mnon_full_backward_hooks\u001b[0m \u001b[1;33m=\u001b[0m \u001b[1;33m[\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m[\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32mD:\\Users\\EDY\\anaconda3\\envs\\faceparsing\\lib\\site-packages\\torchvision\\ops\\feature_pyramid_network.py\u001b[0m in \u001b[0;36mforward\u001b[1;34m(self, x)\u001b[0m\n\u001b[0;32m    147\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m    148\u001b[0m         \u001b[1;32mfor\u001b[0m \u001b[0midx\u001b[0m \u001b[1;32min\u001b[0m \u001b[0mrange\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mlen\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mx\u001b[0m\u001b[1;33m)\u001b[0m \u001b[1;33m-\u001b[0m \u001b[1;36m2\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m-\u001b[0m\u001b[1;36m1\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m-\u001b[0m\u001b[1;36m1\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 149\u001b[1;33m             \u001b[0minner_lateral\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mget_result_from_inner_blocks\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mx\u001b[0m\u001b[1;33m[\u001b[0m\u001b[0midx\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0midx\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m    150\u001b[0m             \u001b[0mfeat_shape\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0minner_lateral\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mshape\u001b[0m\u001b[1;33m[\u001b[0m\u001b[1;33m-\u001b[0m\u001b[1;36m2\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m    151\u001b[0m             \u001b[0minner_top_down\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mF\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0minterpolate\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mlast_inner\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0msize\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mfeat_shape\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mmode\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;34m\"nearest\"\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32mD:\\Users\\EDY\\anaconda3\\envs\\faceparsing\\lib\\site-packages\\torchvision\\ops\\feature_pyramid_network.py\u001b[0m in \u001b[0;36mget_result_from_inner_blocks\u001b[1;34m(self, x, idx)\u001b[0m\n\u001b[0;32m    107\u001b[0m         \u001b[1;32mfor\u001b[0m \u001b[0mmodule\u001b[0m \u001b[1;32min\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0minner_blocks\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m    108\u001b[0m             \u001b[1;32mif\u001b[0m \u001b[0mi\u001b[0m \u001b[1;33m==\u001b[0m \u001b[0midx\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 109\u001b[1;33m                 \u001b[0mout\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mmodule\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mx\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m    110\u001b[0m             \u001b[0mi\u001b[0m \u001b[1;33m+=\u001b[0m \u001b[1;36m1\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m    111\u001b[0m         \u001b[1;32mreturn\u001b[0m \u001b[0mout\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32mD:\\Users\\EDY\\anaconda3\\envs\\faceparsing\\lib\\site-packages\\torch\\nn\\modules\\module.py\u001b[0m in \u001b[0;36m_call_impl\u001b[1;34m(self, *input, **kwargs)\u001b[0m\n\u001b[0;32m   1100\u001b[0m         if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks\n\u001b[0;32m   1101\u001b[0m                 or _global_forward_hooks or _global_forward_pre_hooks):\n\u001b[1;32m-> 1102\u001b[1;33m             \u001b[1;32mreturn\u001b[0m \u001b[0mforward_call\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0minput\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m   1103\u001b[0m         \u001b[1;31m# Do not call functions when jit is used\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m   1104\u001b[0m         \u001b[0mfull_backward_hooks\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mnon_full_backward_hooks\u001b[0m \u001b[1;33m=\u001b[0m \u001b[1;33m[\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m[\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32mD:\\Users\\EDY\\anaconda3\\envs\\faceparsing\\lib\\site-packages\\torch\\nn\\modules\\conv.py\u001b[0m in \u001b[0;36mforward\u001b[1;34m(self, input)\u001b[0m\n\u001b[0;32m    444\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m    445\u001b[0m     \u001b[1;32mdef\u001b[0m \u001b[0mforward\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0minput\u001b[0m\u001b[1;33m:\u001b[0m \u001b[0mTensor\u001b[0m\u001b[1;33m)\u001b[0m \u001b[1;33m->\u001b[0m \u001b[0mTensor\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 446\u001b[1;33m         \u001b[1;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_conv_forward\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0minput\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mweight\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mbias\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m    447\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m    448\u001b[0m \u001b[1;32mclass\u001b[0m \u001b[0mConv3d\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0m_ConvNd\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32mD:\\Users\\EDY\\anaconda3\\envs\\faceparsing\\lib\\site-packages\\torch\\nn\\modules\\conv.py\u001b[0m in \u001b[0;36m_conv_forward\u001b[1;34m(self, input, weight, bias)\u001b[0m\n\u001b[0;32m    441\u001b[0m                             _pair(0), self.dilation, self.groups)\n\u001b[0;32m    442\u001b[0m         return F.conv2d(input, weight, bias, self.stride,\n\u001b[1;32m--> 443\u001b[1;33m                         self.padding, self.dilation, self.groups)\n\u001b[0m\u001b[0;32m    444\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m    445\u001b[0m     \u001b[1;32mdef\u001b[0m \u001b[0mforward\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0minput\u001b[0m\u001b[1;33m:\u001b[0m \u001b[0mTensor\u001b[0m\u001b[1;33m)\u001b[0m \u001b[1;33m->\u001b[0m \u001b[0mTensor\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;31mKeyboardInterrupt\u001b[0m: "
     ]
    }
   ],
   "source": [
    "\n",
    "import os\n",
    "\n",
    "os.makedirs('./model', exist_ok=True)  # torch.save fails if the dir is missing\n",
    "model.train()\n",
    "for epoch in range(10):\n",
    "    train_bar = tqdm(range(len(dataloader)))\n",
    "    for imgs, boxes, labels in dataloader:\n",
    "        opt.zero_grad()\n",
    "        # One target dict per image; use len(imgs) (not batch_size) so the\n",
    "        # final, possibly smaller, batch does not raise IndexError.\n",
    "        targets = [{'boxes': boxes[i][None], 'labels': labels[i][None]}\n",
    "                   for i in range(len(imgs))]\n",
    "        output = model(imgs, targets)\n",
    "        # Sum every RPN and RoI-head loss term into a single scalar.\n",
    "        loss = sum(v for items in output.values() for v in items.values())\n",
    "        loss.backward()\n",
    "        opt.step()\n",
    "        train_bar.set_description(f'epoch {epoch} loss {loss.item()}')\n",
    "        train_bar.update(1)\n",
    "        # NOTE(review): a leftover debug `break` here made each epoch train\n",
    "        # on only the first batch; it has been removed.\n",
    "    torch.save(model.state_dict(), f'./model/model_{epoch}.pth')\n",
    "    train_bar.close()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 52,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "{'proposal_losses': {'loss_objectness': tensor(0.7048, grad_fn=<BinaryCrossEntropyWithLogitsBackward0>),\n",
       "  'loss_rpn_box_reg': tensor(0.0621, grad_fn=<DivBackward0>)},\n",
       " 'detector_losses': {'loss_classifier': tensor(0.3733, grad_fn=<NllLossBackward0>),\n",
       "  'loss_box_reg': tensor(2.7490e-05, grad_fn=<DivBackward0>)}}"
      ]
     },
     "execution_count": 52,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Inspect the nested loss dict returned by the last training forward pass.\n",
    "output"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 53,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Scratch check: dict.update copies the top-level keys of output into a\n",
    "# (the nested loss dicts are shared, not deep-copied).\n",
    "a = {}\n",
    "a.update(output)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 54,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "{'proposal_losses': {'loss_objectness': tensor(0.7048, grad_fn=<BinaryCrossEntropyWithLogitsBackward0>),\n",
       "  'loss_rpn_box_reg': tensor(0.0621, grad_fn=<DivBackward0>)},\n",
       " 'detector_losses': {'loss_classifier': tensor(0.3733, grad_fn=<NllLossBackward0>),\n",
       "  'loss_box_reg': tensor(2.7490e-05, grad_fn=<DivBackward0>)}}"
      ]
     },
     "execution_count": 54,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Display the copied dict — same nested loss tensors as `output`.\n",
    "a"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "ename": "SyntaxError",
     "evalue": "invalid syntax (<ipython-input-1-74dc6aa724ed>, line 2)",
     "output_type": "error",
     "traceback": [
      "\u001b[1;36m  File \u001b[1;32m\"<ipython-input-1-74dc6aa724ed>\"\u001b[1;36m, line \u001b[1;32m2\u001b[0m\n\u001b[1;33m    [0 for v in a if v==1 else 2]\u001b[0m\n\u001b[1;37m                             ^\u001b[0m\n\u001b[1;31mSyntaxError\u001b[0m\u001b[1;31m:\u001b[0m invalid syntax\n"
     ]
    }
   ],
   "source": [
    "a = [1]\n",
    "# Fixed SyntaxError: a conditional expression must come before the `for`\n",
    "# in a comprehension; a trailing `if` can only filter, not select values.\n",
    "[0 if v == 1 else 2 for v in a]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "interpreter": {
   "hash": "e5de2b164caba643ef9a61f873b15635f0a5573c139dd90809bc6d6991777cde"
  },
  "kernelspec": {
   "display_name": "Python 3.6.5 ('faceparsing')",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.5"
  },
  "orig_nbformat": 4
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
