{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "outputs": [],
   "source": [
    "# Imports, deduplicated (torch and torchvision were imported twice) and\n",
    "# grouped: stdlib first, then third-party.\n",
    "import os\n",
    "import xml.etree.ElementTree as ET\n",
    "\n",
    "import numpy as np\n",
    "import matplotlib.pyplot as plt\n",
    "import PIL\n",
    "from PIL import Image\n",
    "from skimage import io,transform\n",
    "\n",
    "import torch\n",
    "# Also makes the torch.utils.data namespace importable for the\n",
    "# Dataset / DataLoader references later in the notebook.\n",
    "import torch.utils.data.dataset as Dataset\n",
    "import torchvision\n",
    "from torchvision.models.detection import FasterRCNN\n",
    "from torchvision.models.detection.rpn import AnchorGenerator\n",
    "%matplotlib inline"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "outputs": [],
   "source": [
    "# Absolute, machine-specific path to one VOC2007 annotation file; used only\n",
    "# by the (commented-out) XML exploration cells below. anno_path is re-bound\n",
    "# to the Annotations directory later in the notebook.\n",
    "anno_path = r\"F:\\VOCtrainval_06-Nov-2007\\VOCdevkit\\VOC2007\\Annotations\\000005.xml\""
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "outputs": [],
   "source": [
    "# import xml.etree.ElementTree as ET\n",
    "# tree = ET.parse(anno_path)"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "outputs": [],
   "source": [
    "# root = tree.getroot()\n",
    "# for node in root.findall(\"object\"):\n",
    "#     label_str = node[0].text\n",
    "#     bndbox = node.find(\"bndbox\")\n",
    "#     x1 = float(bndbox.find(\"xmin\").text)\n",
    "#     y1 = float(bndbox.find(\"ymin\").text)\n",
    "#     x2 = float(bndbox.find(\"xmax\").text)\n",
    "#     y2 = float(bndbox.find(\"ymax\").text)\n",
    "#     print(label_str,x1,y1,x2,y2)"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "outputs": [],
   "source": [
    "# PASCAL VOC class name -> integer label (1..20); label 0 is implicitly\n",
    "# reserved for the background class by torchvision's FasterRCNN\n",
    "# (num_classes=21 below).\n",
    "# NOTE(review): 'tvmonitor'=19 / 'train'=20 is swapped relative to the usual\n",
    "# alphabetical VOC ordering ('train'=19, 'tvmonitor'=20). Harmless as long\n",
    "# as the same mapping is used everywhere in this notebook, but confirm\n",
    "# before comparing labels against external tools or label files.\n",
    "label_str2int_dict = {\n",
    "    'aeroplane':1,\n",
    "    'bicycle':2,\n",
    "    'bird':3,\n",
    "    'boat':4,\n",
    "    'bottle':5,\n",
    "    'bus':6,\n",
    "    'car':7,\n",
    "    'cat':8,\n",
    "    'chair':9,\n",
    "    'cow':10,\n",
    "    'diningtable':11,\n",
    "    'dog':12,\n",
    "    'horse':13,\n",
    "    'motorbike':14,\n",
    "    'person':15,\n",
    "    'pottedplant':16,\n",
    "    'sheep':17,\n",
    "    'sofa':18,\n",
    "    'tvmonitor':19,\n",
    "    'train':20\n",
    "}"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "outputs": [],
   "source": [
    "class Rescale(object):\n",
    "    \"\"\"Rescale the image in a sample to a given size.\n",
    "\n",
    "    The sample is an (image, target) pair: image is an H x W x C ndarray,\n",
    "    target is {\"boxes\": (N, 4) float ndarray, \"labels\": (N,) ndarray}.\n",
    "    Boxes are rescaled together with the image; target is modified in\n",
    "    place and the same dict object is returned in the output pair.\n",
    "\n",
    "    Args:\n",
    "        output_size (tuple or int): Desired output size. If tuple, output is\n",
    "            matched to output_size. If int, smaller of image edges is matched\n",
    "            to output_size keeping aspect ratio the same.\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, output_size):\n",
    "        assert isinstance(output_size, (int, tuple))\n",
    "        self.output_size = output_size\n",
    "\n",
    "    def __call__(self, sample):\n",
    "        image,target = sample\n",
    "        labels = target[\"labels\"]\n",
    "        bboxes = target[\"boxes\"]\n",
    "\n",
    "        h, w = image.shape[:2]\n",
    "        if isinstance(self.output_size, int):\n",
    "            # int: match the SMALLER edge to output_size, keep aspect ratio.\n",
    "            if h > w:\n",
    "                new_h, new_w = self.output_size * h / w, self.output_size\n",
    "            else:\n",
    "                new_h, new_w = self.output_size, self.output_size * w / h\n",
    "        else:\n",
    "            new_h, new_w = self.output_size\n",
    "\n",
    "        new_h, new_w = int(new_h), int(new_w)\n",
    "        # NOTE: skimage's transform.resize also converts the image to floats\n",
    "        # in [0, 1] (unlike the raw uint8 io.imread output).\n",
    "        new_img = transform.resize(image, (new_h, new_w))\n",
    "\n",
    "        # h and w are swapped for boxes because for images the x and y axes\n",
    "        # are axis 1 and 0 respectively: columns 0,2 are x1,x2 and columns\n",
    "        # 1,3 are y1,y2.\n",
    "        # NOTE(review): assumes bboxes is a float ndarray of shape (N, 4)\n",
    "        # with N >= 1; an empty (0,)-shaped array from np.asarray([]) would\n",
    "        # break this fancy indexing -- confirm upstream guarantees.\n",
    "        bboxes[:,[0,2]] = bboxes[:,[0,2]] * (new_w / w)\n",
    "        bboxes[:,[1,3]] = bboxes[:,[1,3]] * (new_h / h)\n",
    "        target[\"boxes\"] = bboxes\n",
    "        target[\"labels\"] = labels\n",
    "        return (new_img,target)"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "outputs": [],
   "source": [
    "class ToTensor(object):\n",
    "    \"\"\"Convert an (image, target) sample of ndarrays to torch Tensors.\n",
    "\n",
    "    The image is converted from H x W x C (numpy) to C x H x W (torch) and\n",
    "    scaled to floats in [0, 1], which is what torchvision's detection models\n",
    "    expect before their internal ImageNet normalization\n",
    "    (GeneralizedRCNNTransform). target[\"labels\"] becomes int64,\n",
    "    target[\"boxes\"] float32; target is modified in place.\n",
    "    \"\"\"\n",
    "\n",
    "    def __call__(self, sample):\n",
    "        image,target = sample\n",
    "        labels = target[\"labels\"]\n",
    "        bboxes = target[\"boxes\"]\n",
    "\n",
    "        # Fix: raw io.imread images are uint8 in [0, 255]; feeding those to\n",
    "        # FasterRCNN (whose normalization assumes [0, 1] input) makes the\n",
    "        # loss explode. skimage's transform.resize (used by Rescale) already\n",
    "        # yields floats in [0, 1], so only the uint8 path needs rescaling.\n",
    "        if image.dtype == np.uint8:\n",
    "            image = image.astype(np.float32) / 255.0\n",
    "\n",
    "        # swap color axis because\n",
    "        # numpy image: H x W x C\n",
    "        # torch image: C X H X W\n",
    "        image = image.transpose((2, 0, 1))\n",
    "        image_tensor = torch.from_numpy(image).float()\n",
    "        target[\"labels\"] = torch.from_numpy(labels).long()\n",
    "        target[\"boxes\"] = torch.from_numpy(bboxes).float()\n",
    "        return (image_tensor,target)"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "outputs": [],
   "source": [
    "# import torch.utils.data.dataset as Dataset\n",
    "class VocDataset(torch.utils.data.Dataset):\n",
    "\n",
    "    def __init__(self,jpeg_path,annotation_path,list_file_path,transform=None):\n",
    "        super(VocDataset,self).__init__()\n",
    "        self.img_id_list = []\n",
    "        self.jpeg_path = jpeg_path\n",
    "        self.annotation_path = annotation_path\n",
    "        self.transform = transform\n",
    "        with open(list_file_path,\"r\") as f:\n",
    "            self.img_id_list = [line.strip() for line in f.readlines()]\n",
    "\n",
    "    def get_annotation_file_name(self,img_id):\n",
    "        return os.path.join(self.annotation_path,str(img_id) + \".xml\")\n",
    "\n",
    "    def get_jpeg_file_name(self,img_id):\n",
    "        return os.path.join(self.jpeg_path,str(img_id) + \".jpg\")\n",
    "\n",
    "    def get_image_from_img_id(self,img_id)-> PIL.Image.Image:\n",
    "        image = io.imread(self.get_jpeg_file_name(img_id))\n",
    "        return image\n",
    "\n",
    "    def get_target_from_img_id(self,img_id) -> dict:\n",
    "        tree = ET.parse(self.get_annotation_file_name(img_id))\n",
    "        root = tree.getroot()\n",
    "        labels = []\n",
    "        bboxes = []\n",
    "        for node in root.findall(\"object\"):\n",
    "            label_str = node[0].text\n",
    "            label_int = label_str2int_dict[label_str]\n",
    "            bndbox = node.find(\"bndbox\")\n",
    "            x1 = float(bndbox.find(\"xmin\").text)\n",
    "            y1 = float(bndbox.find(\"ymin\").text)\n",
    "            x2 = float(bndbox.find(\"xmax\").text)\n",
    "            y2 = float(bndbox.find(\"ymax\").text)\n",
    "            labels.append(label_int)\n",
    "            bboxes.append([x1,y1,x2,y2])\n",
    "        labels_np = np.asarray(labels)\n",
    "        bboxes_np = np.asarray(bboxes)\n",
    "\n",
    "        return {\n",
    "            \"boxes\":bboxes_np,\n",
    "            \"labels\":labels_np\n",
    "        }\n",
    "\n",
    "    def __getitem__(self, item):\n",
    "        img_id = self.img_id_list[item]\n",
    "        # img_tensor = self.get_tensor_from_img_id(img_id)\n",
    "        # img_tensor = img_tensor.unsqueeze(0)\n",
    "        # target = self.get_target_from_img_id(img_id)\n",
    "        # return img_tensor,target\n",
    "        image = self.get_image_from_img_id(img_id)\n",
    "        target = self.get_target_from_img_id(img_id)\n",
    "        if self.transform:\n",
    "            return self.transform((image,target))\n",
    "        return (image,target)\n",
    "\n",
    "\n",
    "    def __len__(self):\n",
    "        return len(self.img_id_list)"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "outputs": [],
   "source": [
    "# Machine-specific absolute paths into the VOC2007 devkit.\n",
    "# NOTE(review): this uses ImageSets\\Layout\\train.txt (a small subset --\n",
    "# len(ds) prints 318 below), not ImageSets\\Main\\train.txt; confirm the\n",
    "# smaller subset is intentional.\n",
    "train_list_file_name = r'F:\\VOCtrainval_06-Nov-2007\\VOCdevkit\\VOC2007\\ImageSets\\Layout\\train.txt'\n",
    "anno_path = r'F:\\VOCtrainval_06-Nov-2007\\VOCdevkit\\VOC2007\\Annotations'\n",
    "image_path = r'F:\\VOCtrainval_06-Nov-2007\\VOCdevkit\\VOC2007\\JPEGImages'"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "outputs": [],
   "source": [
    "# Use a pre-trained VGG16's convolutional stack as the detection backbone.\n",
    "backbone = torchvision.models.vgg16(pretrained=True).features\n",
    "# FasterRCNN needs to know the backbone's output channel count. VGG16's\n",
    "# final conv block emits 512 channels (the original tutorial comment about\n",
    "# mobilenet_v2's 1280 did not apply here).\n",
    "backbone.out_channels = 512\n",
    "\n",
    "# RPN anchors: 3 sizes x 3 aspect ratios = 9 anchors per spatial location.\n",
    "# Tuple[Tuple[int]] because each feature map could have its own sizes and\n",
    "# aspect ratios; this single-level backbone gets a single inner tuple.\n",
    "# anchor_generator = AnchorGenerator(sizes=((32, 64, 128, 256, 512),),\n",
    "#                                    aspect_ratios=((0.5, 1.0, 2.0),))\n",
    "anchor_generator = AnchorGenerator(sizes=((32, 64, 128),),\n",
    "                                   aspect_ratios=((0.5, 1.0, 2.0),))\n",
    "# Feature maps used for region-of-interest cropping, and the crop size\n",
    "# after rescaling. A backbone returning a plain Tensor is addressed as\n",
    "# featmap_names=['0']; more generally the backbone returns an\n",
    "# OrderedDict[Tensor] and featmap_names selects which entries to pool from.\n",
    "roi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=['0'],\n",
    "                                                output_size=7,\n",
    "                                                sampling_ratio=2)\n",
    "\n",
    "# Assemble the FasterRCNN: num_classes=21 = 20 VOC classes + background.\n",
    "model = FasterRCNN(backbone,\n",
    "                   num_classes=21,\n",
    "                   rpn_anchor_generator=anchor_generator,\n",
    "                   box_roi_pool=roi_pooler)"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[[1, 2, 3], ['a', 'b', 'c']]\n"
     ]
    }
   ],
   "source": [
    "# Scratch check of the collate transpose: a list of pairs becomes\n",
    "# [[all firsts], [all seconds]].\n",
    "_a = [(1,'a'),(2,'b'),(3,'c')]\n",
    "print(list([list(x) for x in zip(*_a)]))"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "outputs": [],
   "source": [
    "from typing import Dict,Tuple,Sequence\n",
    "# A batch element is an (image_tensor, {\"boxes\": ..., \"labels\": ...}) pair.\n",
    "x_type = Sequence[Tuple[torch.Tensor,Dict[str,torch.Tensor]]]\n",
    "\n",
    "def collate_fn(x:x_type):\n",
    "    \"\"\"Collate a detection batch into ([images...], [targets...]).\n",
    "\n",
    "    Images can have different sizes, so they cannot be stacked into one\n",
    "    tensor; torchvision's FasterRCNN takes a list of image tensors plus a\n",
    "    list of target dicts instead.\n",
    "    \"\"\"\n",
    "    # The comprehension variable is named `column` so it no longer shadows\n",
    "    # the parameter x.\n",
    "    return [list(column) for column in zip(*x)]"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tensor([[0, 1],\n",
      "        [0, 1],\n",
      "        [0, 1],\n",
      "        [0, 1]])\n",
      "torch.Size([4, 2])\n"
     ]
    }
   ],
   "source": [
    "# Scratch check: torch.stack joins equal-shaped tensors along a new dim 0,\n",
    "# giving shape (4, 2) here.\n",
    "_a = [torch.arange(0,2) for _ in range(4)]\n",
    "_b = torch.stack(_a,dim=0)\n",
    "print(_b)\n",
    "print(_b.shape)"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "318\n",
      "[[tensor([[[69., 66., 67.,  ..., 55., 54., 51.],\n",
      "         [72., 69., 67.,  ..., 53., 50., 53.],\n",
      "         [65., 67., 68.,  ..., 60., 60., 58.],\n",
      "         ...,\n",
      "         [77., 78., 79.,  ..., 84., 85., 84.],\n",
      "         [80., 81., 82.,  ..., 78., 79., 79.],\n",
      "         [82., 83., 83.,  ..., 80., 79., 79.]],\n",
      "\n",
      "        [[69., 66., 67.,  ..., 57., 56., 53.],\n",
      "         [72., 69., 67.,  ..., 55., 52., 55.],\n",
      "         [65., 67., 68.,  ..., 62., 62., 60.],\n",
      "         ...,\n",
      "         [76., 77., 78.,  ..., 84., 85., 84.],\n",
      "         [79., 80., 81.,  ..., 78., 79., 79.],\n",
      "         [81., 82., 82.,  ..., 80., 79., 79.]],\n",
      "\n",
      "        [[67., 64., 65.,  ..., 56., 55., 52.],\n",
      "         [70., 67., 65.,  ..., 54., 51., 54.],\n",
      "         [63., 65., 66.,  ..., 61., 61., 59.],\n",
      "         ...,\n",
      "         [72., 73., 76.,  ..., 82., 83., 82.],\n",
      "         [75., 76., 79.,  ..., 76., 77., 77.],\n",
      "         [77., 78., 80.,  ..., 78., 77., 77.]]])], [{'boxes': tensor([[156.,  97., 351., 270.]]), 'labels': tensor([7])}]]\n"
     ]
    }
   ],
   "source": [
    "# Dataset + DataLoader wiring. Rescale is left disabled: FasterRCNN's\n",
    "# internal GeneralizedRCNNTransform already resizes inputs (see model repr\n",
    "# in the next cell's output).\n",
    "composed = torchvision.transforms.Compose([\n",
    "    # Rescale((226,226)),\n",
    "    ToTensor()\n",
    "])\n",
    "ds = VocDataset(image_path,anno_path,train_list_file_name,transform=composed)\n",
    "print(len(ds))\n",
    "# for tensor,target in ds:\n",
    "#     print(tensor.shape)\n",
    "#     print(target)\n",
    "#     break\n",
    "# batch size 1, no shuffling; collate_fn keeps images and targets as lists\n",
    "# because variable-size images cannot be stacked into a single tensor.\n",
    "dl = torch.utils.data.DataLoader(ds,1,False,collate_fn=collate_fn)\n",
    "# Peek at one batch: [[image_tensor], [target_dict]].\n",
    "for batch in dl:\n",
    "    print(batch)\n",
    "    # print(batch[0].shape)\n",
    "    # print(batch[1].shape)\n",
    "    # print(batch[0].shape)\n",
    "    # print(batch[1]['boxes'].shape)\n",
    "    # print(batch[1]['labels'].shape)\n",
    "    break"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "outputs": [
    {
     "data": {
      "text/plain": "FasterRCNN(\n  (transform): GeneralizedRCNNTransform(\n      Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n      Resize(min_size=(800,), max_size=1333, mode='bilinear')\n  )\n  (backbone): Sequential(\n    (0): Conv2d(3, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n    (1): ReLU(inplace=True)\n    (2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n    (3): ReLU(inplace=True)\n    (4): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)\n    (5): Conv2d(64, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n    (6): ReLU(inplace=True)\n    (7): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n    (8): ReLU(inplace=True)\n    (9): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)\n    (10): Conv2d(128, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n    (11): ReLU(inplace=True)\n    (12): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n    (13): ReLU(inplace=True)\n    (14): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n    (15): ReLU(inplace=True)\n    (16): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)\n    (17): Conv2d(256, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n    (18): ReLU(inplace=True)\n    (19): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n    (20): ReLU(inplace=True)\n    (21): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n    (22): ReLU(inplace=True)\n    (23): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)\n    (24): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n    (25): ReLU(inplace=True)\n    (26): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n    (27): ReLU(inplace=True)\n    (28): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n    (29): ReLU(inplace=True)\n    (30): 
MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)\n  )\n  (rpn): RegionProposalNetwork(\n    (anchor_generator): AnchorGenerator()\n    (head): RPNHead(\n      (conv): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n      (cls_logits): Conv2d(512, 9, kernel_size=(1, 1), stride=(1, 1))\n      (bbox_pred): Conv2d(512, 36, kernel_size=(1, 1), stride=(1, 1))\n    )\n  )\n  (roi_heads): RoIHeads(\n    (box_roi_pool): MultiScaleRoIAlign()\n    (box_head): TwoMLPHead(\n      (fc6): Linear(in_features=25088, out_features=1024, bias=True)\n      (fc7): Linear(in_features=1024, out_features=1024, bias=True)\n    )\n    (box_predictor): FastRCNNPredictor(\n      (cls_score): Linear(in_features=1024, out_features=21, bias=True)\n      (bbox_pred): Linear(in_features=1024, out_features=84, bias=True)\n    )\n  )\n)"
     },
     "execution_count": 15,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Switch to training mode so model(x, y) below returns a loss dict. The\n",
    "# cell output is the module repr, which also documents the architecture\n",
    "# (resize/normalize transform, VGG backbone, 9-anchor RPN head, 21-class\n",
    "# box predictor).\n",
    "# eval mode\n",
    "# model.eval()\n",
    "# x = [torch.rand(3, 300, 400), torch.rand(3, 500, 400)]\n",
    "model.train()\n",
    "# predictions = model(x)\n",
    "# print(predictions)"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "torch.Size([1, 4])\n",
      "torch.Size([1])\n",
      "tensor(15.1883, grad_fn=<AddBackward0>)\n",
      "torch.Size([1, 4])\n",
      "torch.Size([1])\n",
      "tensor(2024309.8750, grad_fn=<AddBackward0>)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "G:\\Program Files\\anaconda3\\envs\\pytorch_base\\lib\\site-packages\\torch\\nn\\functional.py:2854: UserWarning: The default behavior for interpolate/upsample with float scale_factor will change in 1.6.0 to align with other frameworks/libraries, and use scale_factor directly, instead of relying on the computed output size. If you wish to keep the old behavior, please set recompute_scale_factor=True. See the documentation of nn.Upsample for details. \n",
      "  warnings.warn(\"The default behavior for interpolate/upsample with float scale_factor will change \"\n",
      "..\\torch\\csrc\\utils\\python_arg_parser.cpp:756: UserWarning: This overload of nonzero is deprecated:\n",
      "\tnonzero(Tensor input, *, Tensor out)\n",
      "Consider using one of the following signatures instead:\n",
      "\tnonzero(Tensor input, *, bool as_tuple)\n"
     ]
    },
    {
     "ename": "RuntimeError",
     "evalue": "[enforce fail at ..\\c10\\core\\CPUAllocator.cpp:72] data. DefaultCPUAllocator: not enough memory: you tried to allocate 102760448 bytes. Buy new RAM!\n(no backtrace available)",
     "output_type": "error",
     "traceback": [
      "\u001B[1;31m---------------------------------------------------------------------------\u001B[0m",
      "\u001B[1;31mRuntimeError\u001B[0m                              Traceback (most recent call last)",
      "\u001B[1;32m<ipython-input-16-6a8b38a43b96>\u001B[0m in \u001B[0;36m<module>\u001B[1;34m\u001B[0m\n\u001B[0;32m     10\u001B[0m     \u001B[0mprint\u001B[0m\u001B[1;33m(\u001B[0m\u001B[0mlosses\u001B[0m\u001B[1;33m)\u001B[0m\u001B[1;33m\u001B[0m\u001B[1;33m\u001B[0m\u001B[0m\n\u001B[0;32m     11\u001B[0m     \u001B[0moptimizer\u001B[0m\u001B[1;33m.\u001B[0m\u001B[0mzero_grad\u001B[0m\u001B[1;33m(\u001B[0m\u001B[1;33m)\u001B[0m\u001B[1;33m\u001B[0m\u001B[1;33m\u001B[0m\u001B[0m\n\u001B[1;32m---> 12\u001B[1;33m     \u001B[0mlosses\u001B[0m\u001B[1;33m.\u001B[0m\u001B[0mbackward\u001B[0m\u001B[1;33m(\u001B[0m\u001B[1;33m)\u001B[0m\u001B[1;33m\u001B[0m\u001B[1;33m\u001B[0m\u001B[0m\n\u001B[0m\u001B[0;32m     13\u001B[0m     \u001B[0moptimizer\u001B[0m\u001B[1;33m.\u001B[0m\u001B[0mstep\u001B[0m\u001B[1;33m(\u001B[0m\u001B[1;33m)\u001B[0m\u001B[1;33m\u001B[0m\u001B[1;33m\u001B[0m\u001B[0m\n\u001B[0;32m     14\u001B[0m     \u001B[1;31m# print(loss)\u001B[0m\u001B[1;33m\u001B[0m\u001B[1;33m\u001B[0m\u001B[1;33m\u001B[0m\u001B[0m\n",
      "\u001B[1;32mG:\\Program Files\\anaconda3\\envs\\pytorch_base\\lib\\site-packages\\torch\\tensor.py\u001B[0m in \u001B[0;36mbackward\u001B[1;34m(self, gradient, retain_graph, create_graph)\u001B[0m\n\u001B[0;32m    196\u001B[0m                 \u001B[0mproducts\u001B[0m\u001B[1;33m.\u001B[0m \u001B[0mDefaults\u001B[0m \u001B[0mto\u001B[0m\u001B[0;31m \u001B[0m\u001B[0;31m`\u001B[0m\u001B[0;31m`\u001B[0m\u001B[1;32mFalse\u001B[0m\u001B[0;31m`\u001B[0m\u001B[0;31m`\u001B[0m\u001B[1;33m.\u001B[0m\u001B[1;33m\u001B[0m\u001B[1;33m\u001B[0m\u001B[0m\n\u001B[0;32m    197\u001B[0m         \"\"\"\n\u001B[1;32m--> 198\u001B[1;33m         \u001B[0mtorch\u001B[0m\u001B[1;33m.\u001B[0m\u001B[0mautograd\u001B[0m\u001B[1;33m.\u001B[0m\u001B[0mbackward\u001B[0m\u001B[1;33m(\u001B[0m\u001B[0mself\u001B[0m\u001B[1;33m,\u001B[0m \u001B[0mgradient\u001B[0m\u001B[1;33m,\u001B[0m \u001B[0mretain_graph\u001B[0m\u001B[1;33m,\u001B[0m \u001B[0mcreate_graph\u001B[0m\u001B[1;33m)\u001B[0m\u001B[1;33m\u001B[0m\u001B[1;33m\u001B[0m\u001B[0m\n\u001B[0m\u001B[0;32m    199\u001B[0m \u001B[1;33m\u001B[0m\u001B[0m\n\u001B[0;32m    200\u001B[0m     \u001B[1;32mdef\u001B[0m \u001B[0mregister_hook\u001B[0m\u001B[1;33m(\u001B[0m\u001B[0mself\u001B[0m\u001B[1;33m,\u001B[0m \u001B[0mhook\u001B[0m\u001B[1;33m)\u001B[0m\u001B[1;33m:\u001B[0m\u001B[1;33m\u001B[0m\u001B[1;33m\u001B[0m\u001B[0m\n",
      "\u001B[1;32mG:\\Program Files\\anaconda3\\envs\\pytorch_base\\lib\\site-packages\\torch\\autograd\\__init__.py\u001B[0m in \u001B[0;36mbackward\u001B[1;34m(tensors, grad_tensors, retain_graph, create_graph, grad_variables)\u001B[0m\n\u001B[0;32m     98\u001B[0m     Variable._execution_engine.run_backward(\n\u001B[0;32m     99\u001B[0m         \u001B[0mtensors\u001B[0m\u001B[1;33m,\u001B[0m \u001B[0mgrad_tensors\u001B[0m\u001B[1;33m,\u001B[0m \u001B[0mretain_graph\u001B[0m\u001B[1;33m,\u001B[0m \u001B[0mcreate_graph\u001B[0m\u001B[1;33m,\u001B[0m\u001B[1;33m\u001B[0m\u001B[1;33m\u001B[0m\u001B[0m\n\u001B[1;32m--> 100\u001B[1;33m         allow_unreachable=True)  # allow_unreachable flag\n\u001B[0m\u001B[0;32m    101\u001B[0m \u001B[1;33m\u001B[0m\u001B[0m\n\u001B[0;32m    102\u001B[0m \u001B[1;33m\u001B[0m\u001B[0m\n",
      "\u001B[1;31mRuntimeError\u001B[0m: [enforce fail at ..\\c10\\core\\CPUAllocator.cpp:72] data. DefaultCPUAllocator: not enough memory: you tried to allocate 102760448 bytes. Buy new RAM!\n(no backtrace available)"
     ]
    }
   ],
   "source": [
    "# Single-pass training loop over the DataLoader.\n",
    "params = [p for p in model.parameters() if p.requires_grad]\n",
    "optimizer = torch.optim.SGD(\n",
    "    params, lr=0.02, momentum=0.9, weight_decay=1e-4)\n",
    "# NOTE(review): lr=0.02 at batch size 1 is aggressive -- the printed loss\n",
    "# jumps from ~15 to ~2e6 after one step (see output). Consider a much\n",
    "# smaller lr and scaling images to [0, 1] in ToTensor.\n",
    "for x,y in dl:\n",
    "    # print(x.shape)\n",
    "    print(y[0]['boxes'].shape)\n",
    "    print(y[0]['labels'].shape)\n",
    "    # In train mode the model returns a dict of named losses; sum them for\n",
    "    # a single scalar to backprop.\n",
    "    loss_dict = model(x,y)\n",
    "    losses = sum(loss for loss in loss_dict.values())\n",
    "    print(losses)\n",
    "    optimizer.zero_grad()\n",
    "    losses.backward()\n",
    "    optimizer.step()\n",
    "    # print(loss)\n",
    "    # break"
   ],
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   }
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 0
}