{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [],
   "source": [
    "from sklearn.cluster import KMeans, SpectralClustering,DBSCAN\n",
    "from sklearn.feature_extraction import image\n",
    "import mmcv\n",
    "import numpy as np\n",
    "import matplotlib.pyplot as plt"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "def load_DFL_vec_to_numpy(img_id):\n",
    "\n",
    "    # load DFL vector\n",
    "    DFL_vec = mmcv.load('/home/gejunyao/fast_cache/DFL_Cache/'+ img_id + '.pkl')\n",
    "    # transfer DFL from (vec, h, w) to (h, w, vec)\n",
    "    DFL_vec = DFL_vec.permute(1, 2, 0).numpy()\n",
    "    return DFL_vec"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Using matplotlib backend: TkAgg\n"
     ]
    }
   ],
   "source": [
    "%matplotlib\n",
    "IMG_ROOT = '/media/gejunyao/Disk1/Datasets/CAS-OpenSARShip/ship_detection_online/VOC2012/JPEGImages/'\n",
    "IMG_ID = 'Sen_ship_hh_0201610030202201'\n",
    "N_CLUSTER = 2\n",
    "DFL_vec = load_DFL_vec_to_numpy(IMG_ID)\n",
    "h, w, vec_len = DFL_vec.shape\n",
    "# cluster start here\n",
    "flatten_DFL_vec = DFL_vec.reshape(-1, vec_len)\n",
    "db = DBSCAN(eps=0.4, min_samples=10).fit(flatten_DFL_vec)\n",
    "core_samples_mask = np.zeros_like(db.labels_, dtype=bool)\n",
    "core_samples_mask[db.core_sample_indices_] = True\n",
    "labels = db.labels_\n",
    "clustered_DFL = labels.reshape(h, w)\n",
    "# read raw image\n",
    "img = mmcv.imread(IMG_ROOT + IMG_ID + '.jpg')\n",
    "rescale_img = mmcv.imrescale(img, (h, w))\n",
    "padded_img = mmcv.impad(rescale_img, shape=(h, w))\n",
    "# visualization\n",
    "plt.figure()\n",
    "plt.suptitle('cluster result for {}, n_clusters={}'.format(IMG_ID, N_CLUSTER))\n",
    "plt.subplot(1,3,1)\n",
    "plt.imshow(padded_img)\n",
    "plt.title('raw image')  \n",
    "plt.subplot(1,3,2)                                                           \n",
    "plt.imshow(clustered_DFL)\n",
    "plt.title('clustered DFL')\n",
    "plt.subplot(1,3,3)\n",
    "plt.imshow(padded_img)\n",
    "plt.imshow(clustered_DFL, alpha=0.3)\n",
    "plt.title('fused image')\n",
    "plt.show()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 计算中心到边框相似度的内容"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Using matplotlib backend: TkAgg\n"
     ]
    }
   ],
   "source": [
    "'''\n",
    "Author: SlytherinGe\n",
    "LastEditTime: 2021-10-22 19:58:48\n",
    "'''\n",
    "import dataset_utilty.voc_label_utility as VL\n",
    "import mmcv\n",
    "import torch\n",
    "import numpy as np\n",
    "import matplotlib.pyplot as plt\n",
    "import os\n",
    "\n",
    "%matplotlib\n",
    "\n",
    "IMG_ROOT = '/media/gejunyao/Disk1/Datasets/SSDD/VOC2012/JPEGImages/'\n",
    "IMG_ID = '000395'\n",
    "ANNO_ROOT = '/media/gejunyao/Disk1/Datasets/SSDD/VOC2012/annotations_hbb/'\n",
    "\n",
    "def load_DFL_vec_to_numpy(img_id):\n",
    "\n",
    "    # load DFL vector\n",
    "    DFL_vec = mmcv.load('/home/gejunyao/ramdisk/DFL_Cache/'+ img_id + '.pkl')\n",
    "    # transfer DFL from (vec, h, w) to (h, w, vec)\n",
    "    DFL_vec = DFL_vec.permute(1, 2, 0)\n",
    "    return DFL_vec\n",
    "\n",
    "def get_similarity_map(test_pos, DFL_vec):\n",
    "\n",
    "    h, w, vec_len = DFL_vec.shape\n",
    "\n",
    "    DFL_vec = DFL_vec.clone().reshape(-1, vec_len)\n",
    "    # normailize DFL vec\n",
    "    norm_DFL_vec = DFL_vec #/ \\\n",
    "                   # (torch.norm(DFL_vec, dim=1, keepdim=True) + 1e-9)\n",
    "    # get test vec\n",
    "    index = test_pos[0] + test_pos[1]*h\n",
    "    test_vec = DFL_vec[index]\n",
    "    vec = torch.norm(norm_DFL_vec-test_vec, dim=1)\n",
    "\n",
    "    # transform\n",
    "    # vec = -torch.log10(1 - vec + 0.00001)\n",
    "    vis_vec = vec.reshape(h, w, 1)\n",
    "\n",
    "    return vis_vec.numpy()\n",
    "\n",
    "anno_path = os.path.join(ANNO_ROOT, IMG_ID+'.xml')\n",
    "\n",
    "DFL_vec = load_DFL_vec_to_numpy(IMG_ID)\n",
    "\n",
    "anno_info, anno_obj = VL.voc_label_preprocess(anno_path)\n",
    "detected_masks = []\n",
    "\n",
    "h, w, vec_len = DFL_vec.shape\n",
    "img = mmcv.imread(IMG_ROOT + IMG_ID + '.jpg')\n",
    "im_h, im_w = img.shape[0], img.shape[1]\n",
    "scale_factor = w/im_w\n",
    "rescale_img = mmcv.imrescale(img, (h, w))\n",
    "padded_img = mmcv.impad(rescale_img, shape=(h, w))\n",
    "\n",
    "seg_result = np.zeros((h, w, 1))\n",
    "\n",
    "for k, obj in enumerate(anno_obj):\n",
    "\n",
    "    dict_obj = VL.voc_object_xml_element_resolver(obj)\n",
    "    bbox = dict_obj['bndbox']\n",
    "    xmin, ymin, xmax, ymax = float(bbox['xmin']), float(bbox['ymin']), float(bbox['xmax']), float(bbox['ymax'])\n",
    "    xmin, ymin, xmax, ymax = int(xmin*scale_factor), int(ymin*scale_factor), int(xmax*scale_factor), int(ymax*scale_factor)\n",
    "    xcenter = (xmin + xmax + 1) / 2\n",
    "    ycenter = (ymin + ymax + 1) / 2\n",
    "    similar_map =  get_similarity_map((int(xcenter), int(ycenter)), DFL_vec)\n",
    "    mask = np.zeros_like(similar_map)\n",
    "    # generate bbox border points\n",
    "    xpos = np.arange(xmin, xmax + 1)\n",
    "    ypos = np.arange(ymin + 1, ymax)\n",
    "    yones = np.ones_like(xpos)\n",
    "    xones = np.ones_like(ypos)\n",
    "    pts = [np.vstack((xpos, yones * ymin))]\n",
    "    pts += [np.vstack((xpos, yones * ymax))]\n",
    "    pts += [np.vstack((xones * xmin, ypos))]\n",
    "    pts += [np.vstack((xones * xmax, ypos))]\n",
    "    pts = np.hstack(pts)\n",
    "    xy_pts = pts.T\n",
    "    similar_map_bbox_border = similar_map[xy_pts[:,1], xy_pts[:,0], 0]\n",
    "    sorted_border = np.sort(similar_map_bbox_border)\n",
    "    shortest_len = sorted_border[len(sorted_border) // 2]\n",
    "    # print(xcenter, ycenter)\n",
    "    # print(xy_pts)\n",
    "    mask[ymin:ymax+1, xmin:xmax+1, :] = (similar_map[ymin:ymax+1, xmin:xmax+1, :] <= shortest_len) * (k + 1)\n",
    "\n",
    "    seg_result += mask\n",
    "\n",
    "plt.figure()\n",
    "plt.imshow(padded_img)\n",
    "plt.imshow(seg_result, alpha=0.7)\n",
    "plt.show()\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 可视化Attention Map"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 27,
   "metadata": {},
   "outputs": [],
   "source": [
    "'''\n",
    "Author: SlytherinGe\n",
    "LastEditTime: 2021-10-29 10:59:45\n",
    "'''\n",
    "import mmcv\n",
    "import torch\n",
    "import numpy as np\n",
    "import matplotlib.pyplot as plt\n",
    "# (x, y) pos\n",
    "IMG_ROOT = '/media/gejunyao/Disk1/Datasets/SSDD/VOC2012/JPEGImages/'\n",
    "IMG_ID = '000395'\n",
    "\n",
    "# load DFL vector\n",
    "attention_map = mmcv.load('/home/gejunyao/ramdisk/DFL_Cache/'+ IMG_ID + '.pkl')[1]\n",
    "\n",
    "attention_map = attention_map.numpy()[0]\n",
    "\n",
    "plt.figure()\n",
    "plt.imshow(attention_map)\n",
    "plt.show()\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 为rbox生成用于分割网络的mask"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      " 1 / 1160 finished\n",
      " 2 / 1160 finished\n",
      " 3 / 1160 finished\n",
      " 4 / 1160 finished\n",
      " 5 / 1160 finished\n",
      " 6 / 1160 finished\n",
      " 7 / 1160 finished\n",
      " 8 / 1160 finished\n",
      " 9 / 1160 finished\n",
      " 10 / 1160 finished\n",
      " 11 / 1160 finished\n",
      " 12 / 1160 finished\n",
      " 13 / 1160 finished\n",
      " 14 / 1160 finished\n",
      " 15 / 1160 finished\n",
      " 16 / 1160 finished\n",
      " 17 / 1160 finished\n",
      " 18 / 1160 finished\n",
      " 19 / 1160 finished\n",
      " 20 / 1160 finished\n",
      " 21 / 1160 finished\n",
      " 22 / 1160 finished\n",
      " 23 / 1160 finished\n",
      " 24 / 1160 finished\n",
      " 25 / 1160 finished\n",
      " 26 / 1160 finished\n",
      " 27 / 1160 finished\n",
      " 28 / 1160 finished\n",
      " 29 / 1160 finished\n",
      " 30 / 1160 finished\n",
      " 31 / 1160 finished\n",
      " 32 / 1160 finished\n",
      " 33 / 1160 finished\n",
      " 34 / 1160 finished\n",
      " 35 / 1160 finished\n",
      " 36 / 1160 finished\n",
      " 37 / 1160 finished\n",
      " 38 / 1160 finished\n",
      " 39 / 1160 finished\n",
      " 40 / 1160 finished\n",
      " 41 / 1160 finished\n",
      " 42 / 1160 finished\n",
      " 43 / 1160 finished\n",
      " 44 / 1160 finished\n",
      " 45 / 1160 finished\n",
      " 46 / 1160 finished\n",
      " 47 / 1160 finished\n",
      " 48 / 1160 finished\n",
      " 49 / 1160 finished\n",
      " 50 / 1160 finished\n",
      " 51 / 1160 finished\n",
      " 52 / 1160 finished\n",
      " 53 / 1160 finished\n",
      " 54 / 1160 finished\n",
      " 55 / 1160 finished\n",
      " 56 / 1160 finished\n",
      " 57 / 1160 finished\n",
      " 58 / 1160 finished\n",
      " 59 / 1160 finished\n",
      " 60 / 1160 finished\n",
      " 61 / 1160 finished\n",
      " 62 / 1160 finished\n",
      " 63 / 1160 finished\n",
      " 64 / 1160 finished\n",
      " 65 / 1160 finished\n",
      " 66 / 1160 finished\n",
      " 67 / 1160 finished\n",
      " 68 / 1160 finished\n",
      " 69 / 1160 finished\n",
      " 70 / 1160 finished\n",
      " 71 / 1160 finished\n",
      " 72 / 1160 finished\n",
      " 73 / 1160 finished\n",
      " 74 / 1160 finished\n",
      " 75 / 1160 finished\n",
      " 76 / 1160 finished\n",
      " 77 / 1160 finished\n",
      " 78 / 1160 finished\n",
      " 79 / 1160 finished\n",
      " 80 / 1160 finished\n",
      " 81 / 1160 finished\n",
      " 82 / 1160 finished\n",
      " 83 / 1160 finished\n",
      " 84 / 1160 finished\n",
      " 85 / 1160 finished\n",
      " 86 / 1160 finished\n",
      " 87 / 1160 finished\n",
      " 88 / 1160 finished\n",
      " 89 / 1160 finished\n",
      " 90 / 1160 finished\n",
      " 91 / 1160 finished\n",
      " 92 / 1160 finished\n",
      " 93 / 1160 finished\n",
      " 94 / 1160 finished\n",
      " 95 / 1160 finished\n",
      " 96 / 1160 finished\n",
      " 97 / 1160 finished\n",
      " 98 / 1160 finished\n",
      " 99 / 1160 finished\n",
      " 100 / 1160 finished\n",
      " 101 / 1160 finished\n",
      " 102 / 1160 finished\n",
      " 103 / 1160 finished\n",
      " 104 / 1160 finished\n",
      " 105 / 1160 finished\n",
      " 106 / 1160 finished\n",
      " 107 / 1160 finished\n",
      " 108 / 1160 finished\n",
      " 109 / 1160 finished\n",
      " 110 / 1160 finished\n",
      " 111 / 1160 finished\n",
      " 112 / 1160 finished\n",
      " 113 / 1160 finished\n",
      " 114 / 1160 finished\n",
      " 115 / 1160 finished\n",
      " 116 / 1160 finished\n",
      " 117 / 1160 finished\n",
      " 118 / 1160 finished\n",
      " 119 / 1160 finished\n",
      " 120 / 1160 finished\n",
      " 121 / 1160 finished\n",
      " 122 / 1160 finished\n",
      " 123 / 1160 finished\n",
      " 124 / 1160 finished\n",
      " 125 / 1160 finished\n",
      " 126 / 1160 finished\n",
      " 127 / 1160 finished\n",
      " 128 / 1160 finished\n",
      " 129 / 1160 finished\n",
      " 130 / 1160 finished\n",
      " 131 / 1160 finished\n",
      " 132 / 1160 finished\n",
      " 133 / 1160 finished\n",
      " 134 / 1160 finished\n",
      " 135 / 1160 finished\n",
      " 136 / 1160 finished\n",
      " 137 / 1160 finished\n",
      " 138 / 1160 finished\n",
      " 139 / 1160 finished\n",
      " 140 / 1160 finished\n",
      " 141 / 1160 finished\n",
      " 142 / 1160 finished\n",
      " 143 / 1160 finished\n",
      " 144 / 1160 finished\n",
      " 145 / 1160 finished\n",
      " 146 / 1160 finished\n",
      " 147 / 1160 finished\n",
      " 148 / 1160 finished\n",
      " 149 / 1160 finished\n",
      " 150 / 1160 finished\n",
      " 151 / 1160 finished\n",
      " 152 / 1160 finished\n",
      " 153 / 1160 finished\n",
      " 154 / 1160 finished\n",
      " 155 / 1160 finished\n",
      " 156 / 1160 finished\n",
      " 157 / 1160 finished\n",
      " 158 / 1160 finished\n",
      " 159 / 1160 finished\n",
      " 160 / 1160 finished\n",
      " 161 / 1160 finished\n",
      " 162 / 1160 finished\n",
      " 163 / 1160 finished\n",
      " 164 / 1160 finished\n",
      " 165 / 1160 finished\n",
      " 166 / 1160 finished\n",
      " 167 / 1160 finished\n",
      " 168 / 1160 finished\n",
      " 169 / 1160 finished\n",
      " 170 / 1160 finished\n",
      " 171 / 1160 finished\n",
      " 172 / 1160 finished\n",
      " 173 / 1160 finished\n",
      " 174 / 1160 finished\n",
      " 175 / 1160 finished\n",
      " 176 / 1160 finished\n",
      " 177 / 1160 finished\n",
      " 178 / 1160 finished\n",
      " 179 / 1160 finished\n",
      " 180 / 1160 finished\n",
      " 181 / 1160 finished\n",
      " 182 / 1160 finished\n",
      " 183 / 1160 finished\n",
      " 184 / 1160 finished\n",
      " 185 / 1160 finished\n",
      " 186 / 1160 finished\n",
      " 187 / 1160 finished\n",
      " 188 / 1160 finished\n",
      " 189 / 1160 finished\n",
      " 190 / 1160 finished\n",
      " 191 / 1160 finished\n",
      " 192 / 1160 finished\n",
      " 193 / 1160 finished\n",
      " 194 / 1160 finished\n",
      " 195 / 1160 finished\n",
      " 196 / 1160 finished\n",
      " 197 / 1160 finished\n",
      " 198 / 1160 finished\n",
      " 199 / 1160 finished\n",
      " 200 / 1160 finished\n",
      " 201 / 1160 finished\n",
      " 202 / 1160 finished\n",
      " 203 / 1160 finished\n",
      " 204 / 1160 finished\n",
      " 205 / 1160 finished\n",
      " 206 / 1160 finished\n",
      " 207 / 1160 finished\n",
      " 208 / 1160 finished\n",
      " 209 / 1160 finished\n",
      " 210 / 1160 finished\n",
      " 211 / 1160 finished\n",
      " 212 / 1160 finished\n",
      " 213 / 1160 finished\n",
      " 214 / 1160 finished\n",
      " 215 / 1160 finished\n",
      " 216 / 1160 finished\n",
      " 217 / 1160 finished\n",
      " 218 / 1160 finished\n",
      " 219 / 1160 finished\n",
      " 220 / 1160 finished\n",
      " 221 / 1160 finished\n",
      " 222 / 1160 finished\n",
      " 223 / 1160 finished\n",
      " 224 / 1160 finished\n",
      " 225 / 1160 finished\n",
      " 226 / 1160 finished\n",
      " 227 / 1160 finished\n",
      " 228 / 1160 finished\n",
      " 229 / 1160 finished\n",
      " 230 / 1160 finished\n",
      " 231 / 1160 finished\n",
      " 232 / 1160 finished\n",
      " 233 / 1160 finished\n",
      " 234 / 1160 finished\n",
      " 235 / 1160 finished\n",
      " 236 / 1160 finished\n",
      " 237 / 1160 finished\n",
      " 238 / 1160 finished\n",
      " 239 / 1160 finished\n",
      " 240 / 1160 finished\n",
      " 241 / 1160 finished\n",
      " 242 / 1160 finished\n",
      " 243 / 1160 finished\n",
      " 244 / 1160 finished\n",
      " 245 / 1160 finished\n",
      " 246 / 1160 finished\n",
      " 247 / 1160 finished\n",
      " 248 / 1160 finished\n",
      " 249 / 1160 finished\n",
      " 250 / 1160 finished\n",
      " 251 / 1160 finished\n",
      " 252 / 1160 finished\n",
      " 253 / 1160 finished\n",
      " 254 / 1160 finished\n",
      " 255 / 1160 finished\n",
      " 256 / 1160 finished\n",
      " 257 / 1160 finished\n",
      " 258 / 1160 finished\n",
      " 259 / 1160 finished\n",
      " 260 / 1160 finished\n",
      " 261 / 1160 finished\n",
      " 262 / 1160 finished\n",
      " 263 / 1160 finished\n",
      " 264 / 1160 finished\n",
      " 265 / 1160 finished\n",
      " 266 / 1160 finished\n",
      " 267 / 1160 finished\n",
      " 268 / 1160 finished\n",
      " 269 / 1160 finished\n",
      " 270 / 1160 finished\n",
      " 271 / 1160 finished\n",
      " 272 / 1160 finished\n",
      " 273 / 1160 finished\n",
      " 274 / 1160 finished\n",
      " 275 / 1160 finished\n",
      " 276 / 1160 finished\n",
      " 277 / 1160 finished\n",
      " 278 / 1160 finished\n",
      " 279 / 1160 finished\n",
      " 280 / 1160 finished\n",
      " 281 / 1160 finished\n",
      " 282 / 1160 finished\n",
      " 283 / 1160 finished\n",
      " 284 / 1160 finished\n",
      " 285 / 1160 finished\n",
      " 286 / 1160 finished\n",
      " 287 / 1160 finished\n",
      " 288 / 1160 finished\n",
      " 289 / 1160 finished\n",
      " 290 / 1160 finished\n",
      " 291 / 1160 finished\n",
      " 292 / 1160 finished\n",
      " 293 / 1160 finished\n",
      " 294 / 1160 finished\n",
      " 295 / 1160 finished\n",
      " 296 / 1160 finished\n",
      " 297 / 1160 finished\n",
      " 298 / 1160 finished\n",
      " 299 / 1160 finished\n",
      " 300 / 1160 finished\n",
      " 301 / 1160 finished\n",
      " 302 / 1160 finished\n",
      " 303 / 1160 finished\n",
      " 304 / 1160 finished\n",
      " 305 / 1160 finished\n",
      " 306 / 1160 finished\n",
      " 307 / 1160 finished\n",
      " 308 / 1160 finished\n",
      " 309 / 1160 finished\n",
      " 310 / 1160 finished\n",
      " 311 / 1160 finished\n",
      " 312 / 1160 finished\n",
      " 313 / 1160 finished\n",
      " 314 / 1160 finished\n",
      " 315 / 1160 finished\n",
      " 316 / 1160 finished\n",
      " 317 / 1160 finished\n",
      " 318 / 1160 finished\n",
      " 319 / 1160 finished\n",
      " 320 / 1160 finished\n",
      " 321 / 1160 finished\n",
      " 322 / 1160 finished\n",
      " 323 / 1160 finished\n",
      " 324 / 1160 finished\n",
      " 325 / 1160 finished\n",
      " 326 / 1160 finished\n",
      " 327 / 1160 finished\n",
      " 328 / 1160 finished\n",
      " 329 / 1160 finished\n",
      " 330 / 1160 finished\n",
      " 331 / 1160 finished\n",
      " 332 / 1160 finished\n",
      " 333 / 1160 finished\n",
      " 334 / 1160 finished\n",
      " 335 / 1160 finished\n",
      " 336 / 1160 finished\n",
      " 337 / 1160 finished\n",
      " 338 / 1160 finished\n",
      " 339 / 1160 finished\n",
      " 340 / 1160 finished\n",
      " 341 / 1160 finished\n",
      " 342 / 1160 finished\n",
      " 343 / 1160 finished\n",
      " 344 / 1160 finished\n",
      " 345 / 1160 finished\n",
      " 346 / 1160 finished\n",
      " 347 / 1160 finished\n",
      " 348 / 1160 finished\n",
      " 349 / 1160 finished\n",
      " 350 / 1160 finished\n",
      " 351 / 1160 finished\n",
      " 352 / 1160 finished\n",
      " 353 / 1160 finished\n",
      " 354 / 1160 finished\n",
      " 355 / 1160 finished\n",
      " 356 / 1160 finished\n",
      " 357 / 1160 finished\n",
      " 358 / 1160 finished\n",
      " 359 / 1160 finished\n",
      " 360 / 1160 finished\n",
      " 361 / 1160 finished\n",
      " 362 / 1160 finished\n",
      " 363 / 1160 finished\n",
      " 364 / 1160 finished\n",
      " 365 / 1160 finished\n",
      " 366 / 1160 finished\n",
      " 367 / 1160 finished\n",
      " 368 / 1160 finished\n",
      " 369 / 1160 finished\n",
      " 370 / 1160 finished\n",
      " 371 / 1160 finished\n",
      " 372 / 1160 finished\n",
      " 373 / 1160 finished\n",
      " 374 / 1160 finished\n",
      " 375 / 1160 finished\n",
      " 376 / 1160 finished\n",
      " 377 / 1160 finished\n",
      " 378 / 1160 finished\n",
      " 379 / 1160 finished\n",
      " 380 / 1160 finished\n",
      " 381 / 1160 finished\n",
      " 382 / 1160 finished\n",
      " 383 / 1160 finished\n",
      " 384 / 1160 finished\n",
      " 385 / 1160 finished\n",
      " 386 / 1160 finished\n",
      " 387 / 1160 finished\n",
      " 388 / 1160 finished\n",
      " 389 / 1160 finished\n",
      " 390 / 1160 finished\n",
      " 391 / 1160 finished\n",
      " 392 / 1160 finished\n",
      " 393 / 1160 finished\n",
      " 394 / 1160 finished\n",
      " 395 / 1160 finished\n",
      " 396 / 1160 finished\n",
      " 397 / 1160 finished\n",
      " 398 / 1160 finished\n",
      " 399 / 1160 finished\n",
      " 400 / 1160 finished\n",
      " 401 / 1160 finished\n",
      " 402 / 1160 finished\n",
      " 403 / 1160 finished\n",
      " 404 / 1160 finished\n",
      " 405 / 1160 finished\n",
      " 406 / 1160 finished\n",
      " 407 / 1160 finished\n",
      " 408 / 1160 finished\n",
      " 409 / 1160 finished\n",
      " 410 / 1160 finished\n",
      " 411 / 1160 finished\n",
      " 412 / 1160 finished\n",
      " 413 / 1160 finished\n",
      " 414 / 1160 finished\n",
      " 415 / 1160 finished\n",
      " 416 / 1160 finished\n",
      " 417 / 1160 finished\n",
      " 418 / 1160 finished\n",
      " 419 / 1160 finished\n",
      " 420 / 1160 finished\n",
      " 421 / 1160 finished\n",
      " 422 / 1160 finished\n",
      " 423 / 1160 finished\n",
      " 424 / 1160 finished\n",
      " 425 / 1160 finished\n",
      " 426 / 1160 finished\n",
      " 427 / 1160 finished\n",
      " 428 / 1160 finished\n",
      " 429 / 1160 finished\n",
      " 430 / 1160 finished\n",
      " 431 / 1160 finished\n",
      " 432 / 1160 finished\n",
      " 433 / 1160 finished\n",
      " 434 / 1160 finished\n",
      " 435 / 1160 finished\n",
      " 436 / 1160 finished\n",
      " 437 / 1160 finished\n",
      " 438 / 1160 finished\n",
      " 439 / 1160 finished\n",
      " 440 / 1160 finished\n",
      " 441 / 1160 finished\n",
      " 442 / 1160 finished\n",
      " 443 / 1160 finished\n",
      " 444 / 1160 finished\n",
      " 445 / 1160 finished\n",
      " 446 / 1160 finished\n",
      " 447 / 1160 finished\n",
      " 448 / 1160 finished\n",
      " 449 / 1160 finished\n",
      " 450 / 1160 finished\n",
      " 451 / 1160 finished\n",
      " 452 / 1160 finished\n",
      " 453 / 1160 finished\n",
      " 454 / 1160 finished\n",
      " 455 / 1160 finished\n",
      " 456 / 1160 finished\n",
      " 457 / 1160 finished\n",
      " 458 / 1160 finished\n",
      " 459 / 1160 finished\n",
      " 460 / 1160 finished\n",
      " 461 / 1160 finished\n",
      " 462 / 1160 finished\n",
      " 463 / 1160 finished\n",
      " 464 / 1160 finished\n",
      " 465 / 1160 finished\n",
      " 466 / 1160 finished\n",
      " 467 / 1160 finished\n",
      " 468 / 1160 finished\n",
      " 469 / 1160 finished\n",
      " 470 / 1160 finished\n",
      " 471 / 1160 finished\n",
      " 472 / 1160 finished\n",
      " 473 / 1160 finished\n",
      " 474 / 1160 finished\n",
      " 475 / 1160 finished\n",
      " 476 / 1160 finished\n",
      " 477 / 1160 finished\n",
      " 478 / 1160 finished\n",
      " 479 / 1160 finished\n",
      " 480 / 1160 finished\n",
      " 481 / 1160 finished\n",
      " 482 / 1160 finished\n",
      " 483 / 1160 finished\n",
      " 484 / 1160 finished\n",
      " 485 / 1160 finished\n",
      " 486 / 1160 finished\n",
      " 487 / 1160 finished\n",
      " 488 / 1160 finished\n",
      " 489 / 1160 finished\n",
      " 490 / 1160 finished\n",
      " 491 / 1160 finished\n",
      " 492 / 1160 finished\n",
      " 493 / 1160 finished\n",
      " 494 / 1160 finished\n",
      " 495 / 1160 finished\n",
      " 496 / 1160 finished\n",
      " 497 / 1160 finished\n",
      " 498 / 1160 finished\n",
      " 499 / 1160 finished\n",
      " 500 / 1160 finished\n",
      " 501 / 1160 finished\n",
      " 502 / 1160 finished\n",
      " 503 / 1160 finished\n",
      " 504 / 1160 finished\n",
      " 505 / 1160 finished\n",
      " 506 / 1160 finished\n",
      " 507 / 1160 finished\n",
      " 508 / 1160 finished\n",
      " 509 / 1160 finished\n",
      " 510 / 1160 finished\n",
      " 511 / 1160 finished\n",
      " 512 / 1160 finished\n",
      " 513 / 1160 finished\n",
      " 514 / 1160 finished\n",
      " 515 / 1160 finished\n",
      " 516 / 1160 finished\n",
      " 517 / 1160 finished\n",
      " 518 / 1160 finished\n",
      " 519 / 1160 finished\n",
      " 520 / 1160 finished\n",
      " 521 / 1160 finished\n",
      " 522 / 1160 finished\n",
      " 523 / 1160 finished\n",
      " 524 / 1160 finished\n",
      " 525 / 1160 finished\n",
      " 526 / 1160 finished\n",
      " 527 / 1160 finished\n",
      " 528 / 1160 finished\n",
      " 529 / 1160 finished\n",
      " 530 / 1160 finished\n",
      " 531 / 1160 finished\n",
      " 532 / 1160 finished\n",
      " 533 / 1160 finished\n",
      " 534 / 1160 finished\n",
      " 535 / 1160 finished\n",
      " 536 / 1160 finished\n",
      " 537 / 1160 finished\n",
      " 538 / 1160 finished\n",
      " 539 / 1160 finished\n",
      " 540 / 1160 finished\n",
      " 541 / 1160 finished\n",
      " 542 / 1160 finished\n",
      " 543 / 1160 finished\n",
      " 544 / 1160 finished\n",
      " 545 / 1160 finished\n",
      " 546 / 1160 finished\n",
      " 547 / 1160 finished\n",
      " 548 / 1160 finished\n",
      " 549 / 1160 finished\n",
      " 550 / 1160 finished\n",
      " 551 / 1160 finished\n",
      " 552 / 1160 finished\n",
      " 553 / 1160 finished\n",
      " 554 / 1160 finished\n",
      " 555 / 1160 finished\n",
      " 556 / 1160 finished\n",
      " 557 / 1160 finished\n",
      " 558 / 1160 finished\n",
      " 559 / 1160 finished\n",
      " 560 / 1160 finished\n",
      " 561 / 1160 finished\n",
      " 562 / 1160 finished\n",
      " 563 / 1160 finished\n",
      " 564 / 1160 finished\n",
      " 565 / 1160 finished\n",
      " 566 / 1160 finished\n",
      " 567 / 1160 finished\n",
      " 568 / 1160 finished\n",
      " 569 / 1160 finished\n",
      " 570 / 1160 finished\n",
      " 571 / 1160 finished\n",
      " 572 / 1160 finished\n",
      " 573 / 1160 finished\n",
      " 574 / 1160 finished\n",
      " 575 / 1160 finished\n",
      " 576 / 1160 finished\n",
      " 577 / 1160 finished\n",
      " 578 / 1160 finished\n",
      " 579 / 1160 finished\n",
      " 580 / 1160 finished\n",
      " 581 / 1160 finished\n",
      " 582 / 1160 finished\n",
      " 583 / 1160 finished\n",
      " 584 / 1160 finished\n",
      " 585 / 1160 finished\n",
      " 586 / 1160 finished\n",
      " 587 / 1160 finished\n",
      " 588 / 1160 finished\n",
      " 589 / 1160 finished\n",
      " 590 / 1160 finished\n",
      " 591 / 1160 finished\n",
      " 592 / 1160 finished\n",
      " 593 / 1160 finished\n",
      " 594 / 1160 finished\n",
      " 595 / 1160 finished\n",
      " 596 / 1160 finished\n",
      " 597 / 1160 finished\n",
      " 598 / 1160 finished\n",
      " 599 / 1160 finished\n",
      " 600 / 1160 finished\n",
      " 601 / 1160 finished\n",
      " 602 / 1160 finished\n",
      " 603 / 1160 finished\n",
      " 604 / 1160 finished\n",
      " 605 / 1160 finished\n",
      " 606 / 1160 finished\n",
      " 607 / 1160 finished\n",
      " 608 / 1160 finished\n",
      " 609 / 1160 finished\n",
      " 610 / 1160 finished\n",
      " 611 / 1160 finished\n",
      " 612 / 1160 finished\n",
      " 613 / 1160 finished\n",
      " 614 / 1160 finished\n",
      " 615 / 1160 finished\n",
      " 616 / 1160 finished\n",
      " 617 / 1160 finished\n",
      " 618 / 1160 finished\n",
      " 619 / 1160 finished\n",
      " 620 / 1160 finished\n",
      " 621 / 1160 finished\n",
      " 622 / 1160 finished\n",
      " 623 / 1160 finished\n",
      " 624 / 1160 finished\n",
      " 625 / 1160 finished\n",
      " 626 / 1160 finished\n",
      " 627 / 1160 finished\n",
      " 628 / 1160 finished\n",
      " 629 / 1160 finished\n",
      " 630 / 1160 finished\n",
      " 631 / 1160 finished\n",
      " 632 / 1160 finished\n",
      " 633 / 1160 finished\n",
      " 634 / 1160 finished\n",
      " 635 / 1160 finished\n",
      " 636 / 1160 finished\n",
      " 637 / 1160 finished\n",
      " 638 / 1160 finished\n",
      " 639 / 1160 finished\n",
      " 640 / 1160 finished\n",
      " 641 / 1160 finished\n",
      " 642 / 1160 finished\n",
      " 643 / 1160 finished\n",
      " 644 / 1160 finished\n",
      " 645 / 1160 finished\n",
      " 646 / 1160 finished\n",
      " 647 / 1160 finished\n",
      " 648 / 1160 finished\n",
      " 649 / 1160 finished\n",
      " 650 / 1160 finished\n",
      " 651 / 1160 finished\n",
      " 652 / 1160 finished\n",
      " 653 / 1160 finished\n",
      " 654 / 1160 finished\n",
      " 655 / 1160 finished\n",
      " 656 / 1160 finished\n",
      " 657 / 1160 finished\n",
      " 658 / 1160 finished\n",
      " 659 / 1160 finished\n",
      " 660 / 1160 finished\n",
      " 661 / 1160 finished\n",
      " 662 / 1160 finished\n",
      " 663 / 1160 finished\n",
      " 664 / 1160 finished\n",
      " 665 / 1160 finished\n",
      " 666 / 1160 finished\n",
      " 667 / 1160 finished\n",
      " 668 / 1160 finished\n",
      " 669 / 1160 finished\n",
      " 670 / 1160 finished\n",
      " 671 / 1160 finished\n",
      " 672 / 1160 finished\n",
      " 673 / 1160 finished\n",
      " 674 / 1160 finished\n",
      " 675 / 1160 finished\n",
      " 676 / 1160 finished\n",
      " 677 / 1160 finished\n",
      " 678 / 1160 finished\n",
      " 679 / 1160 finished\n",
      " 680 / 1160 finished\n",
      " 681 / 1160 finished\n",
      " 682 / 1160 finished\n",
      " 683 / 1160 finished\n",
      " 684 / 1160 finished\n",
      " 685 / 1160 finished\n",
      " 686 / 1160 finished\n",
      " 687 / 1160 finished\n",
      " 688 / 1160 finished\n",
      " 689 / 1160 finished\n",
      " 690 / 1160 finished\n",
      " 691 / 1160 finished\n",
      " 692 / 1160 finished\n",
      " 693 / 1160 finished\n",
      " 694 / 1160 finished\n",
      " 695 / 1160 finished\n",
      " 696 / 1160 finished\n",
      " 697 / 1160 finished\n",
      " 698 / 1160 finished\n",
      " 699 / 1160 finished\n",
      " 700 / 1160 finished\n",
      " 701 / 1160 finished\n",
      " 702 / 1160 finished\n",
      " 703 / 1160 finished\n",
      " 704 / 1160 finished\n",
      " 705 / 1160 finished\n",
      " 706 / 1160 finished\n",
      " 707 / 1160 finished\n",
      " 708 / 1160 finished\n",
      " 709 / 1160 finished\n",
      " 710 / 1160 finished\n",
      " 711 / 1160 finished\n",
      " 712 / 1160 finished\n",
      " 713 / 1160 finished\n",
      " 714 / 1160 finished\n",
      " 715 / 1160 finished\n",
      " 716 / 1160 finished\n",
      " 717 / 1160 finished\n",
      " 718 / 1160 finished\n",
      " 719 / 1160 finished\n",
      " 720 / 1160 finished\n",
      " 721 / 1160 finished\n",
      " 722 / 1160 finished\n",
      " 723 / 1160 finished\n",
      " 724 / 1160 finished\n",
      " 725 / 1160 finished\n",
      " 726 / 1160 finished\n",
      " 727 / 1160 finished\n",
      " 728 / 1160 finished\n",
      " 729 / 1160 finished\n",
      " 730 / 1160 finished\n",
      " 731 / 1160 finished\n",
      " 732 / 1160 finished\n",
      " 733 / 1160 finished\n",
      " 734 / 1160 finished\n",
      " 735 / 1160 finished\n",
      " 736 / 1160 finished\n",
      " 737 / 1160 finished\n",
      " 738 / 1160 finished\n",
      " 739 / 1160 finished\n",
      " 740 / 1160 finished\n",
      " 741 / 1160 finished\n",
      " 742 / 1160 finished\n",
      " 743 / 1160 finished\n",
      " 744 / 1160 finished\n",
      " 745 / 1160 finished\n",
      " 746 / 1160 finished\n",
      " 747 / 1160 finished\n",
      " 748 / 1160 finished\n",
      " 749 / 1160 finished\n",
      " 750 / 1160 finished\n",
      " 751 / 1160 finished\n",
      " 752 / 1160 finished\n",
      " 753 / 1160 finished\n",
      " 754 / 1160 finished\n",
      " 755 / 1160 finished\n",
      " 756 / 1160 finished\n",
      " 757 / 1160 finished\n",
      " 758 / 1160 finished\n",
      " 759 / 1160 finished\n",
      " 760 / 1160 finished\n",
      " 761 / 1160 finished\n",
      " 762 / 1160 finished\n",
      " 763 / 1160 finished\n",
      " 764 / 1160 finished\n",
      " 765 / 1160 finished\n",
      " 766 / 1160 finished\n",
      " 767 / 1160 finished\n",
      " 768 / 1160 finished\n",
      " 769 / 1160 finished\n",
      " 770 / 1160 finished\n",
      " 771 / 1160 finished\n",
      " 772 / 1160 finished\n",
      " 773 / 1160 finished\n",
      " 774 / 1160 finished\n",
      " 775 / 1160 finished\n",
      " 776 / 1160 finished\n",
      " 777 / 1160 finished\n",
      " 778 / 1160 finished\n",
      " 779 / 1160 finished\n",
      " 780 / 1160 finished\n",
      " 781 / 1160 finished\n",
      " 782 / 1160 finished\n",
      " 783 / 1160 finished\n",
      " 784 / 1160 finished\n",
      " 785 / 1160 finished\n",
      " 786 / 1160 finished\n",
      " 787 / 1160 finished\n",
      " 788 / 1160 finished\n",
      " 789 / 1160 finished\n",
      " 790 / 1160 finished\n",
      " 791 / 1160 finished\n",
      " 792 / 1160 finished\n",
      " 793 / 1160 finished\n",
      " 794 / 1160 finished\n",
      " 795 / 1160 finished\n",
      " 796 / 1160 finished\n",
      " 797 / 1160 finished\n",
      " 798 / 1160 finished\n",
      " 799 / 1160 finished\n",
      " 800 / 1160 finished\n",
      " 801 / 1160 finished\n",
      " 802 / 1160 finished\n",
      " 803 / 1160 finished\n",
      " 804 / 1160 finished\n",
      " 805 / 1160 finished\n",
      " 806 / 1160 finished\n",
      " 807 / 1160 finished\n",
      " 808 / 1160 finished\n",
      " 809 / 1160 finished\n",
      " 810 / 1160 finished\n",
      " 811 / 1160 finished\n",
      " 812 / 1160 finished\n",
      " 813 / 1160 finished\n",
      " 814 / 1160 finished\n",
      " 815 / 1160 finished\n",
      " 816 / 1160 finished\n",
      " 817 / 1160 finished\n",
      " 818 / 1160 finished\n",
      " 819 / 1160 finished\n",
      " 820 / 1160 finished\n",
      " 821 / 1160 finished\n",
      " 822 / 1160 finished\n",
      " 823 / 1160 finished\n",
      " 824 / 1160 finished\n",
      " 825 / 1160 finished\n",
      " 826 / 1160 finished\n",
      " 827 / 1160 finished\n",
      " 828 / 1160 finished\n",
      " 829 / 1160 finished\n",
      " 830 / 1160 finished\n",
      " 831 / 1160 finished\n",
      " 832 / 1160 finished\n",
      " 833 / 1160 finished\n",
      " 834 / 1160 finished\n",
      " 835 / 1160 finished\n",
      " 836 / 1160 finished\n",
      " 837 / 1160 finished\n",
      " 838 / 1160 finished\n",
      " 839 / 1160 finished\n",
      " 840 / 1160 finished\n",
      " 841 / 1160 finished\n",
      " 842 / 1160 finished\n",
      " 843 / 1160 finished\n",
      " 844 / 1160 finished\n",
      " 845 / 1160 finished\n",
      " 846 / 1160 finished\n",
      " 847 / 1160 finished\n",
      " 848 / 1160 finished\n",
      " 849 / 1160 finished\n",
      " 850 / 1160 finished\n",
      " 851 / 1160 finished\n",
      " 852 / 1160 finished\n",
      " 853 / 1160 finished\n",
      " 854 / 1160 finished\n",
      " 855 / 1160 finished\n",
      " 856 / 1160 finished\n",
      " 857 / 1160 finished\n",
      " 858 / 1160 finished\n",
      " 859 / 1160 finished\n",
      " 860 / 1160 finished\n",
      " 861 / 1160 finished\n",
      " 862 / 1160 finished\n",
      " 863 / 1160 finished\n",
      " 864 / 1160 finished\n",
      " 865 / 1160 finished\n",
      " 866 / 1160 finished\n",
      " 867 / 1160 finished\n",
      " 868 / 1160 finished\n",
      " 869 / 1160 finished\n",
      " 870 / 1160 finished\n",
      " 871 / 1160 finished\n",
      " 872 / 1160 finished\n",
      " 873 / 1160 finished\n",
      " 874 / 1160 finished\n",
      " 875 / 1160 finished\n",
      " 876 / 1160 finished\n",
      " 877 / 1160 finished\n",
      " 878 / 1160 finished\n",
      " 879 / 1160 finished\n",
      " 880 / 1160 finished\n",
      " 881 / 1160 finished\n",
      " 882 / 1160 finished\n",
      " 883 / 1160 finished\n",
      " 884 / 1160 finished\n",
      " 885 / 1160 finished\n",
      " 886 / 1160 finished\n",
      " 887 / 1160 finished\n",
      " 888 / 1160 finished\n",
      " 889 / 1160 finished\n",
      " 890 / 1160 finished\n",
      " 891 / 1160 finished\n",
      " 892 / 1160 finished\n",
      " 893 / 1160 finished\n",
      " 894 / 1160 finished\n",
      " 895 / 1160 finished\n",
      " 896 / 1160 finished\n",
      " 897 / 1160 finished\n",
      " 898 / 1160 finished\n",
      " 899 / 1160 finished\n",
      " 900 / 1160 finished\n",
      " 901 / 1160 finished\n",
      " 902 / 1160 finished\n",
      " 903 / 1160 finished\n",
      " 904 / 1160 finished\n",
      " 905 / 1160 finished\n",
      " 906 / 1160 finished\n",
      " 907 / 1160 finished\n",
      " 908 / 1160 finished\n",
      " 909 / 1160 finished\n",
      " 910 / 1160 finished\n",
      " 911 / 1160 finished\n",
      " 912 / 1160 finished\n",
      " 913 / 1160 finished\n",
      " 914 / 1160 finished\n",
      " 915 / 1160 finished\n",
      " 916 / 1160 finished\n",
      " 917 / 1160 finished\n",
      " 918 / 1160 finished\n",
      " 919 / 1160 finished\n",
      " 920 / 1160 finished\n",
      " 921 / 1160 finished\n",
      " 922 / 1160 finished\n",
      " 923 / 1160 finished\n",
      " 924 / 1160 finished\n",
      " 925 / 1160 finished\n",
      " 926 / 1160 finished\n",
      " 927 / 1160 finished\n",
      " 928 / 1160 finished\n",
      " 929 / 1160 finished\n",
      " 930 / 1160 finished\n",
      " 931 / 1160 finished\n",
      " 932 / 1160 finished\n",
      " 933 / 1160 finished\n",
      " 934 / 1160 finished\n",
      " 935 / 1160 finished\n",
      " 936 / 1160 finished\n",
      " 937 / 1160 finished\n",
      " 938 / 1160 finished\n",
      " 939 / 1160 finished\n",
      " 940 / 1160 finished\n",
      " 941 / 1160 finished\n",
      " 942 / 1160 finished\n",
      " 943 / 1160 finished\n",
      " 944 / 1160 finished\n",
      " 945 / 1160 finished\n",
      " 946 / 1160 finished\n",
      " 947 / 1160 finished\n",
      " 948 / 1160 finished\n",
      " 949 / 1160 finished\n",
      " 950 / 1160 finished\n",
      " 951 / 1160 finished\n",
      " 952 / 1160 finished\n",
      " 953 / 1160 finished\n",
      " 954 / 1160 finished\n",
      " 955 / 1160 finished\n",
      " 956 / 1160 finished\n",
      " 957 / 1160 finished\n",
      " 958 / 1160 finished\n",
      " 959 / 1160 finished\n",
      " 960 / 1160 finished\n",
      " 961 / 1160 finished\n",
      " 962 / 1160 finished\n",
      " 963 / 1160 finished\n",
      " 964 / 1160 finished\n",
      " 965 / 1160 finished\n",
      " 966 / 1160 finished\n",
      " 967 / 1160 finished\n",
      " 968 / 1160 finished\n",
      " 969 / 1160 finished\n",
      " 970 / 1160 finished\n",
      " 971 / 1160 finished\n",
      " 972 / 1160 finished\n",
      " 973 / 1160 finished\n",
      " 974 / 1160 finished\n",
      " 975 / 1160 finished\n",
      " 976 / 1160 finished\n",
      " 977 / 1160 finished\n",
      " 978 / 1160 finished\n",
      " 979 / 1160 finished\n",
      " 980 / 1160 finished\n",
      " 981 / 1160 finished\n",
      " 982 / 1160 finished\n",
      " 983 / 1160 finished\n",
      " 984 / 1160 finished\n",
      " 985 / 1160 finished\n",
      " 986 / 1160 finished\n",
      " 987 / 1160 finished\n",
      " 988 / 1160 finished\n",
      " 989 / 1160 finished\n",
      " 990 / 1160 finished\n",
      " 991 / 1160 finished\n",
      " 992 / 1160 finished\n",
      " 993 / 1160 finished\n",
      " 994 / 1160 finished\n",
      " 995 / 1160 finished\n",
      " 996 / 1160 finished\n",
      " 997 / 1160 finished\n",
      " 998 / 1160 finished\n",
      " 999 / 1160 finished\n",
      " 1000 / 1160 finished\n",
      " 1001 / 1160 finished\n",
      " 1002 / 1160 finished\n",
      " 1003 / 1160 finished\n",
      " 1004 / 1160 finished\n",
      " 1005 / 1160 finished\n",
      " 1006 / 1160 finished\n",
      " 1007 / 1160 finished\n",
      " 1008 / 1160 finished\n",
      " 1009 / 1160 finished\n",
      " 1010 / 1160 finished\n",
      " 1011 / 1160 finished\n",
      " 1012 / 1160 finished\n",
      " 1013 / 1160 finished\n",
      " 1014 / 1160 finished\n",
      " 1015 / 1160 finished\n",
      " 1016 / 1160 finished\n",
      " 1017 / 1160 finished\n",
      " 1018 / 1160 finished\n",
      " 1019 / 1160 finished\n",
      " 1020 / 1160 finished\n",
      " 1021 / 1160 finished\n",
      " 1022 / 1160 finished\n",
      " 1023 / 1160 finished\n",
      " 1024 / 1160 finished\n",
      " 1025 / 1160 finished\n",
      " 1026 / 1160 finished\n",
      " 1027 / 1160 finished\n",
      " 1028 / 1160 finished\n",
      " 1029 / 1160 finished\n",
      " 1030 / 1160 finished\n",
      " 1031 / 1160 finished\n",
      " 1032 / 1160 finished\n",
      " 1033 / 1160 finished\n",
      " 1034 / 1160 finished\n",
      " 1035 / 1160 finished\n",
      " 1036 / 1160 finished\n",
      " 1037 / 1160 finished\n",
      " 1038 / 1160 finished\n",
      " 1039 / 1160 finished\n",
      " 1040 / 1160 finished\n",
      " 1041 / 1160 finished\n",
      " 1042 / 1160 finished\n",
      " 1043 / 1160 finished\n",
      " 1044 / 1160 finished\n",
      " 1045 / 1160 finished\n",
      " 1046 / 1160 finished\n",
      " 1047 / 1160 finished\n",
      " 1048 / 1160 finished\n",
      " 1049 / 1160 finished\n",
      " 1050 / 1160 finished\n",
      " 1051 / 1160 finished\n",
      " 1052 / 1160 finished\n",
      " 1053 / 1160 finished\n",
      " 1054 / 1160 finished\n",
      " 1055 / 1160 finished\n",
      " 1056 / 1160 finished\n",
      " 1057 / 1160 finished\n",
      " 1058 / 1160 finished\n",
      " 1059 / 1160 finished\n",
      " 1060 / 1160 finished\n",
      " 1061 / 1160 finished\n",
      " 1062 / 1160 finished\n",
      " 1063 / 1160 finished\n",
      " 1064 / 1160 finished\n",
      " 1065 / 1160 finished\n",
      " 1066 / 1160 finished\n",
      " 1067 / 1160 finished\n",
      " 1068 / 1160 finished\n",
      " 1069 / 1160 finished\n",
      " 1070 / 1160 finished\n",
      " 1071 / 1160 finished\n",
      " 1072 / 1160 finished\n",
      " 1073 / 1160 finished\n",
      " 1074 / 1160 finished\n",
      " 1075 / 1160 finished\n",
      " 1076 / 1160 finished\n",
      " 1077 / 1160 finished\n",
      " 1078 / 1160 finished\n",
      " 1079 / 1160 finished\n",
      " 1080 / 1160 finished\n",
      " 1081 / 1160 finished\n",
      " 1082 / 1160 finished\n",
      " 1083 / 1160 finished\n",
      " 1084 / 1160 finished\n",
      " 1085 / 1160 finished\n",
      " 1086 / 1160 finished\n",
      " 1087 / 1160 finished\n",
      " 1088 / 1160 finished\n",
      " 1089 / 1160 finished\n",
      " 1090 / 1160 finished\n",
      " 1091 / 1160 finished\n",
      " 1092 / 1160 finished\n",
      " 1093 / 1160 finished\n",
      " 1094 / 1160 finished\n",
      " 1095 / 1160 finished\n",
      " 1096 / 1160 finished\n",
      " 1097 / 1160 finished\n",
      " 1098 / 1160 finished\n",
      " 1099 / 1160 finished\n",
      " 1100 / 1160 finished\n",
      " 1101 / 1160 finished\n",
      " 1102 / 1160 finished\n",
      " 1103 / 1160 finished\n",
      " 1104 / 1160 finished\n",
      " 1105 / 1160 finished\n",
      " 1106 / 1160 finished\n",
      " 1107 / 1160 finished\n",
      " 1108 / 1160 finished\n",
      " 1109 / 1160 finished\n",
      " 1110 / 1160 finished\n",
      " 1111 / 1160 finished\n",
      " 1112 / 1160 finished\n",
      " 1113 / 1160 finished\n",
      " 1114 / 1160 finished\n",
      " 1115 / 1160 finished\n",
      " 1116 / 1160 finished\n",
      " 1117 / 1160 finished\n",
      " 1118 / 1160 finished\n",
      " 1119 / 1160 finished\n",
      " 1120 / 1160 finished\n",
      " 1121 / 1160 finished\n",
      " 1122 / 1160 finished\n",
      " 1123 / 1160 finished\n",
      " 1124 / 1160 finished\n",
      " 1125 / 1160 finished\n",
      " 1126 / 1160 finished\n",
      " 1127 / 1160 finished\n",
      " 1128 / 1160 finished\n",
      " 1129 / 1160 finished\n",
      " 1130 / 1160 finished\n",
      " 1131 / 1160 finished\n",
      " 1132 / 1160 finished\n",
      " 1133 / 1160 finished\n",
      " 1134 / 1160 finished\n",
      " 1135 / 1160 finished\n",
      " 1136 / 1160 finished\n",
      " 1137 / 1160 finished\n",
      " 1138 / 1160 finished\n",
      " 1139 / 1160 finished\n",
      " 1140 / 1160 finished\n",
      " 1141 / 1160 finished\n",
      " 1142 / 1160 finished\n",
      " 1143 / 1160 finished\n",
      " 1144 / 1160 finished\n",
      " 1145 / 1160 finished\n",
      " 1146 / 1160 finished\n",
      " 1147 / 1160 finished\n",
      " 1148 / 1160 finished\n",
      " 1149 / 1160 finished\n",
      " 1150 / 1160 finished\n",
      " 1151 / 1160 finished\n",
      " 1152 / 1160 finished\n",
      " 1153 / 1160 finished\n",
      " 1154 / 1160 finished\n",
      " 1155 / 1160 finished\n",
      " 1156 / 1160 finished\n",
      " 1157 / 1160 finished\n",
      " 1158 / 1160 finished\n",
      " 1159 / 1160 finished\n",
      " 1160 / 1160 finished\n"
     ]
    }
   ],
   "source": [
    "# Build per-image binary segmentation masks by filling each rotated-box\n",
    "# annotation (4 corner points from the VOC-style XML) into a 0/1 uint8 PNG.\n",
    "import sys\n",
    "sys.path.append('../')\n",
    "import cv2\n",
    "import os\n",
    "import numpy as np\n",
    "import dataset_utilty.voc_label_utility as VL\n",
    "# NOTE(review): absolute local paths — adjust before running on another machine.\n",
    "ANNO_ROOT = '/media/gejunyao/Disk1/Datasets/SSDD/VOC2012/annotations_r/'\n",
    "IMG_ROOT = '/media/gejunyao/Disk1/Datasets/SSDD/VOC2012/JPEGImages/'\n",
    "OUT_ROOT = '/media/gejunyao/Disk1/Datasets/SSDD/VOC2012/annotation_mask/'\n",
    "\n",
    "img_files = os.listdir(IMG_ROOT)\n",
    "num_file = len(img_files)\n",
    "for k, img_f in enumerate(img_files):\n",
    "    # Strip the extension while keeping dots inside the stem,\n",
    "    # e.g. 'a.b.jpg' -> 'a.b.' (trailing dot kept; 'xml' appended below).\n",
    "    name_l = img_f.split('.')\n",
    "    name = ''\n",
    "    for i in range(len(name_l)-1):\n",
    "        name += name_l[i]+'.'\n",
    "    img_path = os.path.join(IMG_ROOT, img_f)\n",
    "    anno_path = os.path.join(ANNO_ROOT, name+'xml')\n",
    "    img = cv2.imread(img_path)\n",
    "    h, w, c = img.shape\n",
    "    # One mask per image; foreground pixels get label value 1.\n",
    "    seg_mask = np.zeros((h, w), dtype=np.uint8)\n",
    "    anno_info, anno_obj = VL.voc_label_preprocess(anno_path)\n",
    "    for obj in anno_obj:\n",
    "        dict_obj = VL.voc_object_xml_element_resolver(obj)\n",
    "        pos = dict_obj['bndbox']\n",
    "        # Four corner points of the rotated box (x1..x4 / y1..y4 in the XML).\n",
    "        p0 = (int(pos['x1']), int(pos['y1'])) #top-left\n",
    "        p1 = (int(pos['x2']), int(pos['y2'])) #top-right\n",
    "        p2 = (int(pos['x3']), int(pos['y3'])) #down-left\n",
    "        p3 = (int(pos['x4']), int(pos['y4'])) #down-right\n",
    "        # thickness=-1 fills the polygon with value 1.\n",
    "        seg_mask = cv2.drawContours(seg_mask, [np.array((p0, p1, p2, p3))], 0, 1, -1)\n",
    "    cv2.imwrite(os.path.join(OUT_ROOT, img_f.split('.')[0] + '.png'), seg_mask)  \n",
    "    print(' {} / {} finished'.format(k+1, num_file))\n",
    "\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Edge-center Gaussian heatmap label generation"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Using matplotlib backend: TkAgg\n"
     ]
    }
   ],
   "source": [
    "# Imports and configuration for the edge-center heatmap experiments below.\n",
    "import sys\n",
    "sys.path.append('/media/gejunyao/Disk/Gejunyao/develop/toolbox-for-voc-dataset/')\n",
    "\n",
    "import mmcv\n",
    "import os\n",
    "import cv2\n",
    "import numpy as np\n",
    "from utils.ssdd_base_reader import SSDDRboxReader\n",
    "from mmdet.core import BitmapMasks\n",
    "import matplotlib.pyplot as plt\n",
    "# NOTE(review): absolute local path — adjust before running elsewhere.\n",
    "ANNO_ROOT = '/media/gejunyao/Disk1/Datasets/SSDD/VOC2012/annotations_r/'\n",
    "%matplotlib"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 34,
   "metadata": {},
   "outputs": [],
   "source": [
    "class EdgeCenterHMGenerator(SSDDRboxReader):\n",
    "    \"\"\"Generate edge-center Gaussian heatmaps from SSDD rotated-box labels.\n",
    "\n",
    "    Produces a 3-channel pseudo mask per image: channel 0 = target center,\n",
    "    channel 1 = short-side centers, channel 2 = long-side centers.\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, anno_root, theta=0.2) -> None:\n",
    "        super().__init__(anno_root)\n",
    "        # Gaussian spread as a fraction of the box side lengths (see theta_x/theta_y).\n",
    "        self.theta = theta\n",
    "        # Anisotropy factor for the side-center Gaussians; 1 keeps them identical.\n",
    "        self.squeeze_ratio = 1\n",
    "\n",
    "    def __get_gaussian_map(self, gt_bbox_shape, theta=1):\n",
    "        \"\"\"Build a (h, w, 3) Gaussian map for one rotated box.\n",
    "\n",
    "        Args:\n",
    "            gt_bbox_shape: (x, y, w, h, angle) tuple; x and y are unused here.\n",
    "            theta: spread factor relative to the box width/height.\n",
    "\n",
    "        Returns:\n",
    "            np.ndarray: rotated Gaussian map, each channel max-normalized to 1.\n",
    "        \"\"\"\n",
    "\n",
    "        _, _, w, h, a = gt_bbox_shape\n",
    "\n",
    "        # generate a x, y grid\n",
    "        X, Y = np.meshgrid(np.arange(w), np.arange(h))\n",
    "        cy, cx = (h - 1) / 2, (w - 1) / 2\n",
    "\n",
    "        # get the values for theta_x, theta_y\n",
    "        theta_x = w * theta\n",
    "        theta_y = h * theta\n",
    "        # start calculate the gaussian map\n",
    "        # gaussion map for target center\n",
    "        G_tc =   np.square(X - cx) / (2 * theta_x ** 2) \\\n",
    "               + np.square(Y - cy) / (2 * theta_y ** 2)\n",
    "\n",
    "        # Selects between the two anisotropic maps below, based on which side is longer.\n",
    "        shortside_idx = int(w > h)\n",
    "        # gaussion map for shortside center\n",
    "        G_A = np.square(X - cx) / (2 * (theta_x * self.squeeze_ratio) ** 2) \\\n",
    "            + np.square(Y - cy) / (2 * (theta_y / self.squeeze_ratio) ** 2)\n",
    "        G_B = np.square(X - cx) / (2 * (theta_x / self.squeeze_ratio) ** 2) \\\n",
    "            + np.square(Y - cy) / (2 * (theta_y * self.squeeze_ratio) ** 2)\n",
    "\n",
    "        G = np.zeros((G_tc.shape[0], G_tc.shape[1], 3))\n",
    "        \n",
    "        G[...,0], G[...,1], G[...,2] = G_tc, (G_A, G_B)[shortside_idx], (G_A, G_B)[1-shortside_idx]\n",
    "        G = np.exp(-G)\n",
    "        # Rotate to the box angle; auto_bound grows the canvas to fit the corners.\n",
    "        G = mmcv.imrotate(G, a, auto_bound=True)\n",
    "        # Re-normalize each channel to a peak of 1 after rotation.\n",
    "        G = G / np.max(G.reshape(-1, 3),axis=0)\n",
    "\n",
    "        return G\n",
    "\n",
    "    def __get_bbox_from_rbox(self, rbox_pts):\n",
    "        \"\"\"Return the axis-aligned bounding box of the given corner points.\n",
    "\n",
    "        Args:\n",
    "            rbox_pts: iterable of (x, y) corner points of a rotated box.\n",
    "\n",
    "        Returns:\n",
    "            tuple[int, int, int, int]: (x_min, y_min, x_max, y_max), rounded\n",
    "            via int(v + 0.5) (round-half-up for non-negative coordinates).\n",
    "        \"\"\"\n",
    "\n",
    "        # Seed min/max with the first point, then scan the rest.\n",
    "        x_min, y_min, x_max, y_max = rbox_pts[0][0], rbox_pts[0][1], rbox_pts[0][0], rbox_pts[0][1]\n",
    "        for pt in rbox_pts:\n",
    "            if pt[0] < x_min:\n",
    "                x_min = pt[0]\n",
    "            if pt[0] > x_max:\n",
    "                x_max = pt[0]\n",
    "            if pt[1] < y_min:\n",
    "                y_min = pt[1]\n",
    "            if pt[1] > y_max:\n",
    "                y_max = pt[1]\n",
    "\n",
    "        return int(x_min + 0.5), int(y_min + 0.5), int(x_max + 0.5), int(y_max + 0.5)\n",
    "\n",
    "\n",
    "    def __call__(self, results):\n",
    "        \"\"\"Build the 3-channel heatmap and the center points for one image.\n",
    "\n",
    "        Args:\n",
    "            results: dict with 'img_shape' (unpacked as H, W, C) and\n",
    "                'gt_bboxes', each box being (x, y, w, h, angle).\n",
    "\n",
    "        Returns:\n",
    "            tuple: (pseudo_mask of shape (3, H, W), long_side_center,\n",
    "                short_side_center, target_center) in pixel coordinates.\n",
    "        \"\"\"\n",
    "\n",
    "        img_h, img_w, _ = results['img_shape']\n",
    "        gt_bboxes = results['gt_bboxes']\n",
    "        corner_pts = []\n",
    "        # gaussion_maps is a list of tuples, a tuple contains:\n",
    "        # ((x, y), gaussion_map)\n",
    "        gaussion_maps = []\n",
    "        # mask channel 0~2 represents: target center, short-side center, long-side center\n",
    "        pseudo_mask = np.zeros((img_h, img_w, 3))\n",
    "        # generate gaussion map and corner points for each rbox\n",
    "        for gt_bbox in gt_bboxes:\n",
    "            x, y, w, h, a = gt_bbox[0], gt_bbox[1], gt_bbox[2], gt_bbox[3], gt_bbox[4]\n",
    "            pts = cv2.boxPoints(((x, y), (w, h), a))\n",
    "            # NOTE(review): bbox is computed but never used below.\n",
    "            bbox = self.__get_bbox_from_rbox(pts)\n",
    "            corner_pts.append(pts)\n",
    "            gaussion = self.__get_gaussian_map((x, y, w, h, a), self.theta)\n",
    "            # gaussion = gaussion / np.max(gaussion.reshape(-1, 3),axis=0)\n",
    "            gaussion_maps.append(gaussion)\n",
    "\n",
    "        # calculate edge centers for each rbox\n",
    "        corner_pts = np.array(corner_pts)\n",
    "        # Squared lengths of edges 0-1 and 1-2, to decide which edge is longer.\n",
    "        d_12 = np.square(corner_pts[:,0,0]-corner_pts[:,1,0]) + \\\n",
    "               np.square(corner_pts[:,0,1]-corner_pts[:,1,1])\n",
    "        d_23 = np.square(corner_pts[:,1,0]-corner_pts[:,2,0]) + \\\n",
    "               np.square(corner_pts[:,1,1]-corner_pts[:,2,1])\n",
    "        is_d23_longer = d_12 < d_23\n",
    "        num_box = len(is_d23_longer)\n",
    "        box_index = np.arange(num_box)\n",
    "        # Doubled (each box has two long and two short sides), scaled by 4\n",
    "        # because corner_pts is flattened to 4 corners per box below.\n",
    "        box_index = np.hstack((box_index, box_index)) * 4\n",
    "        long_side_start_index = np.int0(is_d23_longer)\n",
    "        long_side_start_index = np.hstack((long_side_start_index, long_side_start_index + 2))\n",
    "        long_side_end_index = (long_side_start_index + 1) % 4\n",
    "        short_side_start_index = long_side_end_index\n",
    "        short_side_end_index = (short_side_start_index + 1) % 4\n",
    "        corner_pts = corner_pts.reshape(-1, 2)\n",
    "        # Each center is the midpoint of the corresponding corner pair.\n",
    "        long_side_center = (corner_pts[box_index + long_side_start_index,:] +\n",
    "                            corner_pts[box_index + long_side_end_index,:]) / 2\n",
    "        short_side_center = (corner_pts[box_index + short_side_start_index,:] +\n",
    "                            corner_pts[box_index + short_side_end_index,:]) / 2 \n",
    "        target_center = (corner_pts[box_index[:num_box], :] + \n",
    "                         corner_pts[box_index[:num_box] + 2, :]) / 2 \n",
    "\n",
    "        for k, gaussian_map in enumerate(gaussion_maps):\n",
    "            # set guassion maps for center points\n",
    "            # Trailing comma makes tc a 1-tuple so all three iterate uniformly below.\n",
    "            tc = target_center[k],\n",
    "            sc = short_side_center[k], short_side_center[k + num_box]\n",
    "            lc = long_side_center[k], long_side_center[k + num_box]\n",
    "            gmap = gaussian_map\n",
    "            g_h, g_w, _ = gmap.shape\n",
    "\n",
    "            for v, centers in enumerate((tc, sc, lc)):\n",
    "                for center in centers:\n",
    "                    x, y = center[0], center[1]\n",
    "                    # Paste window of the Gaussian centered on this point.\n",
    "                    y_start, y_end = int(y-g_h/2 + 0.5), int(y+g_h/2 + 0.5)\n",
    "                    x_start, x_end = int(x-g_w/2 + 0.5), int(x+g_w/2 + 0.5)\n",
    "                    # Clip the window to the image bounds.\n",
    "                    y_s, x_s = max(0, y_start), max(0, x_start)\n",
    "                    y_e, x_e = min(img_h, y_end), min(img_w, x_end)\n",
    "                    ori_mask = pseudo_mask[y_s: y_e, x_s: x_e, v]\n",
    "                    y_offset, x_offset = g_h - (y_end-y_start), g_w - (x_end-x_start)\n",
    "                    # Crop the Gaussian by the same amount the window was clipped.\n",
    "                    gmap_temp = gmap[y_s-y_start+y_offset:g_h - (y_end-y_e), x_s-x_start+x_offset:g_w - (x_end-x_e), v]\n",
    "                    # Merge with element-wise max so overlapping boxes keep their peaks.\n",
    "                    pseudo_mask[y_s: y_e, x_s: x_e, v] = np.where(gmap_temp > ori_mask, gmap_temp, ori_mask)\n",
    "\n",
    "            \n",
    "        pseudo_mask = pseudo_mask.transpose(2,0,1)\n",
    "        pseudo_mask[pseudo_mask >= 1] = 1.0\n",
    "        return pseudo_mask, long_side_center, short_side_center, target_center\n",
    "\n",
    "# Demo: build and visualize the heatmaps for one sample.\n",
    "# NOTE(review): __name__ is '__main__' in a notebook kernel, so this guard is\n",
    "# always true here; it only matters if this cell is exported to a module.\n",
    "if __name__ == '__main__':\n",
    "\n",
    "    # Index into the reader's annotation file list.\n",
    "    ID = 230\n",
    "    IMG_GT_ROOT = '/media/gejunyao/Disk/Gejunyao/exp_results/visualization/results/ssdd/ssdd_gt_r/'\n",
    "    IMG_ROOT = '/media/gejunyao/Disk1/Datasets/SSDD/VOC2012/JPEGImages/'\n",
    "\n",
    "    # ANNO_ROOT is defined in the earlier import/config cell.\n",
    "    reader = EdgeCenterHMGenerator(ANNO_ROOT)\n",
    "    result = reader.get_mmdet_pipeline_result(ID)\n",
    "    img_name = reader.anno_files[ID].split('.')[0] + '.jpg'\n",
    "    img_path = os.path.join(IMG_ROOT, img_name)\n",
    "    gt_img_path = os.path.join(IMG_GT_ROOT, img_name)\n",
    "    pseudo_mask, long_side_center, short_side_center, target_center = reader(result)\n",
    "    img = cv2.imread(img_path)\n",
    "    gt_img = cv2.imread(gt_img_path)\n",
    "    # Draw the three kinds of centers as filled circles on the GT image.\n",
    "    for center in long_side_center:\n",
    "        cv2.circle(gt_img, np.int0(center + 0.5), 4, (0, 255, 64), -1) \n",
    "    for center in short_side_center:\n",
    "        cv2.circle(gt_img, np.int0(center + 0.5), 4, (0, 128, 0), -1) \n",
    "    for center in target_center:\n",
    "        cv2.circle(gt_img, np.int0(center + 0.5), 5, (0, 128, 128), -1) \n",
    "\n",
    "    # 2x2 panel: GT with centers, then one heatmap channel per panel.\n",
    "    plt.figure()\n",
    "    plt.subplot(2,2,1)\n",
    "    plt.title('ground truth')\n",
    "    plt.imshow(gt_img)    \n",
    "    plt.subplot(2,2,2)\n",
    "    plt.title('target center heatmap')\n",
    "    plt.imshow(img)\n",
    "    plt.imshow(pseudo_mask[0,...], alpha=0.5)\n",
    "    plt.subplot(2,2,3)\n",
    "    plt.title('short-side center heatmap')\n",
    "    plt.imshow(img)\n",
    "    plt.imshow(pseudo_mask[1,...], alpha=0.5)\n",
    "    plt.subplot(2,2,4)\n",
    "    plt.title('long-side center heatmap')\n",
    "    plt.imshow(img)\n",
    "    plt.imshow(pseudo_mask[2,...], alpha=0.5)\n",
    "    plt.show()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 可视化ExtremeShip生成的关键点热图 (Visualize the keypoint heatmaps produced by ExtremeShip)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 143,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Using matplotlib backend: TkAgg\n"
     ]
    }
   ],
   "source": [
    "# Visualize cached ExtremeShip keypoint heatmaps for one image.\n",
    "import mmcv\n",
    "import torch\n",
    "import numpy as np\n",
    "import matplotlib.pyplot as plt\n",
    "import os.path as osp\n",
    "import torch.nn.functional as F\n",
    "%matplotlib\n",
    "IMG_ROOT = '/media/gejunyao/Disk/Gejunyao/exp_results/visualization/results/ssdd/ssdd_gt_r/'\n",
    "CACHE_ROOT = '/home/gejunyao/ramdisk/ExtremeShipCache/'\n",
    "\n",
    "IMG_ID = '001112'\n",
    "\n",
    "def _local_maximum(heat, kernel=3):\n",
    "    \"\"\"Extract local maximum pixels with the given kernel.\n",
    "\n",
    "    Args:\n",
    "        heat (Tensor): Target heatmap.\n",
    "        kernel (int): Kernel size of max pooling. Default: 3.\n",
    "\n",
    "    Returns:\n",
    "        heat (Tensor): A heatmap where local maximum pixels keep their\n",
    "            own value and other positions are damped to 10% of theirs.\n",
    "    \"\"\"\n",
    "    pad = (kernel - 1) // 2\n",
    "    # A pixel is a local max iff it equals the max-pooled value at its position.\n",
    "    hmax = F.max_pool2d(heat, kernel, stride=1, padding=pad)\n",
    "    keep = (hmax == heat).float()\n",
    "    # Non-maxima are kept at 10% rather than zeroed out.\n",
    "    background = (1 - keep) * 0.1\n",
    "    return heat * (keep + background)\n",
    "\n",
    "# load heatmaps\n",
    "dict_heatmaps = mmcv.load(osp.join(CACHE_ROOT, IMG_ID+'.pkl'))\n",
    "\n",
    "# Cached raw logits -> probabilities.\n",
    "target_center = dict_heatmaps['tc'].sigmoid()\n",
    "longside_center = dict_heatmaps['lc'].sigmoid()\n",
    "shortside_center = dict_heatmaps['sc'].sigmoid()\n",
    "\n",
    "# target_center = _local_maximum(target_center)[0]\n",
    "# longside_center = _local_maximum(longside_center)[0]\n",
    "# shortside_center = _local_maximum(shortside_center)[0]\n",
    "\n",
    "# Drop the leading dimension — presumably batch; TODO confirm cached tensor shape.\n",
    "target_center = target_center[0]\n",
    "longside_center = longside_center[0]\n",
    "shortside_center = shortside_center[0]\n",
    "\n",
    "ori_img = mmcv.imread(osp.join(IMG_ROOT, IMG_ID+'.jpg'))\n",
    "plt.figure()\n",
    "plt.subplot(2,2,1)\n",
    "plt.imshow(ori_img)\n",
    "plt.subplot(2,2,2)\n",
    "plt.title('target center')\n",
    "plt.imshow(target_center)\n",
    "plt.subplot(2,2,3)\n",
    "plt.title('longside center')\n",
    "plt.imshow(longside_center)\n",
    "plt.subplot(2,2,4)\n",
    "plt.title('shortside center')\n",
    "plt.imshow(shortside_center)\n",
    "plt.show()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 保留完整高斯的label 生成 (Label generation that keeps the full Gaussian map)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "'''\n",
    "Author: SlytherinGe\n",
    "LastEditTime: 2021-11-09 17:01:01\n",
    "'''\n",
    "# NOTE(review): this cell is a snapshot of an mmdet pipeline module; the\n",
    "# relative import of PIPELINES below only resolves inside that package,\n",
    "# so this cell is not directly runnable in the notebook.\n",
    "import mmcv\n",
    "import cv2\n",
    "import numpy as np\n",
    "import os\n",
    "from mmdet.core import BitmapMasks\n",
    "\n",
    "from ..builder import PIPELINES\n",
    "\n",
    "SMALL_NUM = 1e-9\n",
    "TEMP_ROOT = '/home/gejunyao/ramdisk/error_img/'\n",
    "\n",
    "@PIPELINES.register_module()\n",
    "class EdgeCenterGMGenerator(object):\n",
    "    \"\"\"mmdet data-pipeline step that builds edge-center Gaussian masks.\n",
    "\n",
    "    Channel 0 = target center, channel 1 = short-side centers,\n",
    "    channel 2 = long-side centers (see __call__).\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, theta=0.2) -> None:\n",
    "        super().__init__()\n",
    "        # Gaussian spread as a fraction of the box side lengths.\n",
    "        self.theta = theta\n",
    "\n",
    "\n",
    "    def __get_gaussian_map(self, gt_bbox_shape, theta=1):\n",
    "        \"\"\"Build a single-channel (h, w) Gaussian map for one rotated box.\n",
    "\n",
    "        Args:\n",
    "            gt_bbox_shape: (x, y, w, h, angle); x and y are unused here.\n",
    "            theta: spread factor relative to the box width/height.\n",
    "\n",
    "        Returns:\n",
    "            np.ndarray: rotated map, normalized so its global max is 1.\n",
    "        \"\"\"\n",
    "\n",
    "        _, _, w, h, a = gt_bbox_shape\n",
    "\n",
    "        # generate a x, y grid\n",
    "        X, Y = np.meshgrid(np.arange(w), np.arange(h))\n",
    "        cy, cx = (h - 1) / 2, (w - 1) / 2\n",
    "\n",
    "        # get the values for theta_x, theta_y\n",
    "        theta_x = w * theta\n",
    "        theta_y = h * theta\n",
    "        # start calculate the gaussian map\n",
    "        G =   np.square(X - cx) / (2 * theta_x ** 2) \\\n",
    "            + np.square(Y - cy) / (2 * theta_y ** 2)\n",
    "        \n",
    "        G = np.exp(-G)\n",
    "\n",
    "        # Rotate to the box angle; auto_bound grows the canvas to fit the corners.\n",
    "        G = mmcv.imrotate(G, a, auto_bound=True)\n",
    "        G = G / np.max(G)\n",
    "\n",
    "        return G\n",
    "\n",
    "    def __get_bbox_from_rbox(self, rbox_pts):\n",
    "        \"\"\"Return the axis-aligned bounding box of the given corner points.\n",
    "\n",
    "        Args:\n",
    "            rbox_pts: iterable of (x, y) corner points of a rotated box.\n",
    "\n",
    "        Returns:\n",
    "            tuple[int, int, int, int]: (x_min, y_min, x_max, y_max), rounded\n",
    "            via int(v + 0.5) (round-half-up for non-negative coordinates).\n",
    "        \"\"\"\n",
    "\n",
    "        # Seed min/max with the first point, then scan the rest.\n",
    "        x_min, y_min, x_max, y_max = rbox_pts[0][0], rbox_pts[0][1], rbox_pts[0][0], rbox_pts[0][1]\n",
    "        for pt in rbox_pts:\n",
    "            if pt[0] < x_min:\n",
    "                x_min = pt[0]\n",
    "            if pt[0] > x_max:\n",
    "                x_max = pt[0]\n",
    "            if pt[1] < y_min:\n",
    "                y_min = pt[1]\n",
    "            if pt[1] > y_max:\n",
    "                y_max = pt[1]\n",
    "\n",
    "        return int(x_min + 0.5), int(y_min + 0.5), int(x_max + 0.5), int(y_max + 0.5)\n",
    "\n",
    "\n",
    "    def __call__(self, results):\n",
    "        \"\"\"Attach a 3-channel edge-center Gaussian mask to the results dict.\n",
    "\n",
    "        Reads results['pad_shape'] and results['gt_bboxes'] (each box is\n",
    "        (x, y, w, h, angle)); writes results['gt_masks'] as a BitmapMasks\n",
    "        of shape (3, H, W) and registers it in results['mask_fields'].\n",
    "\n",
    "        Returns:\n",
    "            dict: the mutated results dict.\n",
    "        \"\"\"\n",
    "\n",
    "        img_h, img_w, _ = results['pad_shape']\n",
    "        gt_bboxes = results['gt_bboxes']\n",
    "        corner_pts = []\n",
    "        # gaussion_maps is a list of tuples, a tuple contains:\n",
    "        # ((x, y), gaussion_map)\n",
    "        gaussion_maps = []\n",
    "        # mask channel 0~2 represents: target center, short-side center, long-side center\n",
    "        pseudo_mask = np.zeros((img_h, img_w, 3))\n",
    "        # generate gaussion map and corner points for each rbox\n",
    "        for gt_bbox in gt_bboxes:\n",
    "            x, y, w, h, a = gt_bbox[0], gt_bbox[1], gt_bbox[2], gt_bbox[3], gt_bbox[4]\n",
    "            pts = cv2.boxPoints(((x, y), (w, h), a))\n",
    "            # NOTE(review): bbox is computed but never used below.\n",
    "            bbox = self.__get_bbox_from_rbox(pts)\n",
    "            corner_pts.append(pts)\n",
    "            gaussion = self.__get_gaussian_map((x, y, w, h, a), self.theta)\n",
    "            # gaussion = gaussion / np.max(gaussion.reshape(-1, 3),axis=0)\n",
    "            gaussion_maps.append(gaussion)\n",
    "\n",
    "        # calculate edge centers for each rbox\n",
    "        corner_pts = np.array(corner_pts)\n",
    "        # Squared lengths of edges 0-1 and 1-2, to decide which edge is longer.\n",
    "        d_12 = np.square(corner_pts[:,0,0]-corner_pts[:,1,0]) + \\\n",
    "               np.square(corner_pts[:,0,1]-corner_pts[:,1,1])\n",
    "        d_23 = np.square(corner_pts[:,1,0]-corner_pts[:,2,0]) + \\\n",
    "               np.square(corner_pts[:,1,1]-corner_pts[:,2,1])\n",
    "        is_d23_longer = d_12 < d_23\n",
    "        num_box = len(is_d23_longer)\n",
    "        box_index = np.arange(num_box)\n",
    "        # Doubled (each box has two long and two short sides), scaled by 4\n",
    "        # because corner_pts is flattened to 4 corners per box below.\n",
    "        box_index = np.hstack((box_index, box_index)) * 4\n",
    "        long_side_start_index = np.int0(is_d23_longer)\n",
    "        long_side_start_index = np.hstack((long_side_start_index, long_side_start_index + 2))\n",
    "        long_side_end_index = (long_side_start_index + 1) % 4\n",
    "        short_side_start_index = long_side_end_index\n",
    "        short_side_end_index = (short_side_start_index + 1) % 4\n",
    "        corner_pts = corner_pts.reshape(-1, 2)\n",
    "        # Each center is the midpoint of the corresponding corner pair.\n",
    "        long_side_center = (corner_pts[box_index + long_side_start_index,:] +\n",
    "                            corner_pts[box_index + long_side_end_index,:]) / 2\n",
    "        short_side_center = (corner_pts[box_index + short_side_start_index,:] +\n",
    "                            corner_pts[box_index + short_side_end_index,:]) / 2 \n",
    "        target_center = (corner_pts[box_index[:num_box], :] + \n",
    "                         corner_pts[box_index[:num_box] + 2, :]) / 2 \n",
    "\n",
    "        for k, gaussian_map in enumerate(gaussion_maps):\n",
    "            # set guassion maps for center points\n",
    "            # Trailing comma makes tc a 1-tuple so all three iterate uniformly below.\n",
    "            tc = target_center[k],\n",
    "            sc = short_side_center[k], short_side_center[k + num_box]\n",
    "            lc = long_side_center[k], long_side_center[k + num_box]\n",
    "            gmap = gaussian_map\n",
    "            g_h, g_w = gmap.shape\n",
    "\n",
    "            for v, centers in enumerate((tc, sc, lc)):\n",
    "                for center in centers:\n",
    "                    x, y = center[0], center[1]\n",
    "                    # Paste window of the Gaussian centered on this point.\n",
    "                    y_start, y_end = int(y-g_h/2 + 0.5), int(y+g_h/2 + 0.5)\n",
    "                    x_start, x_end = int(x-g_w/2 + 0.5), int(x+g_w/2 + 0.5)\n",
    "                    # Clip the window to the image bounds.\n",
    "                    y_s, x_s = max(0, y_start), max(0, x_start)\n",
    "                    y_e, x_e = min(img_h, y_end), min(img_w, x_end)\n",
    "                    ori_mask = pseudo_mask[y_s: y_e, x_s: x_e, v]\n",
    "                    y_offset, x_offset = g_h - (y_end-y_start), g_w - (x_end-x_start)\n",
    "                    # Crop the Gaussian by the same amount the window was clipped.\n",
    "                    gmap_temp = gmap[y_s-y_start+y_offset:g_h - (y_end-y_e), x_s-x_start+x_offset:g_w - (x_end-x_e)]\n",
    "                    # Merge with element-wise max so overlapping boxes keep their peaks.\n",
    "                    pseudo_mask[y_s: y_e, x_s: x_e, v] = np.where(gmap_temp > ori_mask, gmap_temp, ori_mask)\n",
    "\n",
    "        pseudo_mask[pseudo_mask > 1] = 1.0\n",
    "\n",
    "        # (C, H, W) layout expected by BitmapMasks.\n",
    "        pseudo_mask = pseudo_mask.transpose(2,0,1)\n",
    "\n",
    "        results['gt_masks'] = BitmapMasks(pseudo_mask, img_h, img_w)\n",
    "        results['mask_fields'].append('gt_masks')\n",
    "\n",
    "        return results"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 对比横向对比不同的label训练出来的关键点检测结果"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 138,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Using matplotlib backend: TkAgg\n"
     ]
    },
    {
     "ename": "NotADirectoryError",
     "evalue": "[Errno 20] Not a directory: '/home/gejunyao/ramdisk/ExtremeShipCache/000005.pkl/000762.pkl'",
     "output_type": "error",
     "traceback": [
      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[0;31mNotADirectoryError\u001b[0m                        Traceback (most recent call last)",
      "\u001b[0;32m/tmp/ipykernel_9159/1541125725.py\u001b[0m in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[1;32m     41\u001b[0m \u001b[0mn_folders\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mlen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mcache_folders\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     42\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mk\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mf\u001b[0m \u001b[0;32min\u001b[0m \u001b[0menumerate\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mcache_folders\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 43\u001b[0;31m     \u001b[0mtc\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mlc\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0msc\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0m__read_cache_file\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mCACHE_ROOT\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mf\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mIMG_ID\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m     44\u001b[0m     \u001b[0mplt\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0msubplot\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;36m3\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mn_folders\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0mn_folders\u001b[0m \u001b[0;34m+\u001b[0m \u001b[0mk\u001b[0m \u001b[0;34m+\u001b[0m \u001b[0;36m1\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     45\u001b[0m     \u001b[0mplt\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtitle\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'target center, cache {}'\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mformat\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mk\u001b[0m \u001b[0;34m+\u001b[0m\u001b[0;36m1\u001b[0m \u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m/tmp/ipykernel_9159/1541125725.py\u001b[0m in \u001b[0;36m__read_cache_file\u001b[0;34m(cache_root, cache_folder, img_id)\u001b[0m\n\u001b[1;32m     25\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0m__read_cache_file\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mcache_root\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mcache_folder\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mimg_id\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     26\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 27\u001b[0;31m     \u001b[0mdict_heatmaps\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mmmcv\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mload\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mosp\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mjoin\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mcache_root\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mcache_folder\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mimg_id\u001b[0m\u001b[0;34m+\u001b[0m\u001b[0;34m'.pkl'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m     28\u001b[0m     \u001b[0mtarget_center\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mdict_heatmaps\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m'tc'\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0msigmoid\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     29\u001b[0m     \u001b[0mlongside_center\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mdict_heatmaps\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m'lc'\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0msigmoid\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m~/anaconda3/envs/rmml/lib/python3.7/site-packages/mmcv-1.3.13-py3.7.egg/mmcv/fileio/io.py\u001b[0m in \u001b[0;36mload\u001b[0;34m(file, file_format, **kwargs)\u001b[0m\n\u001b[1;32m     39\u001b[0m     \u001b[0mhandler\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mfile_handlers\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mfile_format\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     40\u001b[0m     \u001b[0;32mif\u001b[0m \u001b[0mis_str\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mfile\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 41\u001b[0;31m         \u001b[0mobj\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mhandler\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mload_from_path\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mfile\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m     42\u001b[0m     \u001b[0;32melif\u001b[0m \u001b[0mhasattr\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mfile\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m'read'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     43\u001b[0m         \u001b[0mobj\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mhandler\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mload_from_fileobj\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mfile\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m~/anaconda3/envs/rmml/lib/python3.7/site-packages/mmcv-1.3.13-py3.7.egg/mmcv/fileio/handlers/pickle_handler.py\u001b[0m in \u001b[0;36mload_from_path\u001b[0;34m(self, filepath, **kwargs)\u001b[0m\n\u001b[1;32m     12\u001b[0m     \u001b[0;32mdef\u001b[0m \u001b[0mload_from_path\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfilepath\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     13\u001b[0m         return super(PickleHandler, self).load_from_path(\n\u001b[0;32m---> 14\u001b[0;31m             filepath, mode='rb', **kwargs)\n\u001b[0m\u001b[1;32m     15\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     16\u001b[0m     \u001b[0;32mdef\u001b[0m \u001b[0mdump_to_str\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mobj\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m~/anaconda3/envs/rmml/lib/python3.7/site-packages/mmcv-1.3.13-py3.7.egg/mmcv/fileio/handlers/base.py\u001b[0m in \u001b[0;36mload_from_path\u001b[0;34m(self, filepath, mode, **kwargs)\u001b[0m\n\u001b[1;32m     18\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     19\u001b[0m     \u001b[0;32mdef\u001b[0m \u001b[0mload_from_path\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfilepath\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmode\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m'r'\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 20\u001b[0;31m         \u001b[0;32mwith\u001b[0m \u001b[0mopen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mfilepath\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmode\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0mf\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m     21\u001b[0m             \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mload_from_fileobj\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mf\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     22\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;31mNotADirectoryError\u001b[0m: [Errno 20] Not a directory: '/home/gejunyao/ramdisk/ExtremeShipCache/000005.pkl/000762.pkl'"
     ]
    }
   ],
   "source": [
    "import mmcv\n",
    "import torch\n",
    "import numpy as np\n",
    "import matplotlib.pyplot as plt\n",
    "import os.path as osp\n",
    "import os\n",
    "import torch.nn.functional as F\n",
    "%matplotlib\n",
    "IMG_ROOT = '/media/gejunyao/Disk/Gejunyao/exp_results/visualization/results/ssdd/ssdd_gt_r/'\n",
    "CACHE_ROOT = '/media/gejunyao/Disk/Gejunyao/exp_results/visualization/middle_part/ExtremeShipKPVis/Cache'\n",
    "CACHE_FILE_PREFIX = 'ExtremeShipCache'\n",
    "IMG_ID = '000762'\n",
    "cache_folders = os.listdir(CACHE_ROOT)\n",
    "\n",
    "cache_folders.sort()\n",
    "\n",
    "def _local_maximum(heat, kernel=3):\n",
    "    pad = (kernel - 1) // 2\n",
    "    hmax = F.max_pool2d(heat, kernel, stride=1, padding=pad)\n",
    "    keep = (hmax == heat).float()\n",
    "    background = (1 - keep) * 0.1\n",
    "    return heat * (keep + background)\n",
    "\n",
    "\n",
    "def __read_cache_file(cache_root, cache_folder, img_id):\n",
    "\n",
    "    dict_heatmaps = mmcv.load(osp.join(cache_root, cache_folder, img_id+'.pkl'))\n",
    "    target_center = dict_heatmaps['tc'].sigmoid()\n",
    "    longside_center = dict_heatmaps['lc'].sigmoid()\n",
    "    shortside_center = dict_heatmaps['sc'].sigmoid()\n",
    "\n",
    "    # target_center = _local_maximum(target_center)[0]\n",
    "    # longside_center = _local_maximum(longside_center)[0]\n",
    "    # shortside_center = _local_maximum(shortside_center)[0]\n",
    "    target_center = target_center[0]\n",
    "    longside_center = longside_center[0]\n",
    "    shortside_center = shortside_center[0]\n",
    "    return target_center, longside_center, shortside_center\n",
    "\n",
    "plt.figure()\n",
    "n_folders = len(cache_folders)\n",
    "for k, f in enumerate(cache_folders):\n",
    "    tc, lc, sc = __read_cache_file(CACHE_ROOT, f, IMG_ID)\n",
    "    plt.subplot(3, n_folders,0*n_folders + k + 1)\n",
    "    plt.title('target center, cache {}'.format(k +1 ))\n",
    "    plt.imshow(tc)\n",
    "    plt.subplot(3, n_folders,1*n_folders + k + 1)\n",
    "    plt.title('longside center, cache {}'.format(k +1 ))\n",
    "    plt.imshow(lc)\n",
    "    plt.subplot(3, n_folders,2*n_folders + k + 1)\n",
    "    plt.title('shortside center, cache {}'.format(k +1 ))\n",
    "    plt.imshow(sc)\n",
    "\n",
    "plt.show()\n",
    "    \n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 实验带有角度的高斯斑"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 175,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "torch.Size([640, 640])\n"
     ]
    }
   ],
   "source": [
    "import torch\n",
    "import matplotlib.pyplot as plt\n",
    "import math\n",
    "\n",
    "ALPHA = -60.5\n",
    "WIDTH = 180.1\n",
    "HEIGHT = 60.6\n",
    "\n",
    "def gaussian2D_R(w, h, a, sigma_ratio, dtype=torch.float32, device='cpu'):\n",
    "    # sigma_ratio: tuple:(ratio_w, ratio_h)\n",
    "    # TODO: delete angle transformation\n",
    "    cos_theta = math.cos(a / 180 * math.pi)\n",
    "    sin_theta = math.sin(a / 180 * math.pi)\n",
    "    # calculate the min bounding box containing the rotated box\n",
    "    min_width, min_height = w * cos_theta - h * sin_theta, h * cos_theta - w * sin_theta\n",
    "    cx, cy = max(1, int(min_width / 2)), max(1, int(min_height / 2))\n",
    "\n",
    "    X = torch.arange(-cx, cx+1, dtype=dtype, device=device)\n",
    "    Y = torch.arange(-cy, cy+1, dtype=dtype, device=device)\n",
    "\n",
    "    y, x = torch.meshgrid(Y, X)\n",
    "    scale = torch.ones_like(y)\n",
    "    pos_matrix = torch.cat((x[None], y[None], scale[None])).permute(1,2,0)\n",
    "\n",
    "    rotate_matrix = torch.tensor([[cos_theta, -sin_theta, 0],[sin_theta, cos_theta,0],[0,0,1]])\n",
    "    rotated_matrix = torch.matmul(pos_matrix, rotate_matrix)\n",
    "\n",
    "    X, Y = rotated_matrix[...,0], rotated_matrix[...,1]\n",
    "    \n",
    "    theta_x = w * sigma_ratio[0]\n",
    "    theta_y = h * sigma_ratio[1]\n",
    "\n",
    "    G = X * X / (2 * theta_x * theta_x) +\\\n",
    "        Y * Y / (2 * theta_y * theta_y)\n",
    "\n",
    "    G = (-G).exp()\n",
    "\n",
    "    G[G < torch.finfo(G.dtype).eps * G.max()] = 0\n",
    "\n",
    "    return G\n",
    "\n",
    "def gen_gaussian_targetR(heatmap, cx, cy , w, h, a, sigma_ratio):\n",
    "\n",
    "    gaussian_kernel = gaussian2D_R(w, h, a,\n",
    "                                   sigma_ratio=sigma_ratio, \n",
    "                                   dtype=heatmap.dtype, \n",
    "                                   device=heatmap.device)\n",
    "    k_h, k_w = gaussian_kernel.shape\n",
    "    h_h, h_w = heatmap.shape[:2]\n",
    "    \n",
    "    half_k_h, half_k_w = (k_h - 1) // 2, (k_w - 1) // 2      \n",
    "    cx, cy = int(cx + 0.5), int(cy + 0.5)\n",
    "    left, right = min(cx, half_k_w), min(h_w - cx, half_k_w + 1)\n",
    "    top, bottom = min(cy, half_k_h), min(h_h - cy, half_k_h + 1) \n",
    "\n",
    "    masked_heatmap = heatmap[cy - top:cy + bottom, cx - left:cx + right]\n",
    "    masked_gaussian = gaussian_kernel[half_k_h - top:half_k_h + bottom,\n",
    "                                      half_k_w - left:half_k_w + right]\n",
    "\n",
    "    out_heatmap = heatmap   \n",
    "    torch.max(\n",
    "        masked_heatmap,\n",
    "        masked_gaussian,\n",
    "        out=out_heatmap[cy - top:cy + bottom, cx - left:cx + right])\n",
    "    \n",
    "    return out_heatmap\n",
    "\n",
    "if __name__ == '__main__':\n",
    "\n",
    "    heatmap = torch.zeros((640,640), dtype=torch.float32)\n",
    "    heatmap = gen_gaussian_targetR(heatmap,10.6,30.1,WIDTH, HEIGHT, ALPHA, (0.1, 0.1) )\n",
    "    gmap = gaussian2D_R(WIDTH, HEIGHT, ALPHA, (0.1, 0.1))\n",
    "    print(heatmap.shape)\n",
    "    # plt.figure()\n",
    "    plt.matshow(heatmap)\n",
    "    # plt.matshow(gmap)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 观察使用多尺度训练的ExtremeShip生成的热力图"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Using matplotlib backend: TkAgg\n"
     ]
    }
   ],
   "source": [
    "import mmcv\n",
    "import torch\n",
    "import numpy as np\n",
    "import matplotlib.pyplot as plt\n",
    "import os.path as osp\n",
    "import torch.nn.functional as F\n",
    "%matplotlib\n",
    "IMG_ROOT = '/media/gejunyao/Disk/Gejunyao/exp_results/visualization/results/ssdd/ssdd_gt_r/'\n",
    "CACHE_ROOT = '/home/gejunyao/ramdisk/ExtremeShipCache1/'\n",
    "\n",
    "IMG_ID = '000762'\n",
    "\n",
    "def _local_maximum(heat, kernel=3):\n",
    "    \"\"\"Extract local maximum pixel with given kernal.\n",
    "\n",
    "    Args:\n",
    "        heat (Tensor): Target heatmap.\n",
    "        kernel (int): Kernel size of max pooling. Default: 3.\n",
    "\n",
    "    Returns:\n",
    "        heat (Tensor): A heatmap where local maximum pixels maintain its\n",
    "            own value and other positions are 0.\n",
    "    \"\"\"\n",
    "    pad = (kernel - 1) // 2\n",
    "    hmax = torch.max_pool2d(heat, kernel, stride=1, padding=pad)\n",
    "    keep = (hmax == heat).float()\n",
    "    background = (1 - keep) * 0.1\n",
    "    return heat * (keep + background)\n",
    "\n",
    "# load heatmaps\n",
    "dict_heatmaps = mmcv.load(osp.join(CACHE_ROOT, IMG_ID+'.pkl'))\n",
    "\n",
    "target_center_list = dict_heatmaps['tc']\n",
    "longside_center_list = dict_heatmaps['lc']\n",
    "shortside_center_list = dict_heatmaps['sc']\n",
    "\n",
    "num_feats = len(target_center_list)\n",
    "\n",
    "img = mmcv.imread(IMG_ROOT + IMG_ID + '.jpg')\n",
    "\n",
    "plt.figure()\n",
    "for i in range(num_feats):\n",
    "    tc = target_center_list[i].sigmoid()\n",
    "    lc = longside_center_list[i].sigmoid()\n",
    "    sc = shortside_center_list[i].sigmoid()\n",
    "    # lc = _local_maximum(lc)\n",
    "    # tc = _local_maximum(tc)\n",
    "    # sc = _local_maximum(sc)\n",
    "    _, h, w = tc.shape\n",
    "    rescale_img = mmcv.imrescale(img, (h, w))\n",
    "    padded_img = mmcv.impad(rescale_img, shape=(h, w))\n",
    "    plt.subplot(3, num_feats, i+1)\n",
    "    plt.imshow(padded_img)\n",
    "    plt.imshow(tc[0], alpha=0.75)\n",
    "    plt.subplot(3, num_feats, i+1 + num_feats)\n",
    "    plt.imshow(padded_img)\n",
    "    plt.imshow(lc[0], alpha=0.75)\n",
    "    plt.subplot(3, num_feats, i+1 + 2*num_feats)\n",
    "    plt.imshow(padded_img)\n",
    "    plt.imshow(sc[0], alpha=0.75)\n",
    "\n",
    "plt.show()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 观察使用多尺度训练的ExtremeShip生成的target"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Using matplotlib backend: TkAgg\n",
      "1.0\n",
      "1.0\n",
      "0.0\n",
      "0.0\n"
     ]
    }
   ],
   "source": [
    "import mmcv\n",
    "import torch\n",
    "import numpy as np\n",
    "import matplotlib.pyplot as plt\n",
    "import os.path as osp\n",
    "import torch.nn.functional as F\n",
    "%matplotlib\n",
    "IMG_ROOT = '/media/gejunyao/Disk/Gejunyao/exp_results/visualization/results/ssdd/ssdd_gt_r2/'\n",
    "CACHE_ROOT = '/home/gejunyao/ramdisk/ExtremeShipTrainingTargets'\n",
    "\n",
    "IMG_ID = '001087'\n",
    "\n",
    "def _local_maximum(heat, kernel=3):\n",
    "    \"\"\"Extract local maximum pixel with given kernal.\n",
    "\n",
    "    Args:\n",
    "        heat (Tensor): Target heatmap.\n",
    "        kernel (int): Kernel size of max pooling. Default: 3.\n",
    "\n",
    "    Returns:\n",
    "        heat (Tensor): A heatmap where local maximum pixels maintain its\n",
    "            own value and other positions are 0.\n",
    "    \"\"\"\n",
    "    pad = (kernel - 1) // 2\n",
    "    hmax = F.max_pool2d(heat, kernel, stride=1, padding=pad)\n",
    "    keep = (hmax == heat).float()\n",
    "    background = (1 - keep) * 0.1\n",
    "    return heat * (keep + background)\n",
    "\n",
    "# load heatmaps\n",
    "dict_heatmaps = mmcv.load(osp.join(CACHE_ROOT, IMG_ID+'.pkl'))\n",
    "\n",
    "target_center_list = dict_heatmaps['tc']\n",
    "longside_center_list = dict_heatmaps['lc']\n",
    "shortside_center_list = dict_heatmaps['sc']\n",
    "valid_map_list = dict_heatmaps['valid_map']\n",
    "num_feats = len(target_center_list)\n",
    "\n",
    "\n",
    "img = mmcv.imread(IMG_ROOT + IMG_ID + '.jpg')\n",
    "rescale_img = mmcv.imrescale(img, (640, 640))\n",
    "padded_img = mmcv.impad(rescale_img, shape=(640, 640))\n",
    "im_h, im_w = padded_img.shape[0], padded_img.shape[1]\n",
    "\n",
    "\n",
    "plt.figure()\n",
    "for i in range(num_feats):\n",
    "    tc = target_center_list[i]\n",
    "    lc = longside_center_list[i]\n",
    "    sc = shortside_center_list[i]\n",
    "    tc = np.array(tc[0])\n",
    "    lc = np.array(lc[0])\n",
    "    sc = np.array(sc[0])\n",
    "    h, w = tc.shape\n",
    "    # rescale_img = mmcv.imrescale(img, (h, w))\n",
    "    # padded_img = mmcv.impad(rescale_img, shape=(h, w))\n",
    "    # vp = np.array(valid_map_list[i])\n",
    "    tc = mmcv.imrescale(tc, (im_w, im_h), interpolation='nearest')\n",
    "    lc = mmcv.imrescale(lc, (im_w, im_h), interpolation='nearest')\n",
    "    sc = mmcv.imrescale(sc, (im_w, im_h), interpolation='nearest')\n",
    "    # vp = mmcv.imrescale(vp, (im_w, im_h))\n",
    "    print(np.max(tc))\n",
    "    plt.subplot(3, num_feats, i+1)\n",
    "    plt.axis('off')\n",
    "    plt.title('tc at size ({}, {})'.format(h, w))\n",
    "    plt.imshow(padded_img)\n",
    "    plt.imshow(tc, alpha=0.75)\n",
    "    plt.subplot(3, num_feats, i+1 + num_feats)\n",
    "    plt.axis('off')\n",
    "    plt.title('lc at size ({}, {})'.format(h, w))\n",
    "    plt.imshow(padded_img)\n",
    "    plt.imshow(lc, alpha=0.75)\n",
    "    plt.subplot(3, num_feats, i+1 + 2*num_feats)\n",
    "    plt.axis('off')\n",
    "    plt.title('sc at size ({}, {})'.format(h, w))\n",
    "    plt.imshow(padded_img)\n",
    "    plt.imshow(sc, alpha=0.75)\n",
    "    # plt.subplot(4, num_feats, i+1 + 3*num_feats)\n",
    "    # plt.imshow(padded_img)\n",
    "    # plt.imshow(vp, alpha=0.75)\n",
    "\n",
    "plt.show()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 对生成的关键点基于学习到的偏移量进行重建"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Using matplotlib backend: TkAgg\n"
     ]
    }
   ],
   "source": [
    "import mmcv\n",
    "import torch\n",
    "import numpy as np\n",
    "import matplotlib.pyplot as plt\n",
    "import os.path as osp\n",
    "import torch.nn.functional as F\n",
    "import cv2\n",
    "%matplotlib\n",
    "IMG_ROOT = '/media/gejunyao/Disk/Gejunyao/exp_results/visualization/results/ssdd/ssdd_gt_r/'\n",
    "CACHE_ROOT = '/home/gejunyao/ramdisk/ExtremeShipCache1/'\n",
    "\n",
    "IMG_ID = '000762'\n",
    "\n",
    "def _local_maximum(heat, kernel=3):\n",
    "    \"\"\"Extract local maximum pixel with given kernal.\n",
    "\n",
    "    Args:\n",
    "        heat (Tensor): Target heatmap.\n",
    "        kernel (int): Kernel size of max pooling. Default: 3.\n",
    "\n",
    "    Returns:\n",
    "        heat (Tensor): A heatmap where local maximum pixels maintain its\n",
    "            own value and other positions are 0.\n",
    "    \"\"\"\n",
    "    pad = (kernel - 1) // 2\n",
    "    hmax = torch.max_pool2d(heat, kernel, stride=1, padding=pad)\n",
    "    hmax[hmax < 0.2] = 0\n",
    "    keep = (hmax == heat).float()\n",
    "\n",
    "    return heat * keep\n",
    "\n",
    "# load heatmaps\n",
    "dict_heatmaps = mmcv.load(osp.join(CACHE_ROOT, IMG_ID+'.pkl'))\n",
    "\n",
    "target_center_list = dict_heatmaps['tc']\n",
    "longside_center_list = dict_heatmaps['lc']\n",
    "shortside_center_list = dict_heatmaps['sc']\n",
    "offset_list = dict_heatmaps['off']\n",
    "num_feats = len(target_center_list)\n",
    "\n",
    "img = mmcv.imread(IMG_ROOT + IMG_ID + '.jpg')\n",
    "im_h, im_w = (640, 640)\n",
    "rescale_img = mmcv.imrescale(img, (im_h, im_w))\n",
    "padded_img = mmcv.impad(rescale_img, shape=(im_h, im_w))\n",
    "\n",
    "multi_lvl_keypts = []\n",
    "\n",
    "color_map = dict(\n",
    "    tc = (252, 255, 166),\n",
    "    sc = (193, 255, 215),\n",
    "    lc = (181, 222, 255)\n",
    ")\n",
    "\n",
    "for i in range(num_feats):\n",
    "    # get corresponding heats\n",
    "    tc = target_center_list[i].sigmoid()\n",
    "    lc = longside_center_list[i].sigmoid()\n",
    "    sc = shortside_center_list[i].sigmoid()\n",
    "    off = offset_list[i]\n",
    "    tc = _local_maximum(tc)\n",
    "    sc = _local_maximum(sc)\n",
    "    lc = _local_maximum(lc)\n",
    "    _, feat_h, feat_w = tc.shape\n",
    "\n",
    "    stride_h, stride_w = float(im_h/feat_h), float(im_w/feat_w)\n",
    "    # get heat peaks and their offsets\n",
    "    tc_pos, sc_pos, lc_pos = torch.nonzero(tc[0]), torch.nonzero(sc[0]), torch.nonzero(lc[0])\n",
    "    tc_off, sc_off, lc_off = off[0:2, tc_pos[:,0], tc_pos[:,1]],\\\n",
    "                             off[2:4, sc_pos[:,0], sc_pos[:,1]],\\\n",
    "                             off[4:6, lc_pos[:,0], lc_pos[:,1]]\n",
    "    tc_pos, sc_pos, lc_pos = tc_pos.float(), sc_pos.float(), lc_pos.float()\n",
    "    # refine peak positions and map them to the original image\n",
    "    # tc_pos[:,0], tc_pos[:,1] = tc_pos[:,0]+tc_off[1,:], tc_pos[:,1]+tc_off[0,:]\n",
    "    # sc_pos[:,0], sc_pos[:,1] = sc_pos[:,0]+sc_off[1,:], sc_pos[:,1]+sc_off[0,:]\n",
    "    # lc_pos[:,0], lc_pos[:,1] = lc_pos[:,0]+lc_off[1,:], lc_pos[:,1]+lc_off[0,:]\n",
    "\n",
    "    tc_pos[:,0], tc_pos[:,1] = tc_pos[:,0] * stride_h, tc_pos[:,1] * stride_w\n",
    "    sc_pos[:,0], sc_pos[:,1] = sc_pos[:,0] * stride_h, sc_pos[:,1] * stride_w\n",
    "    lc_pos[:,0], lc_pos[:,1] = lc_pos[:,0] * stride_h, lc_pos[:,1] * stride_w\n",
    "\n",
    "    pos_dict = dict(\n",
    "        tc = tc_pos,\n",
    "        sc = sc_pos,\n",
    "        lc = lc_pos\n",
    "    )\n",
    "    \n",
    "    multi_lvl_keypts.append(pos_dict)\n",
    "\n",
    "multi_lvl_keypts = multi_lvl_keypts[::-1]\n",
    "# visulization for key points\n",
    "for k, keypts in enumerate(multi_lvl_keypts):\n",
    "\n",
    "    for kpt_name, kpt_values in keypts.items():\n",
    "        for value in kpt_values:\n",
    "            y, x = value\n",
    "            x, y = int(x+0.5), int(y+0.5)\n",
    "            cv2.circle(padded_img, (x, y), 16 - k*2, color_map[kpt_name], -1)\n",
    "            cv2.circle(padded_img, (x, y), 16 - k*2, (16,16,16), 1)\n",
    "\n",
    "plt.figure()\n",
    "plt.imshow(padded_img[...,::-1])\n",
    "plt.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Using matplotlib backend: TkAgg\n"
     ]
    }
   ],
   "source": [
    "import mmcv\n",
    "import torch\n",
    "import numpy as np\n",
    "import matplotlib.pyplot as plt\n",
    "import os.path as osp\n",
    "import torch.nn.functional as F\n",
    "import cv2\n",
    "%matplotlib\n",
    "IMG_ROOT = '/media/gejunyao/Disk/Gejunyao/exp_results/visualization/results/ssdd/ssdd_gt_r/'\n",
    "CACHE_ROOT = '/home/gejunyao/ramdisk/ExtremeShipCache1/'\n",
    "\n",
    "IMG_ID = '000762'\n",
    "\n",
    "def _local_maximum(heat, kernel=3):\n",
    "    \"\"\"Extract local maximum pixel with given kernal.\n",
    "\n",
    "    Args:\n",
    "        heat (Tensor): Target heatmap.\n",
    "        kernel (int): Kernel size of max pooling. Default: 3.\n",
    "\n",
    "    Returns:\n",
    "        heat (Tensor): A heatmap where local maximum pixels maintain its\n",
    "            own value and other positions are 0.\n",
    "    \"\"\"\n",
    "    pad = (kernel - 1) // 2\n",
    "    hmax = torch.max_pool2d(heat, kernel, stride=1, padding=pad)\n",
    "    hmax[hmax < 0.1] = 0\n",
    "    keep = (hmax == heat).float()\n",
    "\n",
    "    return heat * keep\n",
    "\n",
    "def _key_points_matcher(feat_lvl, kpt_values, strides, confidence_thr):\n",
    "    \"\"\"Match key points of longside or shortside centers\n",
    "    \"\"\"\n",
    "    k = feat_lvl\n",
    "    kpt_values_A = kpt_values.unsqueeze(0)\n",
    "    kpt_values_B = kpt_values.unsqueeze(1)\n",
    "    ss_kpt_ct_mat = ((kpt_values_A + kpt_values_B) / 2 + 0.5).long()\n",
    "    ones = torch.ones((ss_kpt_ct_mat.shape[0], ss_kpt_ct_mat.shape[1]))\n",
    "    splr = torch.triu(ones, 1).bool()\n",
    "    ss_kpt_ct = ss_kpt_ct_mat[splr]\n",
    "    valid_ind = target_center_list[k][0][ss_kpt_ct[:, 0], ss_kpt_ct[:, 1]] > confidence_thr\n",
    "    valid_ind = torch.nonzero(valid_ind)[:,0]\n",
    "    num_kpts = ss_kpt_ct_mat.shape[0]\n",
    "    kpt_pos = []     # [(y, x)]\n",
    "    kpt_ind = 0\n",
    "    total = 0\n",
    "    if valid_ind.shape[0] == 0:\n",
    "        return kpt_pos\n",
    "    for i in range(num_kpts - 1):\n",
    "        row = num_kpts - 1 - i\n",
    "        total += row\n",
    "        while(valid_ind[kpt_ind] < total):\n",
    "            j = num_kpts - total + valid_ind[kpt_ind]\n",
    "            pt_A, pt_B = kpt_values[i]*strides, kpt_values[j]*strides\n",
    "            kpt_pos.append(((float(pt_A[0]), float(pt_A[1])), (float(pt_B[0]), float(pt_B[1]))))\n",
    "            kpt_ind += 1\n",
    "            if kpt_ind >= len(valid_ind):\n",
    "                break\n",
    "        if kpt_ind >= len(valid_ind):\n",
    "            break\n",
    "\n",
    "    return kpt_pos\n",
    "    \n",
    "def _key_points_visulization(vis_img, kpt_pos):\n",
    "\n",
    "    for kpt_pair in kpt_pos:\n",
    "        kpt_A = (kpt_pair[0] + 0.5).long().numpy()\n",
    "        kpt_B = (kpt_pair[1] + 0.5).long().numpy()\n",
    "        y1, x1 = kpt_A\n",
    "        y2, x2 = kpt_B\n",
    "        color = torch.randint(0,256, size=(3,))\n",
    "        cv2.circle(vis_img, (x1, y1), 12 - k*2, color.numpy().tolist(), -1)\n",
    "        cv2.circle(vis_img, (x2, y2), 12 - k*2, color.numpy().tolist(), -1)\n",
    "        cv2.circle(vis_img, (x2, y2), 12 - k*2, (255,255,255), 1)\n",
    "        cv2.circle(vis_img, (x1, y1), 12 - k*2, (255,255,255), 1)\n",
    "    \n",
    "    return vis_img\n",
    "\n",
    "def _paired_points_visualization(feat_lvl, vis_img, paired_kpts):\n",
    "\n",
    "    sc_pairs, lc_pairs = paired_kpts\n",
    "    len_sc_pairs, len_lc_pairs = len(sc_pairs), len(lc_pairs)\n",
    "\n",
    "    assert len_sc_pairs == len_lc_pairs\n",
    "    sc_pairs = (sc_pairs + 0.5).long().numpy()\n",
    "    lc_pairs = (lc_pairs + 0.5).long().numpy()\n",
    "\n",
    "    for i in range(len_sc_pairs):\n",
    "        lc_pair = lc_pairs[i]\n",
    "        sc_pair = sc_pairs[i]\n",
    "        color = torch.randint(0,256, size=(3,)).numpy().tolist()\n",
    "        kpts = np.vstack((lc_pair, sc_pair))\n",
    "        for kpt in kpts:\n",
    "            y, x = kpt\n",
    "            cv2.circle(vis_img, (x, y), 12 - k*2, color, -1)\n",
    "            cv2.circle(vis_img, (x, y), 12 - k*2, (255,255,255), 1)\n",
    "\n",
    "    return vis_img\n",
    "\n",
    "# load heatmaps\n",
    "# cached per-level outputs: 'tc' target-center, 'lc' longside-center,\n",
    "# 'sc' shortside-center heatmaps and 'off' offset maps\n",
    "dict_heatmaps = mmcv.load(osp.join(CACHE_ROOT, IMG_ID+'.pkl'))\n",
    "\n",
    "target_center_list = dict_heatmaps['tc']\n",
    "longside_center_list = dict_heatmaps['lc']\n",
    "shortside_center_list = dict_heatmaps['sc']\n",
    "offset_list = dict_heatmaps['off']\n",
    "num_feats = len(target_center_list)\n",
    "\n",
    "# resize + pad the raw image to the fixed 640x640 input size\n",
    "img = mmcv.imread(IMG_ROOT + IMG_ID + '.jpg')\n",
    "im_h, im_w = (640, 640)\n",
    "rescale_img = mmcv.imrescale(img, (im_h, im_w))\n",
    "padded_img = mmcv.impad(rescale_img, shape=(im_h, im_w))\n",
    "\n",
    "# separate canvases so each figure can draw on its own copy\n",
    "vis_lc_img = padded_img.copy()\n",
    "vis_sc_img = padded_img.copy()\n",
    "vis_target_image = padded_img.copy()\n",
    "multi_lvl_keypts = []\n",
    "multi_lvl_strides = []\n",
    "# colour per heatmap type (presumably BGR, matching cv2);\n",
    "# NOTE(review): color_map is not referenced anywhere in this cell\n",
    "color_map = dict(\n",
    "    tc = (252, 255, 166),\n",
    "    sc = (193, 255, 215),\n",
    "    lc = (181, 222, 255)\n",
    ")\n",
    "\n",
    "for i in range(num_feats):\n",
    "    # get corresponding heats\n",
    "    # NOTE(review): the list entry is replaced by its sigmoid and then\n",
    "    # `.sigmoid()` is applied a SECOND time for `tc` -- a double sigmoid.\n",
    "    # Probably only one application was intended.\n",
    "    target_center_list[i] = target_center_list[i].sigmoid()\n",
    "    tc = target_center_list[i].sigmoid()\n",
    "    lc = longside_center_list[i].sigmoid()\n",
    "    sc = shortside_center_list[i].sigmoid()\n",
    "    # off = offset_list[i]()\n",
    "    off = offset_list[i]\n",
    "    # tc = _local_maximum(tc)\n",
    "    # suppress non-peak pixels on the edge-center heatmaps only; tc is NOT\n",
    "    # suppressed, and after sigmoid every pixel is > 0, so torch.nonzero\n",
    "    # below selects ALL tc positions (tc results are unused downstream).\n",
    "    sc = _local_maximum(sc)\n",
    "    lc = _local_maximum(lc)\n",
    "    _, feat_h, feat_w = tc.shape\n",
    "    stride_h, stride_w = float(im_h/feat_h), float(im_w/feat_w)\n",
    "    # get heat peaks and their offsets\n",
    "    tc_pos, sc_pos, lc_pos = torch.nonzero(tc[0]), torch.nonzero(sc[0]), torch.nonzero(lc[0])\n",
    "    # offset channels: (tc_x, tc_y, sc_x, sc_y, lc_x, lc_y)\n",
    "    tc_off, sc_off, lc_off = off[0:2, tc_pos[:,0], tc_pos[:,1]],\\\n",
    "                             off[2:4, sc_pos[:,0], sc_pos[:,1]],\\\n",
    "                             off[4:6, lc_pos[:,0], lc_pos[:,1]]\n",
    "    tc_pos, sc_pos, lc_pos = tc_pos.float(), sc_pos.float(), lc_pos.float()\n",
    "    # refine peak positions and map them to the original image\n",
    "    # positions are (row, col) = (y, x); offset channel 0 is x, channel 1 is y\n",
    "    tc_pos[:,0], tc_pos[:,1] = tc_pos[:,0].float()+tc_off[1,:], tc_pos[:,1].float()+tc_off[0,:]\n",
    "    sc_pos[:,0], sc_pos[:,1] = sc_pos[:,0].float()+sc_off[1,:], sc_pos[:,1].float()+sc_off[0,:]\n",
    "    lc_pos[:,0], lc_pos[:,1] = lc_pos[:,0].float()+lc_off[1,:], lc_pos[:,1].float()+lc_off[0,:]\n",
    "    # tc_pos[:,0], tc_pos[:,1] = tc_pos[:,0] * stride_h, tc_pos[:,1] * stride_w\n",
    "    # sc_pos[:,0], sc_pos[:,1] = sc_pos[:,0] * stride_h, sc_pos[:,1] * stride_w\n",
    "    # lc_pos[:,0], lc_pos[:,1] = lc_pos[:,0] * stride_h, lc_pos[:,1] * stride_w\n",
    "    pos_dict = dict(\n",
    "        # tc = tc_pos,\n",
    "        sc = sc_pos,\n",
    "        lc = lc_pos,\n",
    "    )\n",
    "    multi_lvl_keypts.append(pos_dict)\n",
    "    multi_lvl_strides.append(dict(stride_w=stride_w, stride_h=stride_h))\n",
    "\n",
    "multi_lvl_keypts = multi_lvl_keypts[::-1]\n",
    "multi_lvl_strides = multi_lvl_strides[::-1]\n",
    "target_center_list = target_center_list[::-1]\n",
    "multi_lvl_sc_pairs = []\n",
    "multi_lvl_lc_pairs = []\n",
    "multi_lvl_target_pairs = []\n",
    "# visualization for key points\n",
    "for k, keypts in enumerate(multi_lvl_keypts):\n",
    "    stride_h, stride_w = multi_lvl_strides[k]['stride_h'], multi_lvl_strides[k]['stride_w']\n",
    "    strides = torch.tensor([1.0, 1.0])\n",
    "    for kpt_name, kpt_values in keypts.items():\n",
    "        if kpt_name == 'sc':\n",
    "            sc_pairs = _key_points_matcher(k, kpt_values, strides, 0.05)\n",
    "            # NOTE(review): sc_pairs go into multi_lvl_LC_pairs (and lc_pairs\n",
    "            # into multi_lvl_SC_pairs below) -- the list names look swapped,\n",
    "            # though neither list is read again in this cell.\n",
    "            multi_lvl_lc_pairs.append(sc_pairs)\n",
    "            # vis_sc_img = _key_points_visulization(vis_sc_img, sc_pairs)\n",
    "        elif kpt_name == 'lc':\n",
    "            lc_pairs = _key_points_matcher(k, kpt_values, strides, 0.05)\n",
    "            multi_lvl_sc_pairs.append(lc_pairs)\n",
    "            # visualize\n",
    "            # vis_lc_img = _key_points_visulization(vis_lc_img, lc_pairs)\n",
    "    # skip this level when either map yielded no pairs\n",
    "    if len(sc_pairs) == 0 or len(lc_pairs) == 0:\n",
    "        continue\n",
    "    sc_pairs = torch.tensor(sc_pairs)\n",
    "    lc_pairs = torch.tensor(lc_pairs)\n",
    "    # midpoint of each matched pair of edge centers\n",
    "    sc_pair_centers = sc_pairs.permute(0, 2, 1).sum(dim=-1) / 2\n",
    "    lc_pair_centers = lc_pairs.permute(0, 2, 1).sum(dim=-1) / 2\n",
    "\n",
    "    # broadcast all sc-pair centers against all lc-pair centers\n",
    "    lenA, lenB = len(sc_pair_centers), len(lc_pair_centers)\n",
    "    Ay, Ax = sc_pair_centers[:,0], sc_pair_centers[:,1]\n",
    "    By, Bx = lc_pair_centers[:,0], lc_pair_centers[:,1]\n",
    "\n",
    "    Ay = Ay.view(lenA, 1).expand(lenA, lenB)\n",
    "    Ax = Ax.view(lenA, 1).expand(lenA, lenB)\n",
    "    By = By.view(1, lenB).expand(lenA, lenB)\n",
    "    Bx = Bx.view(1, lenB).expand(lenA, lenB)\n",
    "\n",
    "    diff_y = Ay - By\n",
    "    diff_x = Ax - Bx\n",
    "\n",
    "    l2_dist = diff_y.pow(2) + diff_x.pow(2)\n",
    "\n",
    "    # keep combinations whose centers are within sqrt(10) px (squared dist)\n",
    "    valid_ind = (l2_dist < 10).nonzero()\n",
    "    \n",
    "    paired_data_center = (sc_pair_centers[valid_ind[:,0]] + lc_pair_centers[valid_ind[:,1]]) / 2\n",
    "    paired_data_center = paired_data_center.long()\n",
    "\n",
    "    # also require a confident target-center response at the joint midpoint\n",
    "    pos_valid_ind = target_center_list[k][0][paired_data_center[:,0], paired_data_center[:,1]] > 0.1\n",
    "\n",
    "    valid_ind = valid_ind[pos_valid_ind]\n",
    "\n",
    "    strides = torch.tensor([stride_h, stride_w])\n",
    "    valid_sc_pairs, valid_lc_pairs = sc_pairs[valid_ind[:,0]], lc_pairs[valid_ind[:,1]]\n",
    "\n",
    "    # NOTE(review): this draws on padded_img itself (not a copy), so the\n",
    "    # shared base image is mutated across levels.\n",
    "    vis_target_image = _paired_points_visualization(k, padded_img, (valid_sc_pairs*strides, valid_lc_pairs*strides))\n",
    "\n",
    "# plt.figure()\n",
    "# plt.subplot(1,2,1)\n",
    "# plt.imshow(vis_sc_img[...,::-1])\n",
    "# plt.title('shortside center pairs')\n",
    "# plt.subplot(1,2,2)\n",
    "# plt.imshow(vis_lc_img[...,::-1])\n",
    "# plt.title('longside center pairs')\n",
    "# plt.show()\n",
    "plt.figure()\n",
    "plt.imshow(vis_target_image[...,::-1])\n",
    "plt.show()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 类似CornerNet的方法对检测的关键点结果进行解码"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## function for gathering feats"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_local_maximum(heat, kernel=3):\n",
    "    \"\"\"Extract local maximum pixel with given kernel.\n",
    "\n",
    "    Args:\n",
    "        heat (Tensor): Target heatmap.\n",
    "        kernel (int): Kernel size of max pooling. Default: 3.\n",
    "\n",
    "    Returns:\n",
    "        heat (Tensor): A heatmap where local maximum pixels maintain its\n",
    "            own value and other positions are 0.\n",
    "    \"\"\"\n",
    "    pad = (kernel - 1) // 2\n",
    "    hmax = F.max_pool2d(heat, kernel, stride=1, padding=pad)\n",
    "    keep = (hmax == heat).float()\n",
    "    return heat * keep\n",
    "\n",
    "\n",
    "def get_topk_from_heatmap(scores, k=20):\n",
    "    \"\"\"Get top k positions from heatmap.\n",
    "\n",
    "    Args:\n",
    "        scores (Tensor): Target heatmap with shape\n",
    "            [batch, num_classes, height, width].\n",
    "        k (int): Target number. Default: 20.\n",
    "\n",
    "    Returns:\n",
    "        tuple[torch.Tensor]: Scores, indexes, categories and coords of\n",
    "            topk keypoint. Containing following Tensors:\n",
    "\n",
    "        - topk_scores (Tensor): Max scores of each topk keypoint.\n",
    "        - topk_inds (Tensor): Indexes of each topk keypoint.\n",
    "        - topk_clses (Tensor): Categories of each topk keypoint.\n",
    "        - topk_ys (Tensor): Y-coord of each topk keypoint.\n",
    "        - topk_xs (Tensor): X-coord of each topk keypoint.\n",
    "    \"\"\"\n",
    "    batch, _, height, width = scores.size()\n",
    "    topk_scores, topk_inds = torch.topk(scores.view(batch, -1), k)\n",
    "    topk_clses = topk_inds // (height * width)\n",
    "    topk_inds = topk_inds % (height * width)\n",
    "    topk_ys = topk_inds // width\n",
    "    topk_xs = (topk_inds % width).int().float()\n",
    "    return topk_scores, topk_inds, topk_clses, topk_ys, topk_xs\n",
    "\n",
    "\n",
    "def gather_feat(feat, ind, mask=None):\n",
    "    \"\"\"Gather feature according to index.\n",
    "\n",
    "    Args:\n",
    "        feat (Tensor): Target feature map.\n",
    "        ind (Tensor): Target coord index.\n",
    "        mask (Tensor | None): Mask of feature map. Default: None.\n",
    "\n",
    "    Returns:\n",
    "        feat (Tensor): Gathered feature.\n",
    "    \"\"\"\n",
    "    dim = feat.size(2)\n",
    "    ind = ind.unsqueeze(2).repeat(1, 1, dim)\n",
    "    feat = feat.gather(1, ind)\n",
    "    if mask is not None:\n",
    "        mask = mask.unsqueeze(2).expand_as(feat)\n",
    "        feat = feat[mask]\n",
    "        feat = feat.view(-1, dim)\n",
    "    return feat\n",
    "\n",
    "\n",
    "def transpose_and_gather_feat(feat, ind):\n",
    "    \"\"\"Move channels last, flatten the spatial grid, then gather by index.\n",
    "\n",
    "    Args:\n",
    "        feat (Tensor): Feature map of shape [batch, channels, height, width].\n",
    "        ind (Tensor): Flat spatial indexes of shape [batch, num_rows].\n",
    "\n",
    "    Returns:\n",
    "        Tensor: Gathered features of shape [batch, num_rows, channels].\n",
    "    \"\"\"\n",
    "    channels = feat.size(1)\n",
    "    # [B, C, H, W] -> [B, H, W, C] -> [B, H*W, C]\n",
    "    channel_last = feat.permute(0, 2, 3, 1).contiguous()\n",
    "    flattened = channel_last.view(feat.size(0), -1, channels)\n",
    "    return gather_feat(flattened, ind)\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## decode function"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Using matplotlib backend: TkAgg\n"
     ]
    }
   ],
   "source": [
    "import mmcv\n",
    "import torch\n",
    "import numpy as np\n",
    "import matplotlib.pyplot as plt\n",
    "import os.path as osp\n",
    "import torch.nn.functional as F\n",
    "import cv2\n",
    "%matplotlib\n",
    "# NOTE(review): absolute local paths -- parameterize before sharing\n",
    "IMG_ROOT = '/media/gejunyao/Disk/Gejunyao/exp_results/visualization/results/ssdd/ssdd_gt_r/'\n",
    "CACHE_ROOT = '/media/gejunyao/Disk/Gejunyao/exp_results/visualization/middle_part/ExtremeShipKPVis/Cache/ExtremeShipCacheHrnet'\n",
    "\n",
    "IMG_ID = '000762'\n",
    "# tolerance used by the decoder to detect (near-)parallel segments\n",
    "SMALL_NUM = 1e-6\n",
    "# load heatmaps\n",
    "dict_heatmaps = mmcv.load(osp.join(CACHE_ROOT, IMG_ID+'.pkl'))\n",
    "\n",
    "target_center_list = dict_heatmaps['tc']\n",
    "longside_center_list = dict_heatmaps['lc']\n",
    "shortside_center_list = dict_heatmaps['sc']\n",
    "offset_list = dict_heatmaps['off']\n",
    "num_feats = len(target_center_list)\n",
    "\n",
    "# resize + pad the raw image to the fixed 640x640 input size\n",
    "img = mmcv.imread(IMG_ROOT + IMG_ID + '.jpg')\n",
    "im_h, im_w = (640, 640)\n",
    "rescale_img = mmcv.imrescale(img, (im_h, im_w))\n",
    "padded_img = mmcv.impad(rescale_img, shape=(im_h, im_w))\n",
    "\n",
    "\n",
    "def decode_heatmap_single(lc_heat, \n",
    "                          sc_heat, \n",
    "                          tc_heat, \n",
    "                          offset, \n",
    "                          k_pts, \n",
    "                          ec_conf_thr, \n",
    "                          tc_conf_thr,\n",
    "                          num_dets=1000, \n",
    "                          kernel=3):\n",
    "    \"\"\"Decode each level heatmaps into box predictions\n",
    "\n",
    "    Args:\n",
    "        lc_heat (Tensor): Heatmap for longside center. \n",
    "            Shape [batch, num_clses, h, w].\n",
    "        sc_heat (Tensor): Heatmap for shortside center. \n",
    "            Shape [batch, num_clses, h, w].\n",
    "        tc_heat (Tensor): Heatmap for target center. \n",
    "            Shape [batch, num_clses, h, w].\n",
    "        offset (Tensor): offsets for edge centers. In old versons, we used\n",
    "            offsets for target centers, which we needn't anymore.\n",
    "            Shape for old ver: [batch, 6, h, w].\n",
    "            the second dim stands for: \n",
    "            (tc_off_x, tc_off_y, sc_off_x, sc_off_y, lc_off_x, lc_off_y)\n",
    "            Shape for new ver: [batch, 4, h, w].\n",
    "            the second dim stands for: \n",
    "            (lc_off_x, lc_off_y, sc_off_x, sc_off_y)  \n",
    "        k_pts (int): select top k_pts key points from each heat map, and\n",
    "            do matching between these key points          \n",
    "        ec_conf_thr (float): Confidence threshold for edge centers. Edge \n",
    "            center score unther this threshold will be rejected.\n",
    "        tc_conf_thr (float): Confidence threshold for target centers. Target \n",
    "            center score unther this threshold will be rejected.        \n",
    "        num_dets (int): number of detections per feature\n",
    "        kernel (int, optional): max pooling kernel for detecting peaks.\n",
    "\n",
    "    Returns:\n",
    "        tuple[Tensor]: Results of detected bboxes and labels.\n",
    "\n",
    "            - det_bboxes (Tensor): key points detection results\n",
    "                Shape [batch, num_dets, 8] \n",
    "                (lc_a_xs, lc_a_ys, lc_b_xs, lc_b_ys,\n",
    "                 sc_a_xs, sc_a_ys, sc_b_xs, sc_b_ys)\n",
    "            - det_scores (Tensor): \n",
    "                Shape [batch, num_det, 1]\n",
    "            - det_clses (Tensor):\n",
    "                Shape [batch, num_det, 1]\n",
    "    \"\"\"\n",
    "    batch, _, height, width = tc_heat.size()\n",
    "\n",
    "    K = k_pts\n",
    "    lc_heat, sc_heat, tc_heat = lc_heat.sigmoid(), sc_heat.sigmoid(), tc_heat.sigmoid()\n",
    "\n",
    "    # keep only local peaks on the edge-center maps\n",
    "    lc_heat = get_local_maximum(lc_heat, kernel=kernel)\n",
    "    sc_heat = get_local_maximum(sc_heat, kernel=kernel)\n",
    "    \n",
    "    lc_scores, lc_inds, lc_clses, lc_ys, lc_xs = get_topk_from_heatmap(lc_heat, k=k_pts)\n",
    "    sc_scores, sc_inds, sc_clses, sc_ys, sc_xs = get_topk_from_heatmap(sc_heat, k=k_pts)\n",
    "\n",
    "    # old-version offset channel layout: (tc, sc, lc) x (x, y)\n",
    "    sc_off = transpose_and_gather_feat(offset[:,2:4], sc_inds)\n",
    "    lc_off = transpose_and_gather_feat(offset[:,4:6], lc_inds)\n",
    "    # add offsets\n",
    "    lc_xs = lc_xs + lc_off[..., 0]\n",
    "    lc_ys = lc_ys + lc_off[..., 1]\n",
    "    sc_xs = sc_xs + sc_off[..., 0]\n",
    "    sc_ys = sc_ys + sc_off[..., 1]\n",
    "    # TODO: modify to use repeat in the future\n",
    "    # enumerate every (lc_a, lc_b, sc_a, sc_b) combination on a K^4 grid\n",
    "    lc_a_ys = lc_ys.view(batch, K, 1, 1, 1).expand(batch, K, K, K, K)\n",
    "    lc_a_xs = lc_xs.view(batch, K, 1, 1, 1).expand(batch, K, K, K, K)\n",
    "    lc_b_ys = lc_ys.view(batch, 1, K, 1, 1).expand(batch, K, K, K, K)\n",
    "    lc_b_xs = lc_xs.view(batch, 1, K, 1, 1).expand(batch, K, K, K, K)\n",
    "    sc_a_ys = sc_ys.view(batch, 1, 1, K, 1).expand(batch, K, K, K, K)\n",
    "    sc_a_xs = sc_xs.view(batch, 1, 1, K, 1).expand(batch, K, K, K, K)\n",
    "    sc_b_ys = sc_ys.view(batch, 1, 1, 1, K).expand(batch, K, K, K, K)\n",
    "    sc_b_xs = sc_xs.view(batch, 1, 1, 1, K).expand(batch, K, K, K, K)\n",
    "\n",
    "    lc_a_clses = lc_clses.view(batch, K, 1, 1, 1).expand(batch, K, K, K, K)\n",
    "    lc_b_clses = lc_clses.view(batch, 1, K, 1, 1).expand(batch, K, K, K, K)\n",
    "    sc_a_clses = sc_clses.view(batch, 1, 1, K, 1).expand(batch, K, K, K, K)\n",
    "    sc_b_clses = sc_clses.view(batch, 1, 1, 1, K).expand(batch, K, K, K, K)\n",
    "\n",
    "    lc_a_scores = lc_scores.view(batch, K, 1, 1, 1).expand(batch, K, K, K, K)\n",
    "    lc_b_scores = lc_scores.view(batch, 1, K, 1, 1).expand(batch, K, K, K, K)\n",
    "    sc_a_scores = sc_scores.view(batch, 1, 1, K, 1).expand(batch, K, K, K, K)\n",
    "    sc_b_scores = sc_scores.view(batch, 1, 1, 1, K).expand(batch, K, K, K, K)\n",
    "\n",
    "    # collision center detection\n",
    "    lc_ly = lc_a_ys - lc_b_ys\n",
    "    lc_lx = lc_a_xs - lc_b_xs\n",
    "    sc_ly = sc_a_ys - sc_b_ys\n",
    "    sc_lx = sc_a_xs - sc_b_xs\n",
    "\n",
    "    denom = lc_lx * sc_ly - sc_lx * lc_ly\n",
    "\n",
    "    # |denom| < SMALL_NUM means the two segments are (near-)parallel\n",
    "    none_collision = ((-SMALL_NUM < denom) & (denom < SMALL_NUM)).long()\n",
    "\n",
    "    denom_pos = denom > 0\n",
    "    lcsc_ly = lc_b_ys - sc_b_ys\n",
    "    lcsc_lx = lc_b_xs - sc_b_xs\n",
    "\n",
    "    s_numer = lc_lx * lcsc_ly - lc_ly * lcsc_lx\n",
    "    none_collision = none_collision + ((s_numer < 0) == denom_pos).long()\n",
    "    t_numer = sc_lx * lcsc_ly - sc_ly * lcsc_lx\n",
    "    none_collision = none_collision + ((t_numer < 0) == denom_pos).long()\n",
    "    none_collision = none_collision + \\\n",
    "        (s_numer.abs() > denom.abs()).long() +\\\n",
    "        (t_numer.abs() > denom.abs()).long()\n",
    "\n",
    "    # approximate collision center: rounded mean of the four key points\n",
    "    collision_px = ((lc_a_xs + lc_b_xs + sc_a_xs + sc_b_xs + 1) / 4).long().clamp(0, width-1)\n",
    "    # BUGFIX: y coordinates must be clamped with the image HEIGHT, not width\n",
    "    collision_py = ((lc_a_ys + lc_b_ys + sc_a_ys + sc_b_ys + 1) / 4).long().clamp(0, height-1)\n",
    "\n",
    "    # # calculate collision center\n",
    "    # t = t_numer / (denom + SMALL_NUM)\n",
    "\n",
    "    # collision_px = lc_b_xs + (t * lc_lx)\n",
    "    # collision_py = lc_b_ys + (t * lc_ly)\n",
    "\n",
    "    # collision_px = (collision_px + 0.5).long().clamp(0, width-1)\n",
    "    # collision_py = (collision_py + 0.5).long().clamp(0, height-1)\n",
    "\n",
    "    # score the collision center on the target-center heatmap\n",
    "    colli_inds = lc_a_clses.long() * (height * width) + \\\n",
    "                 collision_py * width + \\\n",
    "                 collision_px\n",
    "    colli_inds = colli_inds.view(batch, -1)\n",
    "    colli_heat = tc_heat.view(batch, -1, 1)\n",
    "    colli_scores = gather_feat(colli_heat, colli_inds)\n",
    "    colli_scores = colli_scores.view(batch, K, K, K, K)\n",
    "    \n",
    "    # weighted mean: the collision (target-center) score counts twice\n",
    "    scores = (lc_a_scores + lc_b_scores + sc_a_scores + sc_b_scores + 2*colli_scores) / 6\n",
    "\n",
    "    # reject boxes based on classes\n",
    "    cls_inds = (lc_a_clses != lc_b_clses) + (sc_a_clses != sc_b_clses) + \\\n",
    "               (lc_a_clses != sc_a_clses)\n",
    "    cls_inds = (cls_inds > 0)    \n",
    "\n",
    "    # reject boxes based on none-collision and duplications\n",
    "    # TODO: move the indices to formal operations can boost performence\n",
    "    # tril (incl. diagonal) marks duplicate/self pairs in the lc and sc dims\n",
    "    tl_splr = torch.tril_indices(K, K)\n",
    "    none_collision[:, tl_splr[0], tl_splr[1],...] = \\\n",
    "            none_collision[:, tl_splr[0], tl_splr[1],...] + 1\n",
    "    none_collision[..., tl_splr[0], tl_splr[1]] = \\\n",
    "            none_collision[..., tl_splr[0], tl_splr[1]] + 1\n",
    "    none_colli_inds = (none_collision > 0)\n",
    "\n",
    "    # reject boxes based on scores\n",
    "    low_scor_inds = (lc_a_scores < ec_conf_thr) + (lc_b_scores < ec_conf_thr) + \\\n",
    "                    (sc_a_scores < ec_conf_thr) + (sc_b_scores < ec_conf_thr) + \\\n",
    "                    (colli_scores < tc_conf_thr)\n",
    "    low_scor_inds = (low_scor_inds > 0)\n",
    "\n",
    "    # push rejected combinations below any legal score\n",
    "    scores = scores - cls_inds.float()\n",
    "    scores = scores - none_colli_inds.float()\n",
    "    scores = scores - low_scor_inds.float()\n",
    "\n",
    "    scores = scores.view(batch, -1)\n",
    "    scores, inds = torch.topk(scores, num_dets)\n",
    "    scores = scores.unsqueeze(2)\n",
    "\n",
    "    bboxes = torch.stack((lc_a_xs, lc_a_ys, lc_b_xs, lc_b_ys,\n",
    "                          sc_a_xs, sc_a_ys, sc_b_xs, sc_b_ys), dim=5)\n",
    "\n",
    "    bboxes = bboxes.view(batch, -1, 8)\n",
    "    bboxes = gather_feat(bboxes, inds)\n",
    "\n",
    "    clses = lc_a_clses.contiguous().view(batch, -1, 1)\n",
    "    clses = gather_feat(clses, inds).float()\n",
    "\n",
    "    # normalize bbox predictions to range (0, 1)\n",
    "    # BUGFIX: index the coordinate axis with `...` -- `bboxes[:, 0::2]`\n",
    "    # sliced every other DETECTION, not every other coordinate\n",
    "    # (the caller denormalizes with `det_bboxes[..., 0::2] * im_w`).\n",
    "    bboxes[..., 0::2] = bboxes[..., 0::2] / width\n",
    "    bboxes[..., 1::2] = bboxes[..., 1::2] / height\n",
    "    # form point sets into oriented bounding boxes\n",
    "\n",
    "\n",
    "    return bboxes, scores, clses\n",
    "\n",
    "# det_bboxes, det_scores, det_clses = decode_heatmap_single(longside_center_list[0][None].sigmoid(),\n",
    "#                                                             shortside_center_list[0][None].sigmoid(),\n",
    "#                                                             target_center_list[0][None].sigmoid(),\n",
    "#                                                             # TODO: fix the data problem\n",
    "#                                                             offset_list[0]()[None],\n",
    "#                                                             k_pts=40,\n",
    "#                                                             ec_conf_thr=0.01,\n",
    "#                                                             tc_conf_thr=0.01,\n",
    "#                                                             num_dets=20)\n",
    "\n",
    "\n",
    "# det_bboxes[:,0::2] = det_bboxes[:,0::2] * im_w\n",
    "# det_bboxes[:,1::2] = det_bboxes[:,1::2] * im_h\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## test for collision detection"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "torch.Size([1, 3, 3, 3, 3])\n",
      "tensor([[0, 0, 1, 0, 1],\n",
      "        [0, 0, 2, 0, 1],\n",
      "        [0, 0, 2, 1, 2],\n",
      "        [0, 1, 2, 1, 2]])\n",
      "torch.Size([4, 5])\n",
      "collision_px: tensor([2.5000, 2.6056, 7.9310, 8.4494])\n",
      "collision_py: tensor([ 2.0000,  1.9296, 10.1035, 11.3038])\n"
     ]
    }
   ],
   "source": [
    "# test for the collision detection\n",
    "# Segment-intersection check between every lc pair (lc_a--lc_b) and sc pair\n",
    "# (sc_a--sc_b): each violated condition increments `none_collision`, and only\n",
    "# combinations with zero violations count as intersecting.\n",
    "batch = 1\n",
    "SMALL_NUM = 1e-6\n",
    "\n",
    "lc = torch.tensor([[2, 1], [3, 3],[45,67]])\n",
    "sc = torch.tensor([[1, 3], [4, 1],[32.5,67]])\n",
    "K = len(lc)\n",
    "lc_xs, lc_ys = lc[:, 0], lc[:, 1]\n",
    "sc_xs, sc_ys = sc[:, 0], sc[:, 1]\n",
    "\n",
    "# broadcast every (lc_a, lc_b, sc_a, sc_b) combination onto a K^4 grid\n",
    "lc_a_ys = lc_ys.view(batch, K, 1, 1, 1).expand(batch, K, K, K, K)\n",
    "lc_a_xs = lc_xs.view(batch, K, 1, 1, 1).expand(batch, K, K, K, K)\n",
    "lc_b_ys = lc_ys.view(batch, 1, K, 1, 1).expand(batch, K, K, K, K)\n",
    "lc_b_xs = lc_xs.view(batch, 1, K, 1, 1).expand(batch, K, K, K, K)\n",
    "sc_a_ys = sc_ys.view(batch, 1, 1, K, 1).expand(batch, K, K, K, K)\n",
    "sc_a_xs = sc_xs.view(batch, 1, 1, K, 1).expand(batch, K, K, K, K)\n",
    "sc_b_ys = sc_ys.view(batch, 1, 1, 1, K).expand(batch, K, K, K, K)\n",
    "sc_b_xs = sc_xs.view(batch, 1, 1, 1, K).expand(batch, K, K, K, K)\n",
    "\n",
    "# parrallel detection\n",
    "lc_ly = lc_a_ys - lc_b_ys\n",
    "lc_lx = lc_a_xs - lc_b_xs\n",
    "sc_ly = sc_a_ys - sc_b_ys\n",
    "sc_lx = sc_a_xs - sc_b_xs\n",
    "\n",
    "denom = lc_lx * sc_ly - sc_lx * lc_ly\n",
    "\n",
    "# |denom| < SMALL_NUM means the two segments are (near-)parallel\n",
    "none_collision = ((-SMALL_NUM < denom) & (denom < SMALL_NUM)).long()\n",
    "\n",
    "denom_pos = denom > 0\n",
    "lcsc_ly = lc_b_ys - sc_b_ys\n",
    "lcsc_lx = lc_b_xs - sc_b_xs\n",
    "\n",
    "s_numer = lc_lx * lcsc_ly - lc_ly * lcsc_lx\n",
    "none_collision = none_collision + ((s_numer < 0) == denom_pos).long()\n",
    "t_numer = sc_lx * lcsc_ly - sc_ly * lcsc_lx\n",
    "none_collision = none_collision + ((t_numer < 0) == denom_pos).long()\n",
    "none_collision = none_collision + \\\n",
    "    (s_numer.abs() > denom.abs()).long() +\\\n",
    "    (t_numer.abs() > denom.abs()).long()\n",
    "# exact intersection point from the line parameter t\n",
    "t = t_numer / (denom + SMALL_NUM)\n",
    "collision_px = lc_b_xs + (t * lc_lx)\n",
    "collision_py = lc_b_ys + (t * lc_ly)\n",
    "\n",
    "# lower-triangular (incl. diagonal) index pairs are duplicates/self-pairs\n",
    "tl_splr = torch.tril_indices(K, K, 0)\n",
    "none_collision[:, tl_splr[0], tl_splr[1],...] = none_collision[:, tl_splr[0], tl_splr[1],...] + 1\n",
    "none_collision[..., tl_splr[0], tl_splr[1]] = none_collision[..., tl_splr[0], tl_splr[1]] + 1\n",
    "collision = none_collision == 0\n",
    "\n",
    "print(collision.shape)\n",
    "# print(collision)\n",
    "print(torch.nonzero(collision))\n",
    "print(torch.nonzero(collision).shape)\n",
    "print('collision_px:', collision_px[collision])\n",
    "print('collision_py:', collision_py[collision])\n",
    "\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## test for rbox generation"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "def keypoints2rbboxes(bboxes, using_geo_center=True):\n",
    "    # bboxes = torch.stack((lc_a_xs, lc_a_ys, lc_b_xs, lc_b_ys,\n",
    "    #                       sc_a_xs, sc_a_ys, sc_b_xs, sc_b_ys), dim=5)\n",
    "    # calculate a\n",
    "    batch = bboxes.size(0)\n",
    "    dy = bboxes[...,7] - bboxes[...,5]\n",
    "    dx = bboxes[...,6] - bboxes[...,4]\n",
    "    a = torch.atan2(dy , dx)\n",
    "\n",
    "    # calculate w, which is the length between shortside centers\n",
    "    sc_vec = bboxes[...,4:6] - bboxes[...,6:8]\n",
    "    w = torch.norm(sc_vec, dim=-1)\n",
    "\n",
    "    # calculate h\n",
    "    lc_pts = bboxes[...,:4].view(batch,-1,2,1,2).repeat(1,1,1,2,1)\n",
    "    sc_pts = bboxes[...,4:].view(batch,-1,1,2,2).repeat(1,1,2,1,1)\n",
    "    vec = sc_pts - lc_pts\n",
    "    vec_3d = torch.zeros((vec.size(0), vec.size(1), vec.size(2), vec.size(3), 3))\n",
    "    vec_3d[...,:2] = vec\n",
    "    area = torch.cross(vec_3d[...,0,:], vec_3d[...,1,:], dim=-1)\n",
    "    g_vec = sc_pts[...,0,:] - sc_pts[...,1,:]\n",
    "    g_len = torch.norm(g_vec, dim=-1)\n",
    "    dist = area[...,2].abs() / g_len\n",
    "    h = dist.sum(dim=-1)\n",
    "\n",
    "    # calculate x, y\n",
    "    if using_geo_center:\n",
    "        x = bboxes[...,0::2].sum(dim=-1) / 4\n",
    "        y = bboxes[...,1::2].sum(dim=-1) / 4\n",
    "    else:\n",
    "        raise NotImplementedError\n",
    "        \n",
    "    rbboxes = torch.stack([x,y,w,h,a],dim=-1)\n",
    "\n",
    "    return rbboxes\n",
    "\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## multi-level result generation"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### visualization tools"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 70,
   "metadata": {},
   "outputs": [],
   "source": [
    "import cv2\n",
    "def _det_points_visualization(vis_size, vis_img, paired_kpts):\n",
    "    \"\"\"Draw the 4 key points of each detection as coloured circles.\n",
    "\n",
    "    Args:\n",
    "        vis_size (int): circle radius in pixels.\n",
    "        vis_img (ndarray): BGR image to draw on (mutated in place by cv2).\n",
    "        paired_kpts (Tensor): [N, 8] key points laid out as\n",
    "            (x1, y1, x2, y2, x3, y3, x4, y4).\n",
    "\n",
    "    Returns:\n",
    "        ndarray: the annotated image.\n",
    "    \"\"\"\n",
    "\n",
    "    k = vis_size\n",
    "    # round to the nearest integer pixel\n",
    "    paired_kpts = (paired_kpts + 0.5).long().numpy()\n",
    "    for kpts in paired_kpts:\n",
    "        # one random colour per detection\n",
    "        color = torch.randint(0,256, size=(3,)).numpy().tolist()\n",
    "        for i in range(4):\n",
    "            x, y = kpts[2*i], kpts[2*i+1]\n",
    "            cv2.circle(vis_img, (x, y), k, color, -1)\n",
    "            cv2.circle(vis_img, (x, y), k, (255,255,255), 1)\n",
    "\n",
    "    return vis_img\n",
    "\n",
    "def _det_rboxes_visualization(vis_img, rbboxes):\n",
    "    \"\"\"Draw rotated boxes (x, y, w, h, angle in radians) on an image.\n",
    "\n",
    "    Args:\n",
    "        vis_img (ndarray): BGR image to draw on (mutated in place by cv2).\n",
    "        rbboxes (Tensor): [N, 5] rotated boxes.\n",
    "\n",
    "    Returns:\n",
    "        ndarray: the image with one randomly coloured contour per box.\n",
    "    \"\"\"\n",
    "    rbbs = rbboxes.numpy()\n",
    "    for rbb in rbbs:\n",
    "        color = torch.randint(0,256, size=(3,)).numpy().tolist()\n",
    "        x,y,w,h,a = rbb[0], rbb[1], rbb[2], rbb[3], rbb[4] * 180 / np.pi\n",
    "        # normalize the angle into [-90, 0) for cv2, swapping w/h per 90 deg\n",
    "        while not 0 > a >= -90:\n",
    "            if a >= 0:\n",
    "                a -= 90\n",
    "            else:\n",
    "                a += 90\n",
    "            w, h = h, w\n",
    "        rect = cv2.boxPoints(((x,y ), (w,h), a))\n",
    "        # BUGFIX: np.int0 was removed in NumPy 2.0; np.intp is the same type\n",
    "        rect = np.around(rect).astype(np.intp)\n",
    "        cv2.drawContours(vis_img, [rect], -1, color, 2)\n",
    "    return vis_img\n",
    "\n",
    "def _det_rboxes_and_kpts_visualization(vis_img_kpts, vis_img_rboxes, vis_size, paired_kpts, rbboxes):\n",
    "    \"\"\"Draw detections both as 4 key points and as rotated boxes.\n",
    "\n",
    "    Args:\n",
    "        vis_img_kpts (ndarray): image for the key points (mutated by cv2).\n",
    "        vis_img_rboxes (ndarray): image for the rotated boxes (mutated).\n",
    "        vis_size (int): circle radius for the key points.\n",
    "        paired_kpts (Tensor): [N, 8] key points (x1, y1, ..., x4, y4).\n",
    "        rbboxes (Tensor): [N, 5] rotated boxes (x, y, w, h, angle in rad).\n",
    "\n",
    "    Returns:\n",
    "        tuple(ndarray, ndarray): the two annotated images.\n",
    "    \"\"\"\n",
    "    k = vis_size\n",
    "    paired_kpts = (paired_kpts + 0.5).long().numpy()\n",
    "    rbbs = rbboxes.numpy()\n",
    "    for j, kpts in enumerate(paired_kpts):\n",
    "        # keypoints: one random colour shared by a box and its points\n",
    "        color = torch.randint(0,256, size=(3,)).numpy().tolist()\n",
    "        for i in range(4):\n",
    "            x, y = kpts[2*i], kpts[2*i+1]\n",
    "            cv2.circle(vis_img_kpts, (x, y), k, color, -1)\n",
    "            cv2.circle(vis_img_kpts, (x, y), k, (255,255,255), 1)   \n",
    "\n",
    "        rbb = rbbs[j]\n",
    "        # rboxes: angle to degrees, normalized into [-90, 0) for cv2\n",
    "        x,y,w,h,a = rbb[0], rbb[1], rbb[2], rbb[3], rbb[4] * 180 / np.pi\n",
    "        while not 0 > a >= -90:\n",
    "            if a >= 0:\n",
    "                a -= 90\n",
    "            else:\n",
    "                a += 90\n",
    "            w, h = h, w\n",
    "        rect = cv2.boxPoints(((x,y ), (w,h), a))\n",
    "        # BUGFIX: np.int0 was removed in NumPy 2.0; np.intp is the same type\n",
    "        rect = np.around(rect).astype(np.intp)\n",
    "        cv2.drawContours(vis_img_rboxes, [rect], -1, color, 2) \n",
    "\n",
    "    return vis_img_kpts, vis_img_rboxes\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 109,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "torch.Size([1, 2, 5])\n"
     ]
    }
   ],
   "source": [
    "# smoke test: two hand-made key-point boxes -> rotated boxes\n",
    "bboxes = torch.tensor([[[1,4,5,6,3,3,3,9],\n",
    "                    [1,5,5,1,1,1,5,5]]], dtype=torch.float32)\n",
    "# NOTE(review): `canvas` is created but never used below\n",
    "canvas = np.zeros((100,100,3), dtype=np.uint8)\n",
    "rbboxes = keypoints2rbboxes(bboxes, True)\n",
    "print(rbboxes.shape)\n",
    "# _det_rboxes_visualization(None, rbboxes)\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### function"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 138,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tensor([[[0.2718, 0.6157, 0.2129, 0.0543, 2.4393]]])\n",
      "tensor([[[173.9832, 394.0232, 136.2368,  34.7633,   2.4393]]])\n"
     ]
    }
   ],
   "source": [
    "# decode one pyramid level of cached heatmaps and visualize the result\n",
    "feat_lvl = 1\n",
    "# NOTE(review): absolute local paths -- parameterize before sharing\n",
    "IMG_ROOT = '/media/gejunyao/Disk/Gejunyao/exp_results/visualization/results/ssdd/ssdd_gt_r/'\n",
    "CACHE_ROOT = '/media/gejunyao/Disk/Gejunyao/exp_results/visualization/middle_part/ExtremeShipKPVis/Cache/ExtremeShipCacheHrnet'\n",
    "\n",
    "IMG_ID = '000762'\n",
    "SMALL_NUM = 1e-6\n",
    "# load heatmaps\n",
    "dict_heatmaps = mmcv.load(osp.join(CACHE_ROOT, IMG_ID+'.pkl'))\n",
    "\n",
    "target_center_list = dict_heatmaps['tc']\n",
    "longside_center_list = dict_heatmaps['lc']\n",
    "shortside_center_list = dict_heatmaps['sc']\n",
    "offset_list = dict_heatmaps['off']\n",
    "num_feats = len(target_center_list)\n",
    "\n",
    "img = mmcv.imread(IMG_ROOT \n",
    "+ IMG_ID + '.jpg')\n",
    "im_h, im_w = (640, 640)\n",
    "rescale_img = mmcv.imrescale(img, (im_h, im_w))\n",
    "padded_img = mmcv.impad(rescale_img, shape=(im_h, im_w))\n",
    "# NOTE(review): offset_list[feat_lvl] is *called* with () below -- presumably\n",
    "# an object wrapping the offset tensor; see the TODO. Verify the cache format.\n",
    "det_bboxes, det_scores, det_clses = decode_heatmap_single(longside_center_list[feat_lvl][None],\n",
    "                                                            shortside_center_list[feat_lvl][None],\n",
    "                                                            target_center_list[feat_lvl][None],\n",
    "                                                            # TODO: fix the data problem\n",
    "                                                            offset_list[feat_lvl]()[None],\n",
    "                                                            k_pts=40,\n",
    "                                                            ec_conf_thr=0.01,\n",
    "                                                            tc_conf_thr=0.1,\n",
    "                                                            num_dets=20)\n",
    "\n",
    "\n",
    "# keep a single detection (index 6 among the positive-score ones) for display\n",
    "keep_box = 6\n",
    "keep_inds = (det_scores > 0)[...,0]\n",
    "det_bboxes = det_bboxes[keep_inds][keep_box][None][None]\n",
    "det_scores = det_scores[keep_inds][keep_box][None][None]\n",
    "det_clses = det_clses[keep_inds][keep_box][None][None]\n",
    "det_rboxes = keypoints2rbboxes(det_bboxes)\n",
    "print(det_rboxes)\n",
    "\n",
    "# map normalized coordinates back to pixel units\n",
    "det_bboxes[...,0::2] = det_bboxes[...,0::2] * im_w\n",
    "det_bboxes[...,1::2] = det_bboxes[...,1::2] * im_h\n",
    "vis_target_image = padded_img.copy()\n",
    "vis_rbbox_image = padded_img.copy()\n",
    "lc_heat = longside_center_list[feat_lvl][0].numpy()\n",
    "lc_heat = mmcv.imresize(lc_heat, (im_w, im_h))\n",
    "sc_heat = shortside_center_list[feat_lvl][0].numpy()\n",
    "sc_heat = mmcv.imresize(sc_heat, (im_w, im_h))\n",
    "# recompute the rotated boxes in pixel units after denormalization\n",
    "det_rboxes = keypoints2rbboxes(det_bboxes)\n",
    "print(det_rboxes)\n",
    "# vis_img = _det_points_visualization(4, vis_target_image, det_bboxes[0])\n",
    "# vis_rbox = _det_rboxes_visualization(vis_rbbox_image, det_rboxes[0])\n",
    "vis_img, vis_rbox = _det_rboxes_and_kpts_visualization(vis_target_image, vis_rbbox_image, 5, det_bboxes[0], det_rboxes[0])\n",
    "# vis_lc_img = _det_points_visualization(8, vis_lc_img, det_bboxes[0])\n",
    "plt.figure()\n",
    "plt.subplot(1,2,1)\n",
    "plt.imshow(vis_img[...,::-1])\n",
    "plt.imshow(sc_heat, alpha=0.4)\n",
    "plt.subplot(1,2,2)\n",
    "plt.imshow(vis_rbox[...,::-1])\n",
    "plt.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 108,
   "metadata": {},
   "outputs": [],
   "source": [
    "import math\n",
    "def _get_bboxes_single(all_lvl_bbox_kpts,\n",
    "                       all_lvl_bbox_scores,\n",
    "                       all_lvl_bbox_clses,\n",
    "                       img_meta,\n",
    "                       valid_size_range,\n",
    "                       rescale=False,\n",
    "                       with_nms=True):\n",
    "    \"\"\"Convert per-level keypoint detections of one image into rotated boxes.\n",
    "\n",
    "    Args:\n",
    "        all_lvl_bbox_kpts (list[Tensor]): keypoint boxes per level (batch dim 0).\n",
    "        all_lvl_bbox_scores (list[Tensor]): detection scores per level.\n",
    "        all_lvl_bbox_clses (list[Tensor]): class ids per level.\n",
    "        img_meta (dict): image meta; uses 'pad_shape' (falling back to\n",
    "            'img_shape') and 'scale_factor'.\n",
    "        valid_size_range (list[tuple] | None): per-level (min, max) bounds\n",
    "            on the normalized rbox width used to filter detections.\n",
    "        rescale (bool): if True, divide boxes by img_meta['scale_factor'].\n",
    "        with_nms (bool): NMS is not implemented; must be False.\n",
    "\n",
    "    Returns:\n",
    "        tuple: (det_rboxes with score appended as last column, det_clses)\n",
    "    \"\"\"\n",
    "    num_lvl = len(all_lvl_bbox_kpts)\n",
    "\n",
    "    # keypoints -> rotated boxes, one tensor per level\n",
    "    all_lvl_rboxes = []\n",
    "    for bbox_kpts in all_lvl_bbox_kpts:\n",
    "        all_lvl_rboxes.append(keypoints2rbboxes(bbox_kpts))\n",
    "\n",
    "    # drop boxes whose (normalized) width falls outside this level's range\n",
    "    if valid_size_range is not None:\n",
    "        for i in range(num_lvl):\n",
    "            valid_ind = (all_lvl_rboxes[i][...,2] > valid_size_range[i][0]) & \\\n",
    "                        (all_lvl_rboxes[i][...,2] < valid_size_range[i][1])\n",
    "            all_lvl_rboxes[i] = all_lvl_rboxes[i][valid_ind][None]\n",
    "            all_lvl_bbox_scores[i] = all_lvl_bbox_scores[i][valid_ind][None]\n",
    "            all_lvl_bbox_clses[i] = all_lvl_bbox_clses[i][valid_ind][None]\n",
    "\n",
    "    det_rboxes = torch.cat(all_lvl_rboxes, dim=1)\n",
    "    det_scores = torch.cat(all_lvl_bbox_scores, dim=1)\n",
    "    det_clses = torch.cat(all_lvl_bbox_clses, dim=1)\n",
    "\n",
    "    # keep boxes based on scores\n",
    "    valid_score = det_scores > 0\n",
    "    # keep boxes based on aspect ratio: w/h must lie in (1.0, 5); a zero\n",
    "    # height gives inf, which fails the upper bound and is filtered out\n",
    "    valid_ratio = ((det_rboxes[...,2] / det_rboxes[...,3]) < 5 ) & \\\n",
    "                  ((det_rboxes[...,2] / det_rboxes[...,3]) > 1.0)\n",
    "    keep_ind = valid_score[...,0] & valid_ratio\n",
    "    det_rboxes = det_rboxes[keep_ind]\n",
    "    det_scores = det_scores[keep_ind]\n",
    "    det_clses = det_clses[keep_ind]\n",
    "\n",
    "    # append the score as the last column of each rbox\n",
    "    det_rboxes = torch.cat([det_rboxes, det_scores], dim=-1)\n",
    "\n",
    "    # BUGFIX: the original `img_meta.get('pad_shape', None) if not None else\n",
    "    # img_meta['img_shape']` always took the first branch ('not None' is\n",
    "    # constantly True), so a missing 'pad_shape' produced None and crashed\n",
    "    # on unpacking. Fall back to 'img_shape' properly instead.\n",
    "    img_shape = img_meta.get('pad_shape', img_meta['img_shape'])\n",
    "    img_h, img_w, _ = img_shape\n",
    "    # boxes are normalized: centers scale by image size, side lengths by\n",
    "    # the geometric mean of the image dimensions\n",
    "    det_rboxes[..., 0] = det_rboxes[..., 0] * img_w\n",
    "    det_rboxes[..., 1] = det_rboxes[..., 1] * img_h\n",
    "    det_rboxes[..., 2:4] = det_rboxes[..., 2:4] * math.sqrt(img_h * img_w)\n",
    "\n",
    "    if rescale:\n",
    "        det_rboxes[..., :4] = det_rboxes[..., :4] / det_rboxes.new_tensor(img_meta['scale_factor'])\n",
    "\n",
    "    if with_nms:\n",
    "        raise NotImplementedError\n",
    "\n",
    "    return det_rboxes, det_clses"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 137,
   "metadata": {},
   "outputs": [],
   "source": [
    "from mmdet.core import multi_apply\n",
    "def decode_multi_lvl_heatmaps(list_longside_center,\n",
    "                              list_shortside_center,\n",
    "                              list_target_center,\n",
    "                              list_offset,\n",
    "                              list_num_pkts_per_lvl,\n",
    "                              ec_conf_thr,\n",
    "                              tc_conf_thr,\n",
    "                              img_metas,\n",
    "                              valid_size_range=None,\n",
    "                              rescale=False,\n",
    "                              with_nms=False):\n",
    "    \"\"\"Decode multi-level keypoint heatmaps into per-image detections.\n",
    "\n",
    "    Runs decode_heatmap_single over every feature level via multi_apply,\n",
    "    then regroups the per-level outputs by image and converts them to\n",
    "    rotated boxes with _get_bboxes_single.\n",
    "\n",
    "    Returns:\n",
    "        list: one (det_rboxes, det_clses) tuple per image in img_metas.\n",
    "    \"\"\"\n",
    "    n_lvls = len(list_longside_center)\n",
    "    # every per-level input must provide exactly one entry per feature level\n",
    "    assert len(list_shortside_center) == n_lvls and \\\n",
    "           len(list_target_center) == n_lvls and \\\n",
    "           len(list_offset) == n_lvls and \\\n",
    "           len(list_num_pkts_per_lvl) == n_lvls\n",
    "\n",
    "    if valid_size_range is not None:\n",
    "        assert len(valid_size_range) == len(list_target_center)\n",
    "\n",
    "    # decode each level independently (multi_apply maps over the lists)\n",
    "    multi_lvl_bbox_kpts, multi_lvl_scores, multi_lvl_clses = multi_apply(\n",
    "        decode_heatmap_single,\n",
    "        list_longside_center,\n",
    "        list_shortside_center,\n",
    "        list_target_center,\n",
    "        list_offset,\n",
    "        list_num_pkts_per_lvl,\n",
    "        ec_conf_thr=ec_conf_thr,\n",
    "        tc_conf_thr=tc_conf_thr,\n",
    "        num_dets=50\n",
    "    )\n",
    "\n",
    "    # gather this image's slice from every level ([None] re-adds batch dim)\n",
    "    result_list = []\n",
    "    for idx, img_meta in enumerate(img_metas):\n",
    "        per_lvl_kpts = [kpts[idx][None] for kpts in multi_lvl_bbox_kpts]\n",
    "        per_lvl_scores = [scores[idx][None] for scores in multi_lvl_scores]\n",
    "        per_lvl_clses = [clses[idx][None] for clses in multi_lvl_clses]\n",
    "        result_list.append(_get_bboxes_single(per_lvl_kpts,\n",
    "                                              per_lvl_scores,\n",
    "                                              per_lvl_clses,\n",
    "                                              img_meta,\n",
    "                                              valid_size_range,\n",
    "                                              rescale=rescale,\n",
    "                                              with_nms=with_nms))\n",
    "\n",
    "    return result_list\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 139,
   "metadata": {},
   "outputs": [],
   "source": [
    "# wash input data clean\n",
    "# Re-add the batch dimension that was stripped when the heatmaps were cached.\n",
    "# NOTE(review): this cell mutates the lists in place and is NOT idempotent --\n",
    "# running it twice stacks extra [None] dims; re-run the loading cell first.\n",
    "num_lvl = len(target_center_list)\n",
    "for i in range(num_lvl):\n",
    "    longside_center_list[i] = longside_center_list[i][None]\n",
    "    shortside_center_list[i] = shortside_center_list[i][None]\n",
    "    target_center_list[i] = target_center_list[i][None]\n",
    "    # NOTE(review): offset_list[i] is *called* here -- presumably the cache\n",
    "    # stores a lazy getter (see the 'fix the data problem' TODO above); confirm\n",
    "    offset_list[i] = offset_list[i]()[None]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 140,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "torch.Size([0, 6])\n",
      "tensor([], size=(0, 6))\n"
     ]
    }
   ],
   "source": [
    "\n",
    "# synthetic meta for a single 480x640 image padded to 640x640\n",
    "image_metas = [dict(\n",
    "    img_shape=(480, 640, 3),\n",
    "    pad_shape=(640, 640, 3),\n",
    "    scale_factor=1\n",
    ")]\n",
    "\n",
    "\n",
    "\n",
    "# top-k keypoints per level, and valid (normalized) box-size range per level\n",
    "num_kpts_per_lvl = [40,40,20,10,5]\n",
    "valid_size_range = [(-1, 0.2), (0.05, 0.4), (0.1, 0.8), (0.2, 1), (0.4, 2)] \n",
    "\n",
    "# NOTE(review): ec_conf_thr and tc_conf_thr are both passed as 1 (positional) --\n",
    "# the recorded output shows 0 decoded boxes (torch.Size([0, 6])); confirm intent\n",
    "result = decode_multi_lvl_heatmaps(longside_center_list, \n",
    "                          shortside_center_list, \n",
    "                          target_center_list,\n",
    "                          offset_list,\n",
    "                          num_kpts_per_lvl,\n",
    "                          1,\n",
    "                          1,\n",
    "                          img_metas=image_metas,\n",
    "                          valid_size_range=valid_size_range)\n",
    "vis_rbbox_image = padded_img.copy()\n",
    "vis_rbox = _det_rboxes_visualization(vis_rbbox_image, result[0][0])\n",
    "print(result[0][0])\n",
    "plt.figure()\n",
    "plt.imshow(vis_rbox[...,::-1])\n",
    "plt.show()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# visualize coco gts"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "loading annotations into memory...\n",
      "Done (t=0.17s)\n",
      "creating index...\n",
      "index created!\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 1962/1962 [01:04<00:00, 30.39it/s]\n"
     ]
    }
   ],
   "source": [
    "from pycocotools.coco import COCO\n",
    "import cv2\n",
    "from tqdm import tqdm\n",
    "import numpy as np\n",
    "import os\n",
    "\n",
    "ann_file = '/media/gejunyao/Disk1/Datasets/HRSID/annotations/test2017.json'\n",
    "IMG_ROOT = '/media/gejunyao/Disk/Gejunyao/exp_results/visualization/results/hrsid/ExtremeShip_HRNET/'\n",
    "OUT_DIR = '/media/gejunyao/Disk/Gejunyao/exp_results/visualization/results/hrsid/ExtremeShip_HRNET/with_gt'\n",
    "coco = COCO(ann_file)\n",
    "img_ids = coco.getImgIds()\n",
    "\n",
    "if not os.path.exists(OUT_DIR):\n",
    "    os.mkdir(OUT_DIR)\n",
    "\n",
    "data_infos = []\n",
    "for id in img_ids:\n",
    "    info = coco.loadImgs([id])[0]\n",
    "    info['filename'] = info['file_name']\n",
    "    data_infos.append(info)\n",
    "\n",
    "for data_info in tqdm(data_infos):\n",
    "    img_id = data_info['id']\n",
    "    ann_ids = coco.getAnnIds(imgIds=[img_id])\n",
    "    ann_info = coco.loadAnns(ann_ids)\n",
    "    # read image to visulize results\n",
    "    fname = data_info['filename']\n",
    "    img_file = os.path.join(IMG_ROOT, fname)\n",
    "    img = cv2.imread(img_file)\n",
    "    blank = np.zeros_like(img)\n",
    "    # draw polygen gts on image\n",
    "    for ann in ann_info:\n",
    "        gt_mask = coco.annToMask(ann)\n",
    "        # color = np.random.randint(0,255,size=3).tolist()   \n",
    "        color = [255, 0, 0]  \n",
    "        if np.max(gt_mask)!= 0:\n",
    "            contours, hierarchy = cv2.findContours(gt_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n",
    "            cv2.drawContours(blank, contours, -1, color, -1)\n",
    "    res = cv2.addWeighted(img, 0.5, blank, 0.5, 1)\n",
    "    # save image\n",
    "    save_path = os.path.join(OUT_DIR, fname)\n",
    "    cv2.imwrite(save_path, res)\n",
    "    \n",
    "    "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[1, 2, 3]\n"
     ]
    }
   ],
   "source": [
    "# sanity check: slicing up to len-1 drops only the last element\n",
    "test_list = [1, 2, 3, 4]\n",
    "print(test_list[: len(test_list) - 1])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "interpreter": {
   "hash": "713258e7866f8e573fdf0e2b04387b736e9386153199bdf40bc22039aca92ed3"
  },
  "kernelspec": {
   "display_name": "Python 3.7.11 64-bit ('rmml': conda)",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.11"
  },
  "orig_nbformat": 4
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
