{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "12515b58",
   "metadata": {
    "id": "RonaN8w3UgjU",
    "papermill": {
     "duration": 0.011631,
     "end_time": "2022-04-12T20:36:04.664380",
     "exception": false,
     "start_time": "2022-04-12T20:36:04.652749",
     "status": "completed"
    },
    "tags": []
   },
   "source": [
    "# Setup"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "8d448520",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2022-04-12T20:36:04.692007Z",
     "iopub.status.busy": "2022-04-12T20:36:04.690550Z",
     "iopub.status.idle": "2022-04-12T20:39:30.505107Z",
     "shell.execute_reply": "2022-04-12T20:39:30.504300Z"
    },
    "executionInfo": {
     "elapsed": 388,
     "status": "ok",
     "timestamp": 1638913128041,
     "user": {
      "displayName": "Konrad Banachewicz",
      "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhI23dFPH4TCCx-UXI_tTLJRhGGqpOO_xMfc26jqSA=s64",
      "userId": "15372017465249196723"
     },
     "user_tz": -60
    },
    "id": "C8KB9f6-aBa3",
    "papermill": {
     "duration": 205.828806,
     "end_time": "2022-04-12T20:39:30.505266",
     "exception": false,
     "start_time": "2022-04-12T20:36:04.676460",
     "status": "completed"
    },
    "tags": []
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Collecting git+https://github.com/facebookresearch/detectron2.git\r\n",
      "  Cloning https://github.com/facebookresearch/detectron2.git to /tmp/pip-req-build-ua9u4790\r\n",
      "  Running command git clone --filter=blob:none -q https://github.com/facebookresearch/detectron2.git /tmp/pip-req-build-ua9u4790\r\n",
      "  Resolved https://github.com/facebookresearch/detectron2.git to commit 2409af0bf0d4bdcc685feb6d2c7fd659828acac4\r\n",
      "  Preparing metadata (setup.py) ... \u001b[?25l-\b \b\\\b \bdone\r\n",
      "\u001b[?25hRequirement already satisfied: Pillow>=7.1 in /opt/conda/lib/python3.7/site-packages (from detectron2==0.6) (8.2.0)\r\n",
      "Requirement already satisfied: matplotlib in /opt/conda/lib/python3.7/site-packages (from detectron2==0.6) (3.5.0)\r\n",
      "Collecting pycocotools>=2.0.2\r\n",
      "  Downloading pycocotools-2.0.4.tar.gz (106 kB)\r\n",
      "     |████████████████████████████████| 106 kB 189 kB/s            \r\n",
      "\u001b[?25h  Installing build dependencies ... \u001b[?25l-\b \b\\\b \b|\b \b/\b \b-\b \b\\\b \b|\b \b/\b \b-\b \b\\\b \b|\b \b/\b \bdone\r\n",
      "\u001b[?25h  Getting requirements to build wheel ... \u001b[?25l-\b \b\\\b \b|\b \b/\b \bdone\r\n",
      "\u001b[?25h  Preparing metadata (pyproject.toml) ... \u001b[?25l-\b \b\\\b \b|\b \b/\b \bdone\r\n",
      "\u001b[?25hRequirement already satisfied: termcolor>=1.1 in /opt/conda/lib/python3.7/site-packages (from detectron2==0.6) (1.1.0)\r\n",
      "Requirement already satisfied: yacs>=0.1.8 in /opt/conda/lib/python3.7/site-packages (from detectron2==0.6) (0.1.8)\r\n",
      "Requirement already satisfied: tabulate in /opt/conda/lib/python3.7/site-packages (from detectron2==0.6) (0.8.9)\r\n",
      "Requirement already satisfied: cloudpickle in /opt/conda/lib/python3.7/site-packages (from detectron2==0.6) (2.0.0)\r\n",
      "Requirement already satisfied: tqdm>4.29.0 in /opt/conda/lib/python3.7/site-packages (from detectron2==0.6) (4.62.3)\r\n",
      "Requirement already satisfied: tensorboard in /opt/conda/lib/python3.7/site-packages (from detectron2==0.6) (2.6.0)\r\n",
      "Collecting fvcore<0.1.6,>=0.1.5\r\n",
      "  Downloading fvcore-0.1.5.post20220305.tar.gz (50 kB)\r\n",
      "     |████████████████████████████████| 50 kB 868 kB/s            \r\n",
      "\u001b[?25h  Preparing metadata (setup.py) ... \u001b[?25l-\b \bdone\r\n",
      "\u001b[?25hCollecting iopath<0.1.10,>=0.1.7\r\n",
      "  Downloading iopath-0.1.9-py3-none-any.whl (27 kB)\r\n",
      "Requirement already satisfied: future in /opt/conda/lib/python3.7/site-packages (from detectron2==0.6) (0.18.2)\r\n",
      "Requirement already satisfied: pydot in /opt/conda/lib/python3.7/site-packages (from detectron2==0.6) (1.4.2)\r\n",
      "Collecting omegaconf>=2.1\r\n",
      "  Downloading omegaconf-2.2.0-py3-none-any.whl (77 kB)\r\n",
      "     |████████████████████████████████| 77 kB 860 kB/s            \r\n",
      "\u001b[?25hCollecting hydra-core>=1.1\r\n",
      "  Downloading hydra_core-1.1.2-py3-none-any.whl (147 kB)\r\n",
      "     |████████████████████████████████| 147 kB 1.1 MB/s            \r\n",
      "\u001b[?25hCollecting black==21.4b2\r\n",
      "  Downloading black-21.4b2-py3-none-any.whl (130 kB)\r\n",
      "     |████████████████████████████████| 130 kB 2.1 MB/s            \r\n",
      "\u001b[?25hRequirement already satisfied: scipy>1.5.1 in /opt/conda/lib/python3.7/site-packages (from detectron2==0.6) (1.7.2)\r\n",
      "Requirement already satisfied: pathspec<1,>=0.8.1 in /opt/conda/lib/python3.7/site-packages (from black==21.4b2->detectron2==0.6) (0.9.0)\r\n",
      "Requirement already satisfied: click>=7.1.2 in /opt/conda/lib/python3.7/site-packages (from black==21.4b2->detectron2==0.6) (8.0.3)\r\n",
      "Requirement already satisfied: typed-ast>=1.4.2 in /opt/conda/lib/python3.7/site-packages (from black==21.4b2->detectron2==0.6) (1.5.0)\r\n",
      "Requirement already satisfied: toml>=0.10.1 in /opt/conda/lib/python3.7/site-packages (from black==21.4b2->detectron2==0.6) (0.10.2)\r\n",
      "Requirement already satisfied: mypy-extensions>=0.4.3 in /opt/conda/lib/python3.7/site-packages (from black==21.4b2->detectron2==0.6) (0.4.3)\r\n",
      "Requirement already satisfied: typing-extensions>=3.7.4 in /opt/conda/lib/python3.7/site-packages (from black==21.4b2->detectron2==0.6) (3.10.0.2)\r\n",
      "Requirement already satisfied: appdirs in /opt/conda/lib/python3.7/site-packages (from black==21.4b2->detectron2==0.6) (1.4.4)\r\n",
      "Requirement already satisfied: regex>=2020.1.8 in /opt/conda/lib/python3.7/site-packages (from black==21.4b2->detectron2==0.6) (2021.11.10)\r\n",
      "Requirement already satisfied: numpy in /opt/conda/lib/python3.7/site-packages (from fvcore<0.1.6,>=0.1.5->detectron2==0.6) (1.19.5)\r\n",
      "Requirement already satisfied: pyyaml>=5.1 in /opt/conda/lib/python3.7/site-packages (from fvcore<0.1.6,>=0.1.5->detectron2==0.6) (6.0)\r\n",
      "Collecting importlib-resources<5.3\r\n",
      "  Downloading importlib_resources-5.2.3-py3-none-any.whl (27 kB)\r\n",
      "Collecting omegaconf>=2.1\r\n",
      "  Downloading omegaconf-2.1.2-py3-none-any.whl (74 kB)\r\n",
      "     |████████████████████████████████| 74 kB 1.4 MB/s            \r\n",
      "\u001b[?25hCollecting antlr4-python3-runtime==4.8\r\n",
      "  Downloading antlr4-python3-runtime-4.8.tar.gz (112 kB)\r\n",
      "     |████████████████████████████████| 112 kB 2.1 MB/s            \r\n",
      "\u001b[?25h  Preparing metadata (setup.py) ... \u001b[?25l-\b \bdone\r\n",
      "\u001b[?25hRequirement already satisfied: portalocker in /opt/conda/lib/python3.7/site-packages (from iopath<0.1.10,>=0.1.7->detectron2==0.6) (2.3.2)\r\n",
      "Requirement already satisfied: pyparsing>=2.2.1 in /opt/conda/lib/python3.7/site-packages (from matplotlib->detectron2==0.6) (3.0.6)\r\n",
      "Requirement already satisfied: fonttools>=4.22.0 in /opt/conda/lib/python3.7/site-packages (from matplotlib->detectron2==0.6) (4.28.2)\r\n",
      "Requirement already satisfied: cycler>=0.10 in /opt/conda/lib/python3.7/site-packages (from matplotlib->detectron2==0.6) (0.11.0)\r\n",
      "Requirement already satisfied: python-dateutil>=2.7 in /opt/conda/lib/python3.7/site-packages (from matplotlib->detectron2==0.6) (2.8.0)\r\n",
      "Requirement already satisfied: packaging>=20.0 in /opt/conda/lib/python3.7/site-packages (from matplotlib->detectron2==0.6) (21.0)\r\n",
      "Requirement already satisfied: setuptools-scm>=4 in /opt/conda/lib/python3.7/site-packages (from matplotlib->detectron2==0.6) (6.3.2)\r\n",
      "Requirement already satisfied: kiwisolver>=1.0.1 in /opt/conda/lib/python3.7/site-packages (from matplotlib->detectron2==0.6) (1.3.2)\r\n",
      "Requirement already satisfied: requests<3,>=2.21.0 in /opt/conda/lib/python3.7/site-packages (from tensorboard->detectron2==0.6) (2.25.1)\r\n",
      "Requirement already satisfied: tensorboard-plugin-wit>=1.6.0 in /opt/conda/lib/python3.7/site-packages (from tensorboard->detectron2==0.6) (1.8.0)\r\n",
      "Requirement already satisfied: tensorboard-data-server<0.7.0,>=0.6.0 in /opt/conda/lib/python3.7/site-packages (from tensorboard->detectron2==0.6) (0.6.1)\r\n",
      "Requirement already satisfied: protobuf>=3.6.0 in /opt/conda/lib/python3.7/site-packages (from tensorboard->detectron2==0.6) (3.19.1)\r\n",
      "Requirement already satisfied: absl-py>=0.4 in /opt/conda/lib/python3.7/site-packages (from tensorboard->detectron2==0.6) (0.15.0)\r\n",
      "Requirement already satisfied: werkzeug>=0.11.15 in /opt/conda/lib/python3.7/site-packages (from tensorboard->detectron2==0.6) (2.0.2)\r\n",
      "Requirement already satisfied: google-auth<2,>=1.6.3 in /opt/conda/lib/python3.7/site-packages (from tensorboard->detectron2==0.6) (1.35.0)\r\n",
      "Requirement already satisfied: grpcio>=1.24.3 in /opt/conda/lib/python3.7/site-packages (from tensorboard->detectron2==0.6) (1.42.0)\r\n",
      "Requirement already satisfied: setuptools>=41.0.0 in /opt/conda/lib/python3.7/site-packages (from tensorboard->detectron2==0.6) (59.1.1)\r\n",
      "Requirement already satisfied: wheel>=0.26 in /opt/conda/lib/python3.7/site-packages (from tensorboard->detectron2==0.6) (0.37.0)\r\n",
      "Requirement already satisfied: google-auth-oauthlib<0.5,>=0.4.1 in /opt/conda/lib/python3.7/site-packages (from tensorboard->detectron2==0.6) (0.4.6)\r\n",
      "Requirement already satisfied: markdown>=2.6.8 in /opt/conda/lib/python3.7/site-packages (from tensorboard->detectron2==0.6) (3.3.6)\r\n",
      "Requirement already satisfied: six in /opt/conda/lib/python3.7/site-packages (from absl-py>=0.4->tensorboard->detectron2==0.6) (1.16.0)\r\n",
      "Requirement already satisfied: importlib-metadata in /opt/conda/lib/python3.7/site-packages (from click>=7.1.2->black==21.4b2->detectron2==0.6) (4.8.2)\r\n",
      "Requirement already satisfied: rsa<5,>=3.1.4 in /opt/conda/lib/python3.7/site-packages (from google-auth<2,>=1.6.3->tensorboard->detectron2==0.6) (4.7.2)\r\n",
      "Requirement already satisfied: pyasn1-modules>=0.2.1 in /opt/conda/lib/python3.7/site-packages (from google-auth<2,>=1.6.3->tensorboard->detectron2==0.6) (0.2.7)\r\n",
      "Requirement already satisfied: cachetools<5.0,>=2.0.0 in /opt/conda/lib/python3.7/site-packages (from google-auth<2,>=1.6.3->tensorboard->detectron2==0.6) (4.2.4)\r\n",
      "Requirement already satisfied: requests-oauthlib>=0.7.0 in /opt/conda/lib/python3.7/site-packages (from google-auth-oauthlib<0.5,>=0.4.1->tensorboard->detectron2==0.6) (1.3.0)\r\n",
      "Requirement already satisfied: zipp>=3.1.0 in /opt/conda/lib/python3.7/site-packages (from importlib-resources<5.3->hydra-core>=1.1->detectron2==0.6) (3.6.0)\r\n",
      "Requirement already satisfied: idna<3,>=2.5 in /opt/conda/lib/python3.7/site-packages (from requests<3,>=2.21.0->tensorboard->detectron2==0.6) (2.10)\r\n",
      "Requirement already satisfied: chardet<5,>=3.0.2 in /opt/conda/lib/python3.7/site-packages (from requests<3,>=2.21.0->tensorboard->detectron2==0.6) (4.0.0)\r\n",
      "Requirement already satisfied: certifi>=2017.4.17 in /opt/conda/lib/python3.7/site-packages (from requests<3,>=2.21.0->tensorboard->detectron2==0.6) (2021.10.8)\r\n",
      "Requirement already satisfied: urllib3<1.27,>=1.21.1 in /opt/conda/lib/python3.7/site-packages (from requests<3,>=2.21.0->tensorboard->detectron2==0.6) (1.26.7)\r\n",
      "Requirement already satisfied: tomli>=1.0.0 in /opt/conda/lib/python3.7/site-packages (from setuptools-scm>=4->matplotlib->detectron2==0.6) (1.2.2)\r\n",
      "Requirement already satisfied: pyasn1<0.5.0,>=0.4.6 in /opt/conda/lib/python3.7/site-packages (from pyasn1-modules>=0.2.1->google-auth<2,>=1.6.3->tensorboard->detectron2==0.6) (0.4.8)\r\n",
      "Requirement already satisfied: oauthlib>=3.0.0 in /opt/conda/lib/python3.7/site-packages (from requests-oauthlib>=0.7.0->google-auth-oauthlib<0.5,>=0.4.1->tensorboard->detectron2==0.6) (3.1.1)\r\n",
      "Building wheels for collected packages: detectron2, fvcore, antlr4-python3-runtime, pycocotools\r\n",
      "  Building wheel for detectron2 (setup.py) ... \u001b[?25l-\b \b\\\b \b|\b \b/\b \b-\b \b\\\b \b|\b \b/\b \b-\b \b\\\b \b|\b \b/\b \b-\b \b\\\b \b|\b \b/\b \b-\b \b\\\b \bdone\r\n",
      "\u001b[?25h  Created wheel for detectron2: filename=detectron2-0.6-cp37-cp37m-linux_x86_64.whl size=6580602 sha256=1a59fa79f3003c85cdb82dab25581c6114c5ad4be75c3133ba005dcf11a9d0fe\r\n",
      "  Stored in directory: /tmp/pip-ephem-wheel-cache-o_tc3s8e/wheels/07/dc/32/0322cb484dbefab8b9366bfedbaff5060ac7d149d69c27ca5d\r\n",
      "  Building wheel for fvcore (setup.py) ... \u001b[?25l-\b \b\\\b \bdone\r\n",
      "\u001b[?25h  Created wheel for fvcore: filename=fvcore-0.1.5.post20220305-py3-none-any.whl size=61218 sha256=76ed5a10ec000448ae1694c3b1ad82c0154d52d28501560ef8fe03aa55123f56\r\n",
      "  Stored in directory: /root/.cache/pip/wheels/b5/b7/6e/43b1693d06fac3633af48db68557513b0a37ab38b0a8b798f9\r\n",
      "  Building wheel for antlr4-python3-runtime (setup.py) ... \u001b[?25l-\b \b\\\b \bdone\r\n",
      "\u001b[?25h  Created wheel for antlr4-python3-runtime: filename=antlr4_python3_runtime-4.8-py3-none-any.whl size=141230 sha256=f12d7b284aaeb1467a825ba8d663298d591c92067a9788f98f1c526bd3ee0ecd\r\n",
      "  Stored in directory: /root/.cache/pip/wheels/ca/33/b7/336836125fc9bb4ceaa4376d8abca10ca8bc84ddc824baea6c\r\n",
      "  Building wheel for pycocotools (pyproject.toml) ... \u001b[?25l-\b \b\\\b \b|\b \b/\b \b-\b \b\\\b \b|\b \b/\b \b-\b \b\\\b \b|\b \bdone\r\n",
      "\u001b[?25h  Created wheel for pycocotools: filename=pycocotools-2.0.4-cp37-cp37m-linux_x86_64.whl size=370026 sha256=9af34e10f2f17ec54ec56224fa29bc04ef9fec811f129de8ab0c741e1186d554\r\n",
      "  Stored in directory: /root/.cache/pip/wheels/a3/5f/fa/f011e578cc76e1fc5be8dce30b3eb9fd00f337e744b3bba59b\r\n",
      "Successfully built detectron2 fvcore antlr4-python3-runtime pycocotools\r\n",
      "Installing collected packages: antlr4-python3-runtime, omegaconf, iopath, importlib-resources, pycocotools, hydra-core, fvcore, black, detectron2\r\n",
      "  Attempting uninstall: importlib-resources\r\n",
      "    Found existing installation: importlib-resources 5.4.0\r\n",
      "    Uninstalling importlib-resources-5.4.0:\r\n",
      "      Successfully uninstalled importlib-resources-5.4.0\r\n",
      "  Attempting uninstall: black\r\n",
      "    Found existing installation: black 21.10b0\r\n",
      "    Uninstalling black-21.10b0:\r\n",
      "      Successfully uninstalled black-21.10b0\r\n",
      "Successfully installed antlr4-python3-runtime-4.8 black-21.4b2 detectron2-0.6 fvcore-0.1.5.post20220305 hydra-core-1.1.2 importlib-resources-5.2.3 iopath-0.1.9 omegaconf-2.1.2 pycocotools-2.0.4\r\n",
      "\u001b[33mWARNING: Running pip as the 'root' user can result in broken permissions and conflicting behaviour with the system package manager. It is recommended to use a virtual environment instead: https://pip.pypa.io/warnings/venv\u001b[0m\r\n"
     ]
    }
   ],
   "source": [
     "# NOTE(review): unpinned install from detectron2 master -- pin a commit/tag for reproducibility\n",
     "!pip install 'git+https://github.com/facebookresearch/detectron2.git'"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "9f510e88",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2022-04-12T20:39:30.607664Z",
     "iopub.status.busy": "2022-04-12T20:39:30.606763Z",
     "iopub.status.idle": "2022-04-12T20:39:31.711296Z",
     "shell.execute_reply": "2022-04-12T20:39:31.710828Z"
    },
    "executionInfo": {
     "elapsed": 3,
     "status": "ok",
     "timestamp": 1638913128554,
     "user": {
      "displayName": "Konrad Banachewicz",
      "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhI23dFPH4TCCx-UXI_tTLJRhGGqpOO_xMfc26jqSA=s64",
      "userId": "15372017465249196723"
     },
     "user_tz": -60
    },
    "id": "m9vQqUjx_gCJ",
    "papermill": {
     "duration": 1.159955,
     "end_time": "2022-04-12T20:39:31.711426",
     "exception": false,
     "start_time": "2022-04-12T20:39:30.551471",
     "status": "completed"
    },
    "tags": []
   },
   "outputs": [],
   "source": [
    "from datetime import datetime\n",
    "import os\n",
    "\n",
    "import pandas as pd\n",
    "import numpy as np\n",
    "import pycocotools.mask as mask_util\n",
    "import detectron2\n",
    "from pathlib import Path\n",
    "import random, cv2, os\n",
    "import matplotlib.pyplot as plt\n",
    "# import some common detectron2 utilities\n",
    "from detectron2 import model_zoo\n",
    "from detectron2.engine import DefaultPredictor, DefaultTrainer\n",
    "from detectron2.config import get_cfg\n",
    "from detectron2.utils.visualizer import Visualizer, ColorMode\n",
    "from detectron2.data import MetadataCatalog, DatasetCatalog\n",
    "from detectron2.data.datasets import register_coco_instances\n",
    "from detectron2.utils.logger import setup_logger\n",
    "from detectron2.evaluation.evaluator import DatasetEvaluator\n",
    "from detectron2.engine import BestCheckpointer\n",
    "from detectron2.checkpoint import DetectionCheckpointer\n",
    "setup_logger()\n",
    "\n",
    "import torch"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "4f5668fe",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2022-04-12T20:39:31.796220Z",
     "iopub.status.busy": "2022-04-12T20:39:31.795406Z",
     "iopub.status.idle": "2022-04-12T20:39:31.797320Z",
     "shell.execute_reply": "2022-04-12T20:39:31.797744Z",
     "shell.execute_reply.started": "2022-01-06T22:14:34.375602Z"
    },
    "executionInfo": {
     "elapsed": 8,
     "status": "ok",
     "timestamp": 1638913556458,
     "user": {
      "displayName": "Konrad Banachewicz",
      "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhI23dFPH4TCCx-UXI_tTLJRhGGqpOO_xMfc26jqSA=s64",
      "userId": "15372017465249196723"
     },
     "user_tz": -60
    },
    "id": "f244a057",
    "papermill": {
     "duration": 0.045647,
     "end_time": "2022-04-12T20:39:31.797870",
     "exception": false,
     "start_time": "2022-04-12T20:39:31.752223",
     "status": "completed"
    },
    "tags": []
   },
   "outputs": [],
   "source": [
     "class CFG:\n",
     "    \"\"\"Static configuration for this training run.\"\"\"\n",
     "    # fold index selecting the train/valid annotation files below\n",
     "    wfold = 4\n",
     "    # competition image data root\n",
     "    data_folder = '../input/sartorius-cell-instance-segmentation/'\n",
     "    # pre-built COCO-format annotation JSONs (see 'Prepare annotations' section)\n",
     "    anno_folder = '../input/sartoriusannotations/'\n",
     "    # detectron2 model-zoo config, resolved under COCO-InstanceSegmentation/\n",
     "    model_arch = 'mask_rcnn_R_50_FPN_3x.yaml'\n",
     "    # total solver iterations (becomes cfg.SOLVER.MAX_ITER)\n",
     "    nof_iters = 10000\n",
     "    # global RNG seed passed to seed_everything()\n",
     "    seed = 45"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "87dddde7",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2022-04-12T20:39:31.882109Z",
     "iopub.status.busy": "2022-04-12T20:39:31.881451Z",
     "iopub.status.idle": "2022-04-12T20:39:31.886557Z",
     "shell.execute_reply": "2022-04-12T20:39:31.886088Z"
    },
    "executionInfo": {
     "elapsed": 7,
     "status": "ok",
     "timestamp": 1638913556458,
     "user": {
      "displayName": "Konrad Banachewicz",
      "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhI23dFPH4TCCx-UXI_tTLJRhGGqpOO_xMfc26jqSA=s64",
      "userId": "15372017465249196723"
     },
     "user_tz": -60
    },
    "id": "MQSXy_HJXDGj",
    "papermill": {
     "duration": 0.049357,
     "end_time": "2022-04-12T20:39:31.886666",
     "exception": false,
     "start_time": "2022-04-12T20:39:31.837309",
     "status": "completed"
    },
    "tags": []
   },
   "outputs": [],
   "source": [
     "def seed_everything(seed):\n",
     "    \"\"\"Seed every RNG source used here (python, numpy, torch) for reproducibility.\n",
     "\n",
     "    Args:\n",
     "        seed (int): value applied to all RNGs and PYTHONHASHSEED.\n",
     "    \"\"\"\n",
     "    random.seed(seed)\n",
     "    os.environ['PYTHONHASHSEED'] = str(seed)\n",
     "    np.random.seed(seed)\n",
     "    torch.manual_seed(seed)\n",
     "    # NOTE(review): seeds the current GPU only; a multi-GPU run would also\n",
     "    # need torch.cuda.manual_seed_all(seed) -- confirm single-GPU assumption\n",
     "    torch.cuda.manual_seed(seed)\n",
     "    # trade some speed for deterministic cuDNN kernels\n",
     "    torch.backends.cudnn.deterministic = True\n",
     "\n",
     "seed_everything(CFG.seed)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "9cb3ff4e",
   "metadata": {
    "papermill": {
     "duration": 0.03983,
     "end_time": "2022-04-12T20:39:31.967403",
     "exception": false,
     "start_time": "2022-04-12T20:39:31.927573",
     "status": "completed"
    },
    "tags": []
   },
   "source": [
    "# Prepare annotations"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "761c9523",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2022-04-12T20:39:32.052166Z",
     "iopub.status.busy": "2022-04-12T20:39:32.051317Z",
     "iopub.status.idle": "2022-04-12T20:39:32.053093Z",
     "shell.execute_reply": "2022-04-12T20:39:32.053551Z",
     "shell.execute_reply.started": "2022-01-06T19:56:40.554924Z"
    },
    "papermill": {
     "duration": 0.046106,
     "end_time": "2022-04-12T20:39:32.053681",
     "exception": false,
     "start_time": "2022-04-12T20:39:32.007575",
     "status": "completed"
    },
    "tags": []
   },
   "outputs": [],
   "source": [
    "# Done in: https://www.kaggle.com/konradb/prepare-annotations"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "bbbc152b",
   "metadata": {
    "id": "ac0040ca",
    "papermill": {
     "duration": 0.039313,
     "end_time": "2022-04-12T20:39:32.132480",
     "exception": false,
     "start_time": "2022-04-12T20:39:32.093167",
     "status": "completed"
    },
    "tags": []
   },
   "source": [
    "# Functions"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "7aaf8469",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2022-04-12T20:39:32.225981Z",
     "iopub.status.busy": "2022-04-12T20:39:32.225154Z",
     "iopub.status.idle": "2022-04-12T20:39:32.227150Z",
     "shell.execute_reply": "2022-04-12T20:39:32.227531Z"
    },
    "executionInfo": {
     "elapsed": 6,
     "status": "ok",
     "timestamp": 1638913556458,
     "user": {
      "displayName": "Konrad Banachewicz",
      "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhI23dFPH4TCCx-UXI_tTLJRhGGqpOO_xMfc26jqSA=s64",
      "userId": "15372017465249196723"
     },
     "user_tz": -60
    },
    "id": "f93d93bb",
    "papermill": {
     "duration": 0.055609,
     "end_time": "2022-04-12T20:39:32.227662",
     "exception": false,
     "start_time": "2022-04-12T20:39:32.172053",
     "status": "completed"
    },
    "tags": []
   },
   "outputs": [],
   "source": [
    "# Taken from https://www.kaggle.com/theoviel/competition-metric-map-iou\n",
    "def precision_at(threshold, iou):\n",
    "    matches = iou > threshold\n",
    "    true_positives = np.sum(matches, axis=1) == 1  # Correct objects\n",
    "    false_positives = np.sum(matches, axis=0) == 0  # Missed objects\n",
    "    false_negatives = np.sum(matches, axis=1) == 0  # Extra objects\n",
    "    return np.sum(true_positives), np.sum(false_positives), np.sum(false_negatives)\n",
    "\n",
    "def score(pred, targ):\n",
    "    pred_masks = pred['instances'].pred_masks.cpu().numpy()\n",
    "    enc_preds = [mask_util.encode(np.asarray(p, order='F')) for p in pred_masks]\n",
    "    enc_targs = list(map(lambda x:x['segmentation'], targ))\n",
    "    ious = mask_util.iou(enc_preds, enc_targs, [0]*len(enc_targs))\n",
    "    prec = []\n",
    "    for t in np.arange(0.5, 1.0, 0.05):\n",
    "        tp, fp, fn = precision_at(t, ious)\n",
    "        p = tp / (tp + fp + fn)\n",
    "        prec.append(p)\n",
    "    return np.mean(prec)\n",
    "\n",
    "class MAPIOUEvaluator(DatasetEvaluator):\n",
    "    def __init__(self, dataset_name):\n",
    "        dataset_dicts = DatasetCatalog.get(dataset_name)\n",
    "        self.annotations_cache = {item['image_id']:item['annotations'] for item in dataset_dicts}\n",
    "            \n",
    "    def reset(self):\n",
    "        self.scores = []\n",
    "\n",
    "    def process(self, inputs, outputs):\n",
    "        for inp, out in zip(inputs, outputs):\n",
    "            if len(out['instances']) == 0:\n",
    "                self.scores.append(0)    \n",
    "            else:\n",
    "                targ = self.annotations_cache[inp['image_id']]\n",
    "                self.scores.append(score(out, targ))\n",
    "\n",
    "    def evaluate(self):\n",
    "        return {\"MaP IoU\": np.mean(self.scores)}\n",
    "\n",
    "class Trainer(DefaultTrainer):\n",
    "    @classmethod\n",
    "    def build_evaluator(cls, cfg, dataset_name, output_folder=None):\n",
    "        return MAPIOUEvaluator(dataset_name)\n",
    "    "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "2d41ab2e",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2022-04-12T20:39:32.312698Z",
     "iopub.status.busy": "2022-04-12T20:39:32.311919Z",
     "iopub.status.idle": "2022-04-12T20:39:32.313855Z",
     "shell.execute_reply": "2022-04-12T20:39:32.314300Z"
    },
    "executionInfo": {
     "elapsed": 9,
     "status": "ok",
     "timestamp": 1638913556461,
     "user": {
      "displayName": "Konrad Banachewicz",
      "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhI23dFPH4TCCx-UXI_tTLJRhGGqpOO_xMfc26jqSA=s64",
      "userId": "15372017465249196723"
     },
     "user_tz": -60
    },
    "id": "ypPNYD2J-Z7q",
    "papermill": {
     "duration": 0.047432,
     "end_time": "2022-04-12T20:39:32.314422",
     "exception": false,
     "start_time": "2022-04-12T20:39:32.266990",
     "status": "completed"
    },
    "tags": []
   },
   "outputs": [],
   "source": [
    "class Trainer(DefaultTrainer):\n",
    "    @classmethod\n",
    "    def build_evaluator(cls, cfg, dataset_name, output_folder=None):\n",
    "        return MAPIOUEvaluator(dataset_name)\n",
    "\n",
    "    def build_hooks(self):\n",
    "\n",
    "        # copy of cfg\n",
    "        cfg = self.cfg.clone()\n",
    "\n",
    "        # build the original model hooks\n",
    "        hooks = super().build_hooks()\n",
    "\n",
    "        # add the best checkpointer hook\n",
    "        hooks.insert(-1, BestCheckpointer(cfg.TEST.EVAL_PERIOD, \n",
    "                                         DetectionCheckpointer(self.model, cfg.OUTPUT_DIR),\n",
    "                                         \"MaP IoU\",\n",
    "                                         \"max\",\n",
    "                                         ))\n",
    "        return hooks"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "a3e58235",
   "metadata": {
    "id": "0818fb28",
    "papermill": {
     "duration": 0.040299,
     "end_time": "2022-04-12T20:39:32.394452",
     "exception": false,
     "start_time": "2022-04-12T20:39:32.354153",
     "status": "completed"
    },
    "tags": []
   },
   "source": [
    "# Data"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "a587501b",
   "metadata": {
    "id": "abde150d",
    "papermill": {
     "duration": 0.039616,
     "end_time": "2022-04-12T20:39:32.473879",
     "exception": false,
     "start_time": "2022-04-12T20:39:32.434263",
     "status": "completed"
    },
    "tags": []
   },
   "source": [
     "Set up the training / validation split for this fold."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "9cfd9b6c",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2022-04-12T20:39:32.559899Z",
     "iopub.status.busy": "2022-04-12T20:39:32.559222Z",
     "iopub.status.idle": "2022-04-12T20:39:35.886967Z",
     "shell.execute_reply": "2022-04-12T20:39:35.887612Z"
    },
    "executionInfo": {
     "elapsed": 1955,
     "status": "ok",
     "timestamp": 1638913558408,
     "user": {
      "displayName": "Konrad Banachewicz",
      "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhI23dFPH4TCCx-UXI_tTLJRhGGqpOO_xMfc26jqSA=s64",
      "userId": "15372017465249196723"
     },
     "user_tz": -60
    },
    "id": "56bf8b45",
    "outputId": "69a2fb7b-f139-4e07-8b37-4f7d9dd592b0",
    "papermill": {
     "duration": 3.37415,
     "end_time": "2022-04-12T20:39:35.887839",
     "exception": false,
     "start_time": "2022-04-12T20:39:32.513689",
     "status": "completed"
    },
    "tags": []
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[32m[04/12 20:39:34 d2.data.datasets.coco]: \u001b[0mLoading ../input/sartoriusannotations/annotations_train_f4.json takes 1.75 seconds.\n",
      "\u001b[32m[04/12 20:39:34 d2.data.datasets.coco]: \u001b[0mLoaded 485 images in COCO format from ../input/sartoriusannotations/annotations_train_f4.json\n"
     ]
    }
   ],
   "source": [
    "dataDir=Path(CFG.data_folder)\n",
    "register_coco_instances('sartorius_train',{}, CFG.anno_folder + 'annotations_train_f'+str(CFG.wfold)+'.json', dataDir)\n",
    "register_coco_instances('sartorius_val',{}, CFG.anno_folder + 'annotations_valid_f'+str(CFG.wfold)+'.json', dataDir)\n",
    "metadata = MetadataCatalog.get('sartorius_train')\n",
    "train_ds = DatasetCatalog.get('sartorius_train')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "586964da",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2022-04-12T20:39:36.042945Z",
     "iopub.status.busy": "2022-04-12T20:39:36.041982Z",
     "iopub.status.idle": "2022-04-12T20:39:39.325193Z",
     "shell.execute_reply": "2022-04-12T20:39:39.324689Z"
    },
    "executionInfo": {
     "elapsed": 2499,
     "status": "ok",
     "timestamp": 1638913560904,
     "user": {
      "displayName": "Konrad Banachewicz",
      "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhI23dFPH4TCCx-UXI_tTLJRhGGqpOO_xMfc26jqSA=s64",
      "userId": "15372017465249196723"
     },
     "user_tz": -60
    },
    "id": "zYKPpFeTeMGO",
    "outputId": "c1c4dd00-6f80-4ec3-e74a-38f10c400b45",
    "papermill": {
     "duration": 3.359079,
     "end_time": "2022-04-12T20:39:39.325323",
     "exception": false,
     "start_time": "2022-04-12T20:39:35.966244",
     "status": "completed"
    },
    "tags": []
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[32m[04/12 20:39:37 d2.data.datasets.coco]: \u001b[0mLoading ../input/sartoriusannotations/annotations_train_f4.json takes 1.53 seconds.\n",
      "\u001b[32m[04/12 20:39:37 d2.data.datasets.coco]: \u001b[0mLoaded 485 images in COCO format from ../input/sartoriusannotations/annotations_train_f4.json\n"
     ]
    }
   ],
   "source": [
     "# NOTE(review): duplicates the DatasetCatalog.get call from the previous cell --\n",
     "# appears redundant; presumably kept to re-load the dataset, confirm and remove\n",
     "train_ds = DatasetCatalog.get('sartorius_train')"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "c2b6072e",
   "metadata": {
    "id": "363e371d",
    "papermill": {
     "duration": 0.04199,
     "end_time": "2022-04-12T20:39:39.409736",
     "exception": false,
     "start_time": "2022-04-12T20:39:39.367746",
     "status": "completed"
    },
    "tags": []
   },
   "source": [
    "# Model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "id": "ca7f3850",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2022-04-12T20:39:39.503981Z",
     "iopub.status.busy": "2022-04-12T20:39:39.503359Z",
     "iopub.status.idle": "2022-04-12T20:39:42.546262Z",
     "shell.execute_reply": "2022-04-12T20:39:42.545783Z"
    },
    "executionInfo": {
     "elapsed": 2180,
     "status": "ok",
     "timestamp": 1638913563073,
     "user": {
      "displayName": "Konrad Banachewicz",
      "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhI23dFPH4TCCx-UXI_tTLJRhGGqpOO_xMfc26jqSA=s64",
      "userId": "15372017465249196723"
     },
     "user_tz": -60
    },
    "id": "94ecf49b",
    "outputId": "04e776f2-3980-4bce-db2a-4de8fcad18db",
    "papermill": {
     "duration": 3.094891,
     "end_time": "2022-04-12T20:39:42.546396",
     "exception": false,
     "start_time": "2022-04-12T20:39:39.451505",
     "status": "completed"
    },
    "tags": []
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[32m[04/12 20:39:40 d2.data.datasets.coco]: \u001b[0mLoading ../input/sartoriusannotations/annotations_train_f4.json takes 1.43 seconds.\n",
      "\u001b[32m[04/12 20:39:40 d2.data.datasets.coco]: \u001b[0mLoaded 485 images in COCO format from ../input/sartoriusannotations/annotations_train_f4.json\n"
     ]
    }
   ],
   "source": [
    "cfg = get_cfg()\n",
    "cfg.INPUT.MASK_FORMAT='bitmask'\n",
    "cfg.merge_from_file(model_zoo.get_config_file('COCO-InstanceSegmentation/' + CFG.model_arch))\n",
    "cfg.DATASETS.TRAIN = (\"sartorius_train\",)\n",
    "cfg.DATASETS.TEST = (\"sartorius_val\",)\n",
    "cfg.DATALOADER.NUM_WORKERS = 2\n",
    "cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url('COCO-InstanceSegmentation/' + CFG.model_arch) \n",
    "cfg.SOLVER.IMS_PER_BATCH = 2\n",
    "cfg.SOLVER.BASE_LR = 0.001\n",
    "cfg.SOLVER.MAX_ITER = CFG.nof_iters    \n",
    "cfg.SOLVER.STEPS = []        \n",
    "cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 512    \n",
    "cfg.MODEL.ROI_HEADS.NUM_CLASSES = 3  \n",
    "cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = .4\n",
    "cfg.TEST.EVAL_PERIOD = len(DatasetCatalog.get('sartorius_train')) // cfg.SOLVER.IMS_PER_BATCH  "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "id": "0ad6b196",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2022-04-12T20:39:42.637079Z",
     "iopub.status.busy": "2022-04-12T20:39:42.635149Z",
     "iopub.status.idle": "2022-04-13T01:11:54.769002Z",
     "shell.execute_reply": "2022-04-13T01:11:54.768465Z"
    },
    "executionInfo": {
     "elapsed": 7630099,
     "status": "ok",
     "timestamp": 1638933812164,
     "user": {
      "displayName": "Konrad Banachewicz",
      "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhI23dFPH4TCCx-UXI_tTLJRhGGqpOO_xMfc26jqSA=s64",
      "userId": "15372017465249196723"
     },
     "user_tz": -60
    },
    "id": "db88dac3",
    "outputId": "3fbdd1e4-3344-4f74-fe18-2e8d91ae3b34",
    "papermill": {
     "duration": 16332.180096,
     "end_time": "2022-04-13T01:11:54.769156",
     "exception": false,
     "start_time": "2022-04-12T20:39:42.589060",
     "status": "completed"
    },
    "tags": []
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[32m[04/12 20:39:46 d2.engine.defaults]: \u001b[0mModel:\n",
      "GeneralizedRCNN(\n",
      "  (backbone): FPN(\n",
      "    (fpn_lateral2): Conv2d(256, 256, kernel_size=(1, 1), stride=(1, 1))\n",
      "    (fpn_output2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
      "    (fpn_lateral3): Conv2d(512, 256, kernel_size=(1, 1), stride=(1, 1))\n",
      "    (fpn_output3): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
      "    (fpn_lateral4): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1))\n",
      "    (fpn_output4): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
      "    (fpn_lateral5): Conv2d(2048, 256, kernel_size=(1, 1), stride=(1, 1))\n",
      "    (fpn_output5): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
      "    (top_block): LastLevelMaxPool()\n",
      "    (bottom_up): ResNet(\n",
      "      (stem): BasicStem(\n",
      "        (conv1): Conv2d(\n",
      "          3, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False\n",
      "          (norm): FrozenBatchNorm2d(num_features=64, eps=1e-05)\n",
      "        )\n",
      "      )\n",
      "      (res2): Sequential(\n",
      "        (0): BottleneckBlock(\n",
      "          (shortcut): Conv2d(\n",
      "            64, 256, kernel_size=(1, 1), stride=(1, 1), bias=False\n",
      "            (norm): FrozenBatchNorm2d(num_features=256, eps=1e-05)\n",
      "          )\n",
      "          (conv1): Conv2d(\n",
      "            64, 64, kernel_size=(1, 1), stride=(1, 1), bias=False\n",
      "            (norm): FrozenBatchNorm2d(num_features=64, eps=1e-05)\n",
      "          )\n",
      "          (conv2): Conv2d(\n",
      "            64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False\n",
      "            (norm): FrozenBatchNorm2d(num_features=64, eps=1e-05)\n",
      "          )\n",
      "          (conv3): Conv2d(\n",
      "            64, 256, kernel_size=(1, 1), stride=(1, 1), bias=False\n",
      "            (norm): FrozenBatchNorm2d(num_features=256, eps=1e-05)\n",
      "          )\n",
      "        )\n",
      "        (1): BottleneckBlock(\n",
      "          (conv1): Conv2d(\n",
      "            256, 64, kernel_size=(1, 1), stride=(1, 1), bias=False\n",
      "            (norm): FrozenBatchNorm2d(num_features=64, eps=1e-05)\n",
      "          )\n",
      "          (conv2): Conv2d(\n",
      "            64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False\n",
      "            (norm): FrozenBatchNorm2d(num_features=64, eps=1e-05)\n",
      "          )\n",
      "          (conv3): Conv2d(\n",
      "            64, 256, kernel_size=(1, 1), stride=(1, 1), bias=False\n",
      "            (norm): FrozenBatchNorm2d(num_features=256, eps=1e-05)\n",
      "          )\n",
      "        )\n",
      "        (2): BottleneckBlock(\n",
      "          (conv1): Conv2d(\n",
      "            256, 64, kernel_size=(1, 1), stride=(1, 1), bias=False\n",
      "            (norm): FrozenBatchNorm2d(num_features=64, eps=1e-05)\n",
      "          )\n",
      "          (conv2): Conv2d(\n",
      "            64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False\n",
      "            (norm): FrozenBatchNorm2d(num_features=64, eps=1e-05)\n",
      "          )\n",
      "          (conv3): Conv2d(\n",
      "            64, 256, kernel_size=(1, 1), stride=(1, 1), bias=False\n",
      "            (norm): FrozenBatchNorm2d(num_features=256, eps=1e-05)\n",
      "          )\n",
      "        )\n",
      "      )\n",
      "      (res3): Sequential(\n",
      "        (0): BottleneckBlock(\n",
      "          (shortcut): Conv2d(\n",
      "            256, 512, kernel_size=(1, 1), stride=(2, 2), bias=False\n",
      "            (norm): FrozenBatchNorm2d(num_features=512, eps=1e-05)\n",
      "          )\n",
      "          (conv1): Conv2d(\n",
      "            256, 128, kernel_size=(1, 1), stride=(2, 2), bias=False\n",
      "            (norm): FrozenBatchNorm2d(num_features=128, eps=1e-05)\n",
      "          )\n",
      "          (conv2): Conv2d(\n",
      "            128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False\n",
      "            (norm): FrozenBatchNorm2d(num_features=128, eps=1e-05)\n",
      "          )\n",
      "          (conv3): Conv2d(\n",
      "            128, 512, kernel_size=(1, 1), stride=(1, 1), bias=False\n",
      "            (norm): FrozenBatchNorm2d(num_features=512, eps=1e-05)\n",
      "          )\n",
      "        )\n",
      "        (1): BottleneckBlock(\n",
      "          (conv1): Conv2d(\n",
      "            512, 128, kernel_size=(1, 1), stride=(1, 1), bias=False\n",
      "            (norm): FrozenBatchNorm2d(num_features=128, eps=1e-05)\n",
      "          )\n",
      "          (conv2): Conv2d(\n",
      "            128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False\n",
      "            (norm): FrozenBatchNorm2d(num_features=128, eps=1e-05)\n",
      "          )\n",
      "          (conv3): Conv2d(\n",
      "            128, 512, kernel_size=(1, 1), stride=(1, 1), bias=False\n",
      "            (norm): FrozenBatchNorm2d(num_features=512, eps=1e-05)\n",
      "          )\n",
      "        )\n",
      "        (2): BottleneckBlock(\n",
      "          (conv1): Conv2d(\n",
      "            512, 128, kernel_size=(1, 1), stride=(1, 1), bias=False\n",
      "            (norm): FrozenBatchNorm2d(num_features=128, eps=1e-05)\n",
      "          )\n",
      "          (conv2): Conv2d(\n",
      "            128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False\n",
      "            (norm): FrozenBatchNorm2d(num_features=128, eps=1e-05)\n",
      "          )\n",
      "          (conv3): Conv2d(\n",
      "            128, 512, kernel_size=(1, 1), stride=(1, 1), bias=False\n",
      "            (norm): FrozenBatchNorm2d(num_features=512, eps=1e-05)\n",
      "          )\n",
      "        )\n",
      "        (3): BottleneckBlock(\n",
      "          (conv1): Conv2d(\n",
      "            512, 128, kernel_size=(1, 1), stride=(1, 1), bias=False\n",
      "            (norm): FrozenBatchNorm2d(num_features=128, eps=1e-05)\n",
      "          )\n",
      "          (conv2): Conv2d(\n",
      "            128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False\n",
      "            (norm): FrozenBatchNorm2d(num_features=128, eps=1e-05)\n",
      "          )\n",
      "          (conv3): Conv2d(\n",
      "            128, 512, kernel_size=(1, 1), stride=(1, 1), bias=False\n",
      "            (norm): FrozenBatchNorm2d(num_features=512, eps=1e-05)\n",
      "          )\n",
      "        )\n",
      "      )\n",
      "      (res4): Sequential(\n",
      "        (0): BottleneckBlock(\n",
      "          (shortcut): Conv2d(\n",
      "            512, 1024, kernel_size=(1, 1), stride=(2, 2), bias=False\n",
      "            (norm): FrozenBatchNorm2d(num_features=1024, eps=1e-05)\n",
      "          )\n",
      "          (conv1): Conv2d(\n",
      "            512, 256, kernel_size=(1, 1), stride=(2, 2), bias=False\n",
      "            (norm): FrozenBatchNorm2d(num_features=256, eps=1e-05)\n",
      "          )\n",
      "          (conv2): Conv2d(\n",
      "            256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False\n",
      "            (norm): FrozenBatchNorm2d(num_features=256, eps=1e-05)\n",
      "          )\n",
      "          (conv3): Conv2d(\n",
      "            256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False\n",
      "            (norm): FrozenBatchNorm2d(num_features=1024, eps=1e-05)\n",
      "          )\n",
      "        )\n",
      "        (1): BottleneckBlock(\n",
      "          (conv1): Conv2d(\n",
      "            1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False\n",
      "            (norm): FrozenBatchNorm2d(num_features=256, eps=1e-05)\n",
      "          )\n",
      "          (conv2): Conv2d(\n",
      "            256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False\n",
      "            (norm): FrozenBatchNorm2d(num_features=256, eps=1e-05)\n",
      "          )\n",
      "          (conv3): Conv2d(\n",
      "            256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False\n",
      "            (norm): FrozenBatchNorm2d(num_features=1024, eps=1e-05)\n",
      "          )\n",
      "        )\n",
      "        (2): BottleneckBlock(\n",
      "          (conv1): Conv2d(\n",
      "            1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False\n",
      "            (norm): FrozenBatchNorm2d(num_features=256, eps=1e-05)\n",
      "          )\n",
      "          (conv2): Conv2d(\n",
      "            256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False\n",
      "            (norm): FrozenBatchNorm2d(num_features=256, eps=1e-05)\n",
      "          )\n",
      "          (conv3): Conv2d(\n",
      "            256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False\n",
      "            (norm): FrozenBatchNorm2d(num_features=1024, eps=1e-05)\n",
      "          )\n",
      "        )\n",
      "        (3): BottleneckBlock(\n",
      "          (conv1): Conv2d(\n",
      "            1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False\n",
      "            (norm): FrozenBatchNorm2d(num_features=256, eps=1e-05)\n",
      "          )\n",
      "          (conv2): Conv2d(\n",
      "            256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False\n",
      "            (norm): FrozenBatchNorm2d(num_features=256, eps=1e-05)\n",
      "          )\n",
      "          (conv3): Conv2d(\n",
      "            256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False\n",
      "            (norm): FrozenBatchNorm2d(num_features=1024, eps=1e-05)\n",
      "          )\n",
      "        )\n",
      "        (4): BottleneckBlock(\n",
      "          (conv1): Conv2d(\n",
      "            1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False\n",
      "            (norm): FrozenBatchNorm2d(num_features=256, eps=1e-05)\n",
      "          )\n",
      "          (conv2): Conv2d(\n",
      "            256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False\n",
      "            (norm): FrozenBatchNorm2d(num_features=256, eps=1e-05)\n",
      "          )\n",
      "          (conv3): Conv2d(\n",
      "            256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False\n",
      "            (norm): FrozenBatchNorm2d(num_features=1024, eps=1e-05)\n",
      "          )\n",
      "        )\n",
      "        (5): BottleneckBlock(\n",
      "          (conv1): Conv2d(\n",
      "            1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False\n",
      "            (norm): FrozenBatchNorm2d(num_features=256, eps=1e-05)\n",
      "          )\n",
      "          (conv2): Conv2d(\n",
      "            256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False\n",
      "            (norm): FrozenBatchNorm2d(num_features=256, eps=1e-05)\n",
      "          )\n",
      "          (conv3): Conv2d(\n",
      "            256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False\n",
      "            (norm): FrozenBatchNorm2d(num_features=1024, eps=1e-05)\n",
      "          )\n",
      "        )\n",
      "      )\n",
      "      (res5): Sequential(\n",
      "        (0): BottleneckBlock(\n",
      "          (shortcut): Conv2d(\n",
      "            1024, 2048, kernel_size=(1, 1), stride=(2, 2), bias=False\n",
      "            (norm): FrozenBatchNorm2d(num_features=2048, eps=1e-05)\n",
      "          )\n",
      "          (conv1): Conv2d(\n",
      "            1024, 512, kernel_size=(1, 1), stride=(2, 2), bias=False\n",
      "            (norm): FrozenBatchNorm2d(num_features=512, eps=1e-05)\n",
      "          )\n",
      "          (conv2): Conv2d(\n",
      "            512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False\n",
      "            (norm): FrozenBatchNorm2d(num_features=512, eps=1e-05)\n",
      "          )\n",
      "          (conv3): Conv2d(\n",
      "            512, 2048, kernel_size=(1, 1), stride=(1, 1), bias=False\n",
      "            (norm): FrozenBatchNorm2d(num_features=2048, eps=1e-05)\n",
      "          )\n",
      "        )\n",
      "        (1): BottleneckBlock(\n",
      "          (conv1): Conv2d(\n",
      "            2048, 512, kernel_size=(1, 1), stride=(1, 1), bias=False\n",
      "            (norm): FrozenBatchNorm2d(num_features=512, eps=1e-05)\n",
      "          )\n",
      "          (conv2): Conv2d(\n",
      "            512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False\n",
      "            (norm): FrozenBatchNorm2d(num_features=512, eps=1e-05)\n",
      "          )\n",
      "          (conv3): Conv2d(\n",
      "            512, 2048, kernel_size=(1, 1), stride=(1, 1), bias=False\n",
      "            (norm): FrozenBatchNorm2d(num_features=2048, eps=1e-05)\n",
      "          )\n",
      "        )\n",
      "        (2): BottleneckBlock(\n",
      "          (conv1): Conv2d(\n",
      "            2048, 512, kernel_size=(1, 1), stride=(1, 1), bias=False\n",
      "            (norm): FrozenBatchNorm2d(num_features=512, eps=1e-05)\n",
      "          )\n",
      "          (conv2): Conv2d(\n",
      "            512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False\n",
      "            (norm): FrozenBatchNorm2d(num_features=512, eps=1e-05)\n",
      "          )\n",
      "          (conv3): Conv2d(\n",
      "            512, 2048, kernel_size=(1, 1), stride=(1, 1), bias=False\n",
      "            (norm): FrozenBatchNorm2d(num_features=2048, eps=1e-05)\n",
      "          )\n",
      "        )\n",
      "      )\n",
      "    )\n",
      "  )\n",
      "  (proposal_generator): RPN(\n",
      "    (rpn_head): StandardRPNHead(\n",
      "      (conv): Conv2d(\n",
      "        256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)\n",
      "        (activation): ReLU()\n",
      "      )\n",
      "      (objectness_logits): Conv2d(256, 3, kernel_size=(1, 1), stride=(1, 1))\n",
      "      (anchor_deltas): Conv2d(256, 12, kernel_size=(1, 1), stride=(1, 1))\n",
      "    )\n",
      "    (anchor_generator): DefaultAnchorGenerator(\n",
      "      (cell_anchors): BufferList()\n",
      "    )\n",
      "  )\n",
      "  (roi_heads): StandardROIHeads(\n",
      "    (box_pooler): ROIPooler(\n",
      "      (level_poolers): ModuleList(\n",
      "        (0): ROIAlign(output_size=(7, 7), spatial_scale=0.25, sampling_ratio=0, aligned=True)\n",
      "        (1): ROIAlign(output_size=(7, 7), spatial_scale=0.125, sampling_ratio=0, aligned=True)\n",
      "        (2): ROIAlign(output_size=(7, 7), spatial_scale=0.0625, sampling_ratio=0, aligned=True)\n",
      "        (3): ROIAlign(output_size=(7, 7), spatial_scale=0.03125, sampling_ratio=0, aligned=True)\n",
      "      )\n",
      "    )\n",
      "    (box_head): FastRCNNConvFCHead(\n",
      "      (flatten): Flatten(start_dim=1, end_dim=-1)\n",
      "      (fc1): Linear(in_features=12544, out_features=1024, bias=True)\n",
      "      (fc_relu1): ReLU()\n",
      "      (fc2): Linear(in_features=1024, out_features=1024, bias=True)\n",
      "      (fc_relu2): ReLU()\n",
      "    )\n",
      "    (box_predictor): FastRCNNOutputLayers(\n",
      "      (cls_score): Linear(in_features=1024, out_features=4, bias=True)\n",
      "      (bbox_pred): Linear(in_features=1024, out_features=12, bias=True)\n",
      "    )\n",
      "    (mask_pooler): ROIPooler(\n",
      "      (level_poolers): ModuleList(\n",
      "        (0): ROIAlign(output_size=(14, 14), spatial_scale=0.25, sampling_ratio=0, aligned=True)\n",
      "        (1): ROIAlign(output_size=(14, 14), spatial_scale=0.125, sampling_ratio=0, aligned=True)\n",
      "        (2): ROIAlign(output_size=(14, 14), spatial_scale=0.0625, sampling_ratio=0, aligned=True)\n",
      "        (3): ROIAlign(output_size=(14, 14), spatial_scale=0.03125, sampling_ratio=0, aligned=True)\n",
      "      )\n",
      "    )\n",
      "    (mask_head): MaskRCNNConvUpsampleHead(\n",
      "      (mask_fcn1): Conv2d(\n",
      "        256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)\n",
      "        (activation): ReLU()\n",
      "      )\n",
      "      (mask_fcn2): Conv2d(\n",
      "        256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)\n",
      "        (activation): ReLU()\n",
      "      )\n",
      "      (mask_fcn3): Conv2d(\n",
      "        256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)\n",
      "        (activation): ReLU()\n",
      "      )\n",
      "      (mask_fcn4): Conv2d(\n",
      "        256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)\n",
      "        (activation): ReLU()\n",
      "      )\n",
      "      (deconv): ConvTranspose2d(256, 256, kernel_size=(2, 2), stride=(2, 2))\n",
      "      (deconv_relu): ReLU()\n",
      "      (predictor): Conv2d(256, 3, kernel_size=(1, 1), stride=(1, 1))\n",
      "    )\n",
      "  )\n",
      ")\n",
      "\u001b[32m[04/12 20:39:47 d2.data.datasets.coco]: \u001b[0mLoading ../input/sartoriusannotations/annotations_train_f4.json takes 1.22 seconds.\n",
      "\u001b[32m[04/12 20:39:47 d2.data.datasets.coco]: \u001b[0mLoaded 485 images in COCO format from ../input/sartoriusannotations/annotations_train_f4.json\n",
      "\u001b[32m[04/12 20:39:48 d2.data.build]: \u001b[0mRemoved 0 images with no usable annotations. 485 images left.\n",
      "\u001b[32m[04/12 20:39:48 d2.data.build]: \u001b[0mDistribution of instances among all 3 categories:\n",
      "\u001b[36m|  category  | #instances   |  category  | #instances   |  category  | #instances   |\n",
      "|:----------:|:-------------|:----------:|:-------------|:----------:|:-------------|\n",
      "|   shsy5y   | 41952        |   astro    | 8360         |    cort    | 8556         |\n",
      "|            |              |            |              |            |              |\n",
      "|   total    | 58868        |            |              |            |              |\u001b[0m\n",
      "\u001b[32m[04/12 20:39:48 d2.data.dataset_mapper]: \u001b[0m[DatasetMapper] Augmentations used in training: [ResizeShortestEdge(short_edge_length=(640, 672, 704, 736, 768, 800), max_size=1333, sample_style='choice'), RandomFlip()]\n",
      "\u001b[32m[04/12 20:39:48 d2.data.build]: \u001b[0mUsing training sampler TrainingSampler\n",
      "\u001b[32m[04/12 20:39:48 d2.data.common]: \u001b[0mSerializing 485 elements to byte tensors and concatenating them all ...\n",
      "\u001b[32m[04/12 20:39:49 d2.data.common]: \u001b[0mSerialized dataset takes 6.79 MiB\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "model_final_f10217.pkl: 178MB [00:11, 15.5MB/s]                           \n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[32m[04/12 20:40:05 d2.engine.train_loop]: \u001b[0mStarting training from iteration 0\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/opt/conda/lib/python3.7/site-packages/detectron2/data/detection_utils.py:433: UserWarning: The given NumPy array is not writeable, and PyTorch does not support non-writeable tensors. This means you can write to the underlying (supposedly non-writeable) NumPy array using the tensor. You may want to copy the array to protect its data or make it writeable before converting it to a tensor. This type of warning will be suppressed for the rest of this program. (Triggered internally at  /usr/local/src/pytorch/torch/csrc/utils/tensor_numpy.cpp:174.)\n",
      "  torch.stack([torch.from_numpy(np.ascontiguousarray(x)) for x in masks])\n",
      "/opt/conda/lib/python3.7/site-packages/detectron2/data/detection_utils.py:433: UserWarning: The given NumPy array is not writeable, and PyTorch does not support non-writeable tensors. This means you can write to the underlying (supposedly non-writeable) NumPy array using the tensor. You may want to copy the array to protect its data or make it writeable before converting it to a tensor. This type of warning will be suppressed for the rest of this program. (Triggered internally at  /usr/local/src/pytorch/torch/csrc/utils/tensor_numpy.cpp:174.)\n",
      "  torch.stack([torch.from_numpy(np.ascontiguousarray(x)) for x in masks])\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[32m[04/12 20:40:36 d2.utils.events]: \u001b[0m eta: 1:43:40  iter: 19  total_loss: 5.77  loss_cls: 1.404  loss_box_reg: 0.273  loss_mask: 0.6942  loss_rpn_cls: 2.993  loss_rpn_loc: 0.3093  time: 1.2865  data_time: 0.6897  lr: 1.9981e-05  max_mem: 5047M\n",
      "\u001b[32m[04/12 20:41:18 d2.utils.events]: \u001b[0m eta: 2:21:44  iter: 39  total_loss: 2.918  loss_cls: 1.168  loss_box_reg: 0.3203  loss_mask: 0.6887  loss_rpn_cls: 0.3942  loss_rpn_loc: 0.3307  time: 1.7156  data_time: 1.4681  lr: 3.9961e-05  max_mem: 6040M\n",
      "\u001b[32m[04/12 20:41:42 d2.utils.events]: \u001b[0m eta: 2:08:43  iter: 59  total_loss: 2.457  loss_cls: 0.7868  loss_box_reg: 0.4066  loss_mask: 0.6755  loss_rpn_cls: 0.339  loss_rpn_loc: 0.2742  time: 1.5356  data_time: 0.6028  lr: 5.9941e-05  max_mem: 6040M\n",
      "\u001b[32m[04/12 20:42:08 d2.utils.events]: \u001b[0m eta: 2:04:24  iter: 79  total_loss: 2.319  loss_cls: 0.6876  loss_box_reg: 0.402  loss_mask: 0.6653  loss_rpn_cls: 0.2746  loss_rpn_loc: 0.2876  time: 1.4839  data_time: 0.7185  lr: 7.9921e-05  max_mem: 6040M\n",
      "\u001b[32m[04/12 20:42:34 d2.utils.events]: \u001b[0m eta: 2:05:37  iter: 99  total_loss: 2.489  loss_cls: 0.7185  loss_box_reg: 0.4838  loss_mask: 0.6409  loss_rpn_cls: 0.3062  loss_rpn_loc: 0.2843  time: 1.4467  data_time: 0.6840  lr: 9.9901e-05  max_mem: 6040M\n",
      "\u001b[32m[04/12 20:42:54 d2.utils.events]: \u001b[0m eta: 2:01:31  iter: 119  total_loss: 2.316  loss_cls: 0.67  loss_box_reg: 0.5914  loss_mask: 0.6131  loss_rpn_cls: 0.196  loss_rpn_loc: 0.2779  time: 1.3710  data_time: 0.4086  lr: 0.00011988  max_mem: 6040M\n",
      "\u001b[32m[04/12 20:43:17 d2.utils.events]: \u001b[0m eta: 1:59:59  iter: 139  total_loss: 2.434  loss_cls: 0.7347  loss_box_reg: 0.6485  loss_mask: 0.5784  loss_rpn_cls: 0.21  loss_rpn_loc: 0.2585  time: 1.3354  data_time: 0.4975  lr: 0.00013986  max_mem: 6281M\n",
      "\u001b[32m[04/12 20:43:54 d2.utils.events]: \u001b[0m eta: 2:01:31  iter: 159  total_loss: 2.364  loss_cls: 0.7371  loss_box_reg: 0.5823  loss_mask: 0.5655  loss_rpn_cls: 0.2212  loss_rpn_loc: 0.2758  time: 1.4019  data_time: 1.2132  lr: 0.00015984  max_mem: 6281M\n",
      "\u001b[32m[04/12 20:44:24 d2.utils.events]: \u001b[0m eta: 2:02:13  iter: 179  total_loss: 2.339  loss_cls: 0.7181  loss_box_reg: 0.5848  loss_mask: 0.508  loss_rpn_cls: 0.2036  loss_rpn_loc: 0.2702  time: 1.4105  data_time: 0.8807  lr: 0.00017982  max_mem: 6281M\n",
      "\u001b[32m[04/12 20:44:57 d2.utils.events]: \u001b[0m eta: 2:05:24  iter: 199  total_loss: 2.232  loss_cls: 0.6903  loss_box_reg: 0.6176  loss_mask: 0.4861  loss_rpn_cls: 0.1731  loss_rpn_loc: 0.2609  time: 1.4382  data_time: 1.0156  lr: 0.0001998  max_mem: 6281M\n",
      "\u001b[32m[04/12 20:45:32 d2.utils.events]: \u001b[0m eta: 2:06:15  iter: 219  total_loss: 2.186  loss_cls: 0.6801  loss_box_reg: 0.5951  loss_mask: 0.4415  loss_rpn_cls: 0.1893  loss_rpn_loc: 0.2774  time: 1.4654  data_time: 1.0679  lr: 0.00021978  max_mem: 6281M\n",
      "\u001b[32m[04/12 20:46:11 d2.utils.events]: \u001b[0m eta: 2:06:57  iter: 239  total_loss: 2.15  loss_cls: 0.6848  loss_box_reg: 0.5983  loss_mask: 0.4175  loss_rpn_cls: 0.1739  loss_rpn_loc: 0.2919  time: 1.5062  data_time: 1.2893  lr: 0.00023976  max_mem: 6281M\n",
      "\u001b[32m[04/12 20:46:15 d2.data.datasets.coco]: \u001b[0mLoading ../input/sartoriusannotations/annotations_valid_f4.json takes 1.27 seconds.\n",
      "\u001b[32m[04/12 20:46:15 d2.data.datasets.coco]: \u001b[0mLoaded 121 images in COCO format from ../input/sartoriusannotations/annotations_valid_f4.json\n",
      "\u001b[32m[04/12 20:46:15 d2.data.build]: \u001b[0mDistribution of instances among all 3 categories:\n",
      "\u001b[36m|  category  | #instances   |  category  | #instances   |  category  | #instances   |\n",
      "|:----------:|:-------------|:----------:|:-------------|:----------:|:-------------|\n",
      "|    cort    | 2221         |   shsy5y   | 10334        |   astro    | 2162         |\n",
      "|            |              |            |              |            |              |\n",
      "|   total    | 14717        |            |              |            |              |\u001b[0m\n",
      "\u001b[32m[04/12 20:46:15 d2.data.dataset_mapper]: \u001b[0m[DatasetMapper] Augmentations used in inference: [ResizeShortestEdge(short_edge_length=(800, 800), max_size=1333, sample_style='choice')]\n",
      "\u001b[32m[04/12 20:46:15 d2.data.common]: \u001b[0mSerializing 121 elements to byte tensors and concatenating them all ...\n",
      "\u001b[32m[04/12 20:46:15 d2.data.common]: \u001b[0mSerialized dataset takes 1.66 MiB\n",
      "\u001b[32m[04/12 20:46:15 d2.data.datasets.coco]: \u001b[0mLoaded 121 images in COCO format from ../input/sartoriusannotations/annotations_valid_f4.json\n",
      "\u001b[32m[04/12 20:46:16 d2.evaluation.evaluator]: \u001b[0mStart inference on 121 batches\n",
      "\u001b[32m[04/12 20:46:17 d2.evaluation.evaluator]: \u001b[0mInference done 11/121. Dataloading: 0.0014 s/iter. Inference: 0.0829 s/iter. Eval: 0.0043 s/iter. Total: 0.0887 s/iter. ETA=0:00:09\n",
      "\u001b[32m[04/12 20:46:22 d2.evaluation.evaluator]: \u001b[0mInference done 69/121. Dataloading: 0.0066 s/iter. Inference: 0.0799 s/iter. Eval: 0.0050 s/iter. Total: 0.0916 s/iter. ETA=0:00:04\n",
      "\u001b[32m[04/12 20:46:27 d2.evaluation.evaluator]: \u001b[0mTotal inference time: 0:00:10.801424 (0.093116 s / iter per device, on 1 devices)\n",
      "\u001b[32m[04/12 20:46:27 d2.evaluation.evaluator]: \u001b[0mTotal inference pure compute time: 0:00:09 (0.080524 s / iter per device, on 1 devices)\n",
      "\u001b[32m[04/12 20:46:27 d2.engine.defaults]: \u001b[0mEvaluation results for sartorius_val in csv format:\n",
      "\u001b[32m[04/12 20:46:27 d2.evaluation.testing]: \u001b[0mcopypaste: MaP IoU=0.06269378546296314\n",
      "\u001b[32m[04/12 20:46:27 d2.engine.hooks]: \u001b[0mSaved first model at 0.06269 @ 241 steps\n",
      "\u001b[32m[04/12 20:46:50 d2.utils.events]: \u001b[0m eta: 2:05:44  iter: 259  total_loss: 2.074  loss_cls: 0.6085  loss_box_reg: 0.6491  loss_mask: 0.3894  loss_rpn_cls: 0.1723  loss_rpn_loc: 0.2463  time: 1.4838  data_time: 0.6008  lr: 0.00025974  max_mem: 6281M\n",
      "\u001b[32m[04/12 20:47:22 d2.utils.events]: \u001b[0m eta: 2:08:51  iter: 279  total_loss: 2.01  loss_cls: 0.582  loss_box_reg: 0.582  loss_mask: 0.3903  loss_rpn_cls: 0.1867  loss_rpn_loc: 0.2501  time: 1.4926  data_time: 0.9575  lr: 0.00027972  max_mem: 6281M\n",
      "\u001b[32m[04/12 20:47:48 d2.utils.events]: \u001b[0m eta: 2:06:10  iter: 299  total_loss: 2  loss_cls: 0.5614  loss_box_reg: 0.6856  loss_mask: 0.3799  loss_rpn_cls: 0.1385  loss_rpn_loc: 0.2422  time: 1.4817  data_time: 0.6905  lr: 0.0002997  max_mem: 6281M\n",
      "\u001b[32m[04/12 20:48:18 d2.utils.events]: \u001b[0m eta: 2:05:54  iter: 319  total_loss: 1.893  loss_cls: 0.523  loss_box_reg: 0.6524  loss_mask: 0.3693  loss_rpn_cls: 0.1287  loss_rpn_loc: 0.2246  time: 1.4809  data_time: 0.8234  lr: 0.00031968  max_mem: 6281M\n",
      "\u001b[32m[04/12 20:48:47 d2.utils.events]: \u001b[0m eta: 2:05:39  iter: 339  total_loss: 1.973  loss_cls: 0.5513  loss_box_reg: 0.6833  loss_mask: 0.3686  loss_rpn_cls: 0.1468  loss_rpn_loc: 0.234  time: 1.4783  data_time: 0.7924  lr: 0.00033966  max_mem: 6281M\n",
      "\u001b[32m[04/12 20:49:26 d2.utils.events]: \u001b[0m eta: 2:06:51  iter: 359  total_loss: 1.768  loss_cls: 0.4821  loss_box_reg: 0.5333  loss_mask: 0.3534  loss_rpn_cls: 0.1401  loss_rpn_loc: 0.2583  time: 1.5058  data_time: 1.3074  lr: 0.00035964  max_mem: 6281M\n",
      "\u001b[32m[04/12 20:49:55 d2.utils.events]: \u001b[0m eta: 2:06:42  iter: 379  total_loss: 1.835  loss_cls: 0.5038  loss_box_reg: 0.5593  loss_mask: 0.3598  loss_rpn_cls: 0.1597  loss_rpn_loc: 0.2568  time: 1.5037  data_time: 0.8375  lr: 0.00037962  max_mem: 6281M\n",
      "\u001b[32m[04/12 20:50:21 d2.utils.events]: \u001b[0m eta: 2:06:19  iter: 399  total_loss: 1.747  loss_cls: 0.4633  loss_box_reg: 0.5885  loss_mask: 0.3372  loss_rpn_cls: 0.1271  loss_rpn_loc: 0.2322  time: 1.4934  data_time: 0.6657  lr: 0.0003996  max_mem: 6281M\n",
      "\u001b[32m[04/12 20:50:57 d2.utils.events]: \u001b[0m eta: 2:06:11  iter: 419  total_loss: 1.836  loss_cls: 0.4666  loss_box_reg: 0.5646  loss_mask: 0.3542  loss_rpn_cls: 0.1718  loss_rpn_loc: 0.2512  time: 1.5066  data_time: 1.0864  lr: 0.00041958  max_mem: 6281M\n",
      "\u001b[32m[04/12 20:51:23 d2.utils.events]: \u001b[0m eta: 2:05:04  iter: 439  total_loss: 1.759  loss_cls: 0.427  loss_box_reg: 0.6468  loss_mask: 0.3583  loss_rpn_cls: 0.1072  loss_rpn_loc: 0.23  time: 1.4988  data_time: 0.7128  lr: 0.00043956  max_mem: 6281M\n",
      "\u001b[32m[04/12 20:51:53 d2.utils.events]: \u001b[0m eta: 2:07:34  iter: 459  total_loss: 1.879  loss_cls: 0.4802  loss_box_reg: 0.5778  loss_mask: 0.3519  loss_rpn_cls: 0.1609  loss_rpn_loc: 0.2529  time: 1.4985  data_time: 0.8224  lr: 0.00045954  max_mem: 6281M\n",
      "\u001b[32m[04/12 20:52:26 d2.utils.events]: \u001b[0m eta: 2:08:44  iter: 479  total_loss: 1.751  loss_cls: 0.397  loss_box_reg: 0.5977  loss_mask: 0.3517  loss_rpn_cls: 0.1201  loss_rpn_loc: 0.2522  time: 1.5046  data_time: 1.0038  lr: 0.00047952  max_mem: 6281M\n",
      "\u001b[32m[04/12 20:52:36 d2.data.datasets.coco]: \u001b[0mLoading ../input/sartoriusannotations/annotations_valid_f4.json takes 1.29 seconds.\n",
      "\u001b[32m[04/12 20:52:36 d2.data.datasets.coco]: \u001b[0mLoaded 121 images in COCO format from ../input/sartoriusannotations/annotations_valid_f4.json\n",
      "\u001b[32m[04/12 20:52:37 d2.data.dataset_mapper]: \u001b[0m[DatasetMapper] Augmentations used in inference: [ResizeShortestEdge(short_edge_length=(800, 800), max_size=1333, sample_style='choice')]\n",
      "\u001b[32m[04/12 20:52:37 d2.data.common]: \u001b[0mSerializing 121 elements to byte tensors and concatenating them all ...\n",
      "\u001b[32m[04/12 20:52:37 d2.data.common]: \u001b[0mSerialized dataset takes 1.66 MiB\n",
      "\u001b[32m[04/12 20:52:38 d2.data.datasets.coco]: \u001b[0mLoaded 121 images in COCO format from ../input/sartoriusannotations/annotations_valid_f4.json\n",
      "\u001b[32m[04/12 20:52:38 d2.evaluation.evaluator]: \u001b[0mStart inference on 121 batches\n",
      "\u001b[32m[04/12 20:52:41 d2.evaluation.evaluator]: \u001b[0mInference done 11/121. Dataloading: 0.0031 s/iter. Inference: 0.0965 s/iter. Eval: 0.0537 s/iter. Total: 0.1533 s/iter. ETA=0:00:16\n",
      "\u001b[32m[04/12 20:52:46 d2.evaluation.evaluator]: \u001b[0mInference done 52/121. Dataloading: 0.0023 s/iter. Inference: 0.0899 s/iter. Eval: 0.0348 s/iter. Total: 0.1270 s/iter. ETA=0:00:08\n",
      "\u001b[32m[04/12 20:52:51 d2.evaluation.evaluator]: \u001b[0mInference done 88/121. Dataloading: 0.0028 s/iter. Inference: 0.0923 s/iter. Eval: 0.0372 s/iter. Total: 0.1323 s/iter. ETA=0:00:04\n",
      "\u001b[32m[04/12 20:52:55 d2.evaluation.evaluator]: \u001b[0mTotal inference time: 0:00:15.064678 (0.129868 s / iter per device, on 1 devices)\n",
      "\u001b[32m[04/12 20:52:55 d2.evaluation.evaluator]: \u001b[0mTotal inference pure compute time: 0:00:10 (0.091184 s / iter per device, on 1 devices)\n",
      "\u001b[32m[04/12 20:52:55 d2.engine.defaults]: \u001b[0mEvaluation results for sartorius_val in csv format:\n",
      "\u001b[32m[04/12 20:52:55 d2.evaluation.testing]: \u001b[0mcopypaste: MaP IoU=0.17722458020263787\n",
      "\u001b[32m[04/12 20:52:55 d2.engine.hooks]: \u001b[0mSaved best model as latest eval score for MaP IoU is 0.17722, better than last best score 0.06269 @ iteration 241.\n",
      "\u001b[32m[04/12 20:53:21 d2.utils.events]: \u001b[0m eta: 2:10:29  iter: 499  total_loss: 1.836  loss_cls: 0.4676  loss_box_reg: 0.539  loss_mask: 0.3606  loss_rpn_cls: 0.1442  loss_rpn_loc: 0.2472  time: 1.5119  data_time: 1.0304  lr: 0.0004995  max_mem: 6281M\n",
      "\u001b[32m[04/12 20:53:52 d2.utils.events]: \u001b[0m eta: 2:09:11  iter: 519  total_loss: 1.727  loss_cls: 0.4352  loss_box_reg: 0.6032  loss_mask: 0.3398  loss_rpn_cls: 0.1149  loss_rpn_loc: 0.261  time: 1.5147  data_time: 0.9641  lr: 0.00051948  max_mem: 6281M\n",
      "\u001b[32m[04/12 20:54:29 d2.utils.events]: \u001b[0m eta: 2:09:32  iter: 539  total_loss: 1.72  loss_cls: 0.4356  loss_box_reg: 0.578  loss_mask: 0.3374  loss_rpn_cls: 0.1448  loss_rpn_loc: 0.263  time: 1.5269  data_time: 1.1693  lr: 0.00053946  max_mem: 6281M\n",
      "\u001b[32m[04/12 20:54:52 d2.utils.events]: \u001b[0m eta: 2:07:39  iter: 559  total_loss: 1.715  loss_cls: 0.4326  loss_box_reg: 0.6303  loss_mask: 0.3316  loss_rpn_cls: 0.1086  loss_rpn_loc: 0.2319  time: 1.5130  data_time: 0.5082  lr: 0.00055944  max_mem: 6281M\n",
      "\u001b[32m[04/12 20:55:21 d2.utils.events]: \u001b[0m eta: 2:06:34  iter: 579  total_loss: 1.859  loss_cls: 0.4957  loss_box_reg: 0.5877  loss_mask: 0.3544  loss_rpn_cls: 0.1376  loss_rpn_loc: 0.2455  time: 1.5110  data_time: 0.7974  lr: 0.00057942  max_mem: 6281M\n",
      "\u001b[32m[04/12 20:55:52 d2.utils.events]: \u001b[0m eta: 2:08:06  iter: 599  total_loss: 1.735  loss_cls: 0.4129  loss_box_reg: 0.5783  loss_mask: 0.342  loss_rpn_cls: 0.1367  loss_rpn_loc: 0.2446  time: 1.5124  data_time: 0.9057  lr: 0.0005994  max_mem: 6281M\n",
      "\u001b[32m[04/12 20:56:12 d2.utils.events]: \u001b[0m eta: 2:05:41  iter: 619  total_loss: 1.841  loss_cls: 0.456  loss_box_reg: 0.633  loss_mask: 0.3394  loss_rpn_cls: 0.1336  loss_rpn_loc: 0.233  time: 1.4953  data_time: 0.3755  lr: 0.00061938  max_mem: 6281M\n",
      "\u001b[32m[04/12 20:56:45 d2.utils.events]: \u001b[0m eta: 2:05:25  iter: 639  total_loss: 1.66  loss_cls: 0.4025  loss_box_reg: 0.5621  loss_mask: 0.3371  loss_rpn_cls: 0.1189  loss_rpn_loc: 0.2195  time: 1.5006  data_time: 1.0204  lr: 0.00063936  max_mem: 6281M\n",
      "\u001b[32m[04/12 20:57:12 d2.utils.events]: \u001b[0m eta: 2:05:09  iter: 659  total_loss: 1.705  loss_cls: 0.4017  loss_box_reg: 0.5801  loss_mask: 0.3299  loss_rpn_cls: 0.1082  loss_rpn_loc: 0.2291  time: 1.4948  data_time: 0.6745  lr: 0.00065934  max_mem: 6281M\n",
      "\u001b[32m[04/12 20:57:49 d2.utils.events]: \u001b[0m eta: 2:05:13  iter: 679  total_loss: 1.68  loss_cls: 0.4087  loss_box_reg: 0.5481  loss_mask: 0.3423  loss_rpn_cls: 0.1552  loss_rpn_loc: 0.2505  time: 1.5058  data_time: 1.2008  lr: 0.00067932  max_mem: 6281M\n",
      "\u001b[32m[04/12 20:58:18 d2.utils.events]: \u001b[0m eta: 2:05:46  iter: 699  total_loss: 1.731  loss_cls: 0.4115  loss_box_reg: 0.5845  loss_mask: 0.3263  loss_rpn_cls: 0.1061  loss_rpn_loc: 0.2306  time: 1.5049  data_time: 0.8161  lr: 0.0006993  max_mem: 6281M\n",
      "\u001b[32m[04/12 20:58:46 d2.utils.events]: \u001b[0m eta: 2:04:41  iter: 719  total_loss: 1.64  loss_cls: 0.356  loss_box_reg: 0.5649  loss_mask: 0.3313  loss_rpn_cls: 0.1103  loss_rpn_loc: 0.2458  time: 1.5014  data_time: 0.7557  lr: 0.00071928  max_mem: 6281M\n",
      "\u001b[32m[04/12 20:58:55 d2.data.datasets.coco]: \u001b[0mLoaded 121 images in COCO format from ../input/sartoriusannotations/annotations_valid_f4.json\n",
      "\u001b[32m[04/12 20:58:55 d2.data.dataset_mapper]: \u001b[0m[DatasetMapper] Augmentations used in inference: [ResizeShortestEdge(short_edge_length=(800, 800), max_size=1333, sample_style='choice')]\n",
      "\u001b[32m[04/12 20:58:55 d2.data.common]: \u001b[0mSerializing 121 elements to byte tensors and concatenating them all ...\n",
      "\u001b[32m[04/12 20:58:55 d2.data.common]: \u001b[0mSerialized dataset takes 1.66 MiB\n",
      "\u001b[32m[04/12 20:58:56 d2.data.datasets.coco]: \u001b[0mLoaded 121 images in COCO format from ../input/sartoriusannotations/annotations_valid_f4.json\n",
      "\u001b[32m[04/12 20:58:57 d2.evaluation.evaluator]: \u001b[0mStart inference on 121 batches\n",
      "\u001b[32m[04/12 20:58:59 d2.evaluation.evaluator]: \u001b[0mInference done 11/121. Dataloading: 0.0020 s/iter. Inference: 0.0911 s/iter. Eval: 0.0376 s/iter. Total: 0.1308 s/iter. ETA=0:00:14\n",
      "\u001b[32m[04/12 20:59:04 d2.evaluation.evaluator]: \u001b[0mInference done 52/121. Dataloading: 0.0022 s/iter. Inference: 0.0894 s/iter. Eval: 0.0334 s/iter. Total: 0.1251 s/iter. ETA=0:00:08\n",
      "\u001b[32m[04/12 20:59:09 d2.evaluation.evaluator]: \u001b[0mInference done 92/121. Dataloading: 0.0021 s/iter. Inference: 0.0901 s/iter. Eval: 0.0345 s/iter. Total: 0.1268 s/iter. ETA=0:00:03\n",
      "\u001b[32m[04/12 20:59:12 d2.evaluation.evaluator]: \u001b[0mTotal inference time: 0:00:14.501082 (0.125009 s / iter per device, on 1 devices)\n",
      "\u001b[32m[04/12 20:59:12 d2.evaluation.evaluator]: \u001b[0mTotal inference pure compute time: 0:00:10 (0.089466 s / iter per device, on 1 devices)\n",
      "\u001b[32m[04/12 20:59:12 d2.engine.defaults]: \u001b[0mEvaluation results for sartorius_val in csv format:\n",
      "\u001b[32m[04/12 20:59:12 d2.evaluation.testing]: \u001b[0mcopypaste: MaP IoU=0.21233669541890962\n",
      "\u001b[32m[04/12 20:59:13 d2.engine.hooks]: \u001b[0mSaved best model as latest eval score for MaP IoU is 0.21234, better than last best score 0.17722 @ iteration 483.\n",
      "\u001b[32m[04/12 20:59:34 d2.utils.events]: \u001b[0m eta: 2:05:13  iter: 739  total_loss: 1.642  loss_cls: 0.3896  loss_box_reg: 0.5461  loss_mask: 0.3376  loss_rpn_cls: 0.1431  loss_rpn_loc: 0.2399  time: 1.5004  data_time: 0.8355  lr: 0.00073926  max_mem: 6281M\n",
      "\u001b[32m[04/12 21:00:14 d2.utils.events]: \u001b[0m eta: 2:07:00  iter: 759  total_loss: 1.647  loss_cls: 0.4115  loss_box_reg: 0.5277  loss_mask: 0.3312  loss_rpn_cls: 0.1348  loss_rpn_loc: 0.2383  time: 1.5139  data_time: 1.3085  lr: 0.00075924  max_mem: 6281M\n",
      "\u001b[32m[04/12 21:00:48 d2.utils.events]: \u001b[0m eta: 2:06:44  iter: 779  total_loss: 1.609  loss_cls: 0.3896  loss_box_reg: 0.5708  loss_mask: 0.3325  loss_rpn_cls: 0.1011  loss_rpn_loc: 0.2404  time: 1.5185  data_time: 1.0229  lr: 0.00077922  max_mem: 6283M\n",
      "\u001b[32m[04/12 21:01:19 d2.utils.events]: \u001b[0m eta: 2:07:15  iter: 799  total_loss: 1.744  loss_cls: 0.4214  loss_box_reg: 0.5839  loss_mask: 0.3262  loss_rpn_cls: 0.1328  loss_rpn_loc: 0.2407  time: 1.5198  data_time: 0.9069  lr: 0.0007992  max_mem: 6283M\n",
      "\u001b[32m[04/12 21:01:43 d2.utils.events]: \u001b[0m eta: 2:06:05  iter: 819  total_loss: 1.629  loss_cls: 0.3692  loss_box_reg: 0.5789  loss_mask: 0.3273  loss_rpn_cls: 0.09896  loss_rpn_loc: 0.1972  time: 1.5121  data_time: 0.5866  lr: 0.00081918  max_mem: 6283M\n",
      "\u001b[32m[04/12 21:02:06 d2.utils.events]: \u001b[0m eta: 2:03:52  iter: 839  total_loss: 1.655  loss_cls: 0.3963  loss_box_reg: 0.5977  loss_mask: 0.3169  loss_rpn_cls: 0.09401  loss_rpn_loc: 0.2292  time: 1.5029  data_time: 0.5061  lr: 0.00083916  max_mem: 6283M\n",
      "\u001b[32m[04/12 21:02:34 d2.utils.events]: \u001b[0m eta: 2:03:36  iter: 859  total_loss: 1.7  loss_cls: 0.4188  loss_box_reg: 0.5868  loss_mask: 0.3306  loss_rpn_cls: 0.1455  loss_rpn_loc: 0.2516  time: 1.5006  data_time: 0.7831  lr: 0.00085914  max_mem: 6444M\n",
      "\u001b[32m[04/12 21:03:06 d2.utils.events]: \u001b[0m eta: 2:02:39  iter: 879  total_loss: 1.707  loss_cls: 0.4356  loss_box_reg: 0.5652  loss_mask: 0.3253  loss_rpn_cls: 0.1071  loss_rpn_loc: 0.2358  time: 1.5026  data_time: 0.9272  lr: 0.00087912  max_mem: 6444M\n",
      "\u001b[32m[04/12 21:03:38 d2.utils.events]: \u001b[0m eta: 2:02:23  iter: 899  total_loss: 1.575  loss_cls: 0.3693  loss_box_reg: 0.5612  loss_mask: 0.3247  loss_rpn_cls: 0.1091  loss_rpn_loc: 0.2288  time: 1.5053  data_time: 0.9922  lr: 0.0008991  max_mem: 6444M\n",
      "\u001b[32m[04/12 21:04:11 d2.utils.events]: \u001b[0m eta: 2:02:47  iter: 919  total_loss: 1.629  loss_cls: 0.3668  loss_box_reg: 0.5766  loss_mask: 0.3454  loss_rpn_cls: 0.1004  loss_rpn_loc: 0.2214  time: 1.5075  data_time: 0.9737  lr: 0.00091908  max_mem: 6444M\n",
      "\u001b[32m[04/12 21:04:39 d2.utils.events]: \u001b[0m eta: 2:03:23  iter: 939  total_loss: 1.592  loss_cls: 0.382  loss_box_reg: 0.5476  loss_mask: 0.3034  loss_rpn_cls: 0.09992  loss_rpn_loc: 0.2226  time: 1.5057  data_time: 0.8021  lr: 0.00093906  max_mem: 6444M\n",
      "\u001b[32m[04/12 21:05:07 d2.utils.events]: \u001b[0m eta: 2:02:55  iter: 959  total_loss: 1.592  loss_cls: 0.3815  loss_box_reg: 0.5792  loss_mask: 0.3204  loss_rpn_cls: 0.1023  loss_rpn_loc: 0.2206  time: 1.5032  data_time: 0.7504  lr: 0.00095904  max_mem: 6444M\n",
      "\u001b[32m[04/12 21:05:21 d2.data.datasets.coco]: \u001b[0mLoaded 121 images in COCO format from ../input/sartoriusannotations/annotations_valid_f4.json\n",
      "\u001b[32m[04/12 21:05:22 d2.data.dataset_mapper]: \u001b[0m[DatasetMapper] Augmentations used in inference: [ResizeShortestEdge(short_edge_length=(800, 800), max_size=1333, sample_style='choice')]\n",
      "\u001b[32m[04/12 21:05:22 d2.data.common]: \u001b[0mSerializing 121 elements to byte tensors and concatenating them all ...\n",
      "\u001b[32m[04/12 21:05:22 d2.data.common]: \u001b[0mSerialized dataset takes 1.66 MiB\n",
      "\u001b[32m[04/12 21:05:23 d2.data.datasets.coco]: \u001b[0mLoaded 121 images in COCO format from ../input/sartoriusannotations/annotations_valid_f4.json\n",
      "\u001b[32m[04/12 21:05:24 d2.evaluation.evaluator]: \u001b[0mStart inference on 121 batches\n",
      "\u001b[32m[04/12 21:05:26 d2.evaluation.evaluator]: \u001b[0mInference done 11/121. Dataloading: 0.0029 s/iter. Inference: 0.1024 s/iter. Eval: 0.0584 s/iter. Total: 0.1637 s/iter. ETA=0:00:18\n",
      "\u001b[32m[04/12 21:05:31 d2.evaluation.evaluator]: \u001b[0mInference done 36/121. Dataloading: 0.0049 s/iter. Inference: 0.1152 s/iter. Eval: 0.0730 s/iter. Total: 0.1933 s/iter. ETA=0:00:16\n",
      "\u001b[32m[04/12 21:05:36 d2.evaluation.evaluator]: \u001b[0mInference done 77/121. Dataloading: 0.0033 s/iter. Inference: 0.1008 s/iter. Eval: 0.0499 s/iter. Total: 0.1542 s/iter. ETA=0:00:06\n",
      "\u001b[32m[04/12 21:05:41 d2.evaluation.evaluator]: \u001b[0mInference done 114/121. Dataloading: 0.0029 s/iter. Inference: 0.0982 s/iter. Eval: 0.0465 s/iter. Total: 0.1477 s/iter. ETA=0:00:01\n",
      "\u001b[32m[04/12 21:05:42 d2.evaluation.evaluator]: \u001b[0mTotal inference time: 0:00:17.028574 (0.146798 s / iter per device, on 1 devices)\n",
      "\u001b[32m[04/12 21:05:42 d2.evaluation.evaluator]: \u001b[0mTotal inference pure compute time: 0:00:11 (0.097597 s / iter per device, on 1 devices)\n",
      "\u001b[32m[04/12 21:05:42 d2.engine.defaults]: \u001b[0mEvaluation results for sartorius_val in csv format:\n",
      "\u001b[32m[04/12 21:05:42 d2.evaluation.testing]: \u001b[0mcopypaste: MaP IoU=0.21931546035156177\n",
      "\u001b[32m[04/12 21:05:43 d2.engine.hooks]: \u001b[0mSaved best model as latest eval score for MaP IoU is 0.21932, better than last best score 0.21234 @ iteration 725.\n",
      "\u001b[32m[04/12 21:05:53 d2.utils.events]: \u001b[0m eta: 2:02:10  iter: 979  total_loss: 1.591  loss_cls: 0.3937  loss_box_reg: 0.5615  loss_mask: 0.3294  loss_rpn_cls: 0.1484  loss_rpn_loc: 0.2341  time: 1.4973  data_time: 0.5807  lr: 0.00097902  max_mem: 6444M\n",
      "\u001b[32m[04/12 21:06:29 d2.utils.events]: \u001b[0m eta: 2:02:23  iter: 999  total_loss: 1.689  loss_cls: 0.4345  loss_box_reg: 0.5536  loss_mask: 0.3345  loss_rpn_cls: 0.1418  loss_rpn_loc: 0.2191  time: 1.5030  data_time: 1.1274  lr: 0.000999  max_mem: 6444M\n",
      "\u001b[32m[04/12 21:06:57 d2.utils.events]: \u001b[0m eta: 2:02:35  iter: 1019  total_loss: 1.664  loss_cls: 0.384  loss_box_reg: 0.5874  loss_mask: 0.3309  loss_rpn_cls: 0.1272  loss_rpn_loc: 0.2279  time: 1.5007  data_time: 0.7629  lr: 0.001  max_mem: 6444M\n",
      "\u001b[32m[04/12 21:07:30 d2.utils.events]: \u001b[0m eta: 2:02:03  iter: 1039  total_loss: 1.63  loss_cls: 0.3798  loss_box_reg: 0.5734  loss_mask: 0.3415  loss_rpn_cls: 0.1147  loss_rpn_loc: 0.2359  time: 1.5033  data_time: 0.9722  lr: 0.001  max_mem: 6444M\n",
      "\u001b[32m[04/12 21:08:03 d2.utils.events]: \u001b[0m eta: 2:02:47  iter: 1059  total_loss: 1.556  loss_cls: 0.3592  loss_box_reg: 0.5596  loss_mask: 0.3306  loss_rpn_cls: 0.114  loss_rpn_loc: 0.23  time: 1.5068  data_time: 1.0237  lr: 0.001  max_mem: 6444M\n",
      "\u001b[32m[04/12 21:08:44 d2.utils.events]: \u001b[0m eta: 2:02:48  iter: 1079  total_loss: 1.612  loss_cls: 0.4008  loss_box_reg: 0.5328  loss_mask: 0.3304  loss_rpn_cls: 0.0981  loss_rpn_loc: 0.2239  time: 1.5166  data_time: 1.3289  lr: 0.001  max_mem: 6444M\n",
      "\u001b[32m[04/12 21:09:13 d2.utils.events]: \u001b[0m eta: 2:04:02  iter: 1099  total_loss: 1.62  loss_cls: 0.426  loss_box_reg: 0.5635  loss_mask: 0.3251  loss_rpn_cls: 0.1125  loss_rpn_loc: 0.2198  time: 1.5153  data_time: 0.8229  lr: 0.001  max_mem: 6444M\n",
      "\u001b[32m[04/12 21:09:47 d2.utils.events]: \u001b[0m eta: 2:04:48  iter: 1119  total_loss: 1.648  loss_cls: 0.3885  loss_box_reg: 0.573  loss_mask: 0.3243  loss_rpn_cls: 0.1157  loss_rpn_loc: 0.2253  time: 1.5190  data_time: 1.0634  lr: 0.001  max_mem: 6444M\n",
      "\u001b[32m[04/12 21:10:17 d2.utils.events]: \u001b[0m eta: 2:04:44  iter: 1139  total_loss: 1.629  loss_cls: 0.3738  loss_box_reg: 0.5595  loss_mask: 0.3272  loss_rpn_cls: 0.133  loss_rpn_loc: 0.2588  time: 1.5183  data_time: 0.8664  lr: 0.001  max_mem: 6444M\n",
      "\u001b[32m[04/12 21:10:43 d2.utils.events]: \u001b[0m eta: 2:04:10  iter: 1159  total_loss: 1.542  loss_cls: 0.3455  loss_box_reg: 0.5712  loss_mask: 0.3076  loss_rpn_cls: 0.08651  loss_rpn_loc: 0.2141  time: 1.5140  data_time: 0.6478  lr: 0.001  max_mem: 6444M\n",
      "\u001b[32m[04/12 21:11:13 d2.utils.events]: \u001b[0m eta: 2:02:55  iter: 1179  total_loss: 1.543  loss_cls: 0.3676  loss_box_reg: 0.5776  loss_mask: 0.3285  loss_rpn_cls: 0.08462  loss_rpn_loc: 0.2188  time: 1.5145  data_time: 0.9285  lr: 0.001  max_mem: 6444M\n",
      "\u001b[32m[04/12 21:11:37 d2.utils.events]: \u001b[0m eta: 2:02:38  iter: 1199  total_loss: 1.787  loss_cls: 0.4568  loss_box_reg: 0.5433  loss_mask: 0.3221  loss_rpn_cls: 0.1294  loss_rpn_loc: 0.2431  time: 1.5091  data_time: 0.5872  lr: 0.001  max_mem: 6444M\n",
      "\u001b[32m[04/12 21:11:52 d2.data.datasets.coco]: \u001b[0mLoaded 121 images in COCO format from ../input/sartoriusannotations/annotations_valid_f4.json\n",
      "\u001b[32m[04/12 21:11:53 d2.data.dataset_mapper]: \u001b[0m[DatasetMapper] Augmentations used in inference: [ResizeShortestEdge(short_edge_length=(800, 800), max_size=1333, sample_style='choice')]\n",
      "\u001b[32m[04/12 21:11:53 d2.data.common]: \u001b[0mSerializing 121 elements to byte tensors and concatenating them all ...\n",
      "\u001b[32m[04/12 21:11:53 d2.data.common]: \u001b[0mSerialized dataset takes 1.66 MiB\n",
      "\u001b[32m[04/12 21:11:53 d2.data.datasets.coco]: \u001b[0mLoaded 121 images in COCO format from ../input/sartoriusannotations/annotations_valid_f4.json\n",
      "\u001b[32m[04/12 21:11:54 d2.evaluation.evaluator]: \u001b[0mStart inference on 121 batches\n",
      "\u001b[32m[04/12 21:11:55 d2.evaluation.evaluator]: \u001b[0mInference done 11/121. Dataloading: 0.0021 s/iter. Inference: 0.0905 s/iter. Eval: 0.0324 s/iter. Total: 0.1251 s/iter. ETA=0:00:13\n",
      "\u001b[32m[04/12 21:12:00 d2.evaluation.evaluator]: \u001b[0mInference done 52/121. Dataloading: 0.0024 s/iter. Inference: 0.0899 s/iter. Eval: 0.0318 s/iter. Total: 0.1242 s/iter. ETA=0:00:08\n",
      "\u001b[32m[04/12 21:12:05 d2.evaluation.evaluator]: \u001b[0mInference done 89/121. Dataloading: 0.0030 s/iter. Inference: 0.0909 s/iter. Eval: 0.0352 s/iter. Total: 0.1292 s/iter. ETA=0:00:04\n",
      "\u001b[32m[04/12 21:12:09 d2.evaluation.evaluator]: \u001b[0mTotal inference time: 0:00:14.753685 (0.127187 s / iter per device, on 1 devices)\n",
      "\u001b[32m[04/12 21:12:09 d2.evaluation.evaluator]: \u001b[0mTotal inference pure compute time: 0:00:10 (0.090140 s / iter per device, on 1 devices)\n",
      "\u001b[32m[04/12 21:12:09 d2.engine.defaults]: \u001b[0mEvaluation results for sartorius_val in csv format:\n",
      "\u001b[32m[04/12 21:12:09 d2.evaluation.testing]: \u001b[0mcopypaste: MaP IoU=0.22319421029507513\n",
      "\u001b[32m[04/12 21:12:10 d2.engine.hooks]: \u001b[0mSaved best model as latest eval score for MaP IoU is 0.22319, better than last best score 0.21932 @ iteration 967.\n",
      "\u001b[32m[04/12 21:12:24 d2.utils.events]: \u001b[0m eta: 2:01:43  iter: 1219  total_loss: 1.617  loss_cls: 0.3648  loss_box_reg: 0.5301  loss_mask: 0.3128  loss_rpn_cls: 0.1163  loss_rpn_loc: 0.2263  time: 1.5074  data_time: 0.7791  lr: 0.001  max_mem: 6444M\n",
      "\u001b[32m[04/12 21:12:57 d2.utils.events]: \u001b[0m eta: 2:00:36  iter: 1239  total_loss: 1.617  loss_cls: 0.3852  loss_box_reg: 0.5242  loss_mask: 0.3272  loss_rpn_cls: 0.1148  loss_rpn_loc: 0.2225  time: 1.5096  data_time: 0.9773  lr: 0.001  max_mem: 6444M\n",
      "\u001b[32m[04/12 21:13:31 d2.utils.events]: \u001b[0m eta: 2:00:20  iter: 1259  total_loss: 1.582  loss_cls: 0.3685  loss_box_reg: 0.5375  loss_mask: 0.321  loss_rpn_cls: 0.1302  loss_rpn_loc: 0.2415  time: 1.5130  data_time: 1.0767  lr: 0.001  max_mem: 6444M\n",
      "\u001b[32m[04/12 21:14:03 d2.utils.events]: \u001b[0m eta: 1:59:21  iter: 1279  total_loss: 1.62  loss_cls: 0.3945  loss_box_reg: 0.5545  loss_mask: 0.3225  loss_rpn_cls: 0.08059  loss_rpn_loc: 0.2217  time: 1.5143  data_time: 0.9486  lr: 0.001  max_mem: 6444M\n",
      "\u001b[32m[04/12 21:14:35 d2.utils.events]: \u001b[0m eta: 1:59:39  iter: 1299  total_loss: 1.534  loss_cls: 0.3534  loss_box_reg: 0.5327  loss_mask: 0.3165  loss_rpn_cls: 0.107  loss_rpn_loc: 0.2167  time: 1.5156  data_time: 0.9422  lr: 0.001  max_mem: 6444M\n",
      "\u001b[32m[04/12 21:15:04 d2.utils.events]: \u001b[0m eta: 1:59:20  iter: 1319  total_loss: 1.582  loss_cls: 0.3574  loss_box_reg: 0.539  loss_mask: 0.3122  loss_rpn_cls: 0.09644  loss_rpn_loc: 0.2184  time: 1.5142  data_time: 0.7862  lr: 0.001  max_mem: 6444M\n",
      "\u001b[32m[04/12 21:15:32 d2.utils.events]: \u001b[0m eta: 2:00:04  iter: 1339  total_loss: 1.537  loss_cls: 0.3422  loss_box_reg: 0.5641  loss_mask: 0.3214  loss_rpn_cls: 0.1112  loss_rpn_loc: 0.2167  time: 1.5130  data_time: 0.8033  lr: 0.001  max_mem: 6444M\n",
      "\u001b[32m[04/12 21:16:04 d2.utils.events]: \u001b[0m eta: 1:59:15  iter: 1359  total_loss: 1.597  loss_cls: 0.3746  loss_box_reg: 0.5564  loss_mask: 0.3215  loss_rpn_cls: 0.0971  loss_rpn_loc: 0.2067  time: 1.5140  data_time: 0.9537  lr: 0.001  max_mem: 6444M\n",
      "\u001b[32m[04/12 21:16:35 d2.utils.events]: \u001b[0m eta: 1:59:49  iter: 1379  total_loss: 1.525  loss_cls: 0.3495  loss_box_reg: 0.528  loss_mask: 0.3005  loss_rpn_cls: 0.1141  loss_rpn_loc: 0.2216  time: 1.5142  data_time: 0.8869  lr: 0.001  max_mem: 6444M\n",
      "\u001b[32m[04/12 21:17:06 d2.utils.events]: \u001b[0m eta: 1:59:33  iter: 1399  total_loss: 1.666  loss_cls: 0.4173  loss_box_reg: 0.5866  loss_mask: 0.3297  loss_rpn_cls: 0.1139  loss_rpn_loc: 0.2275  time: 1.5149  data_time: 0.9079  lr: 0.001  max_mem: 6444M\n",
      "\u001b[32m[04/12 21:17:36 d2.utils.events]: \u001b[0m eta: 1:58:58  iter: 1419  total_loss: 1.551  loss_cls: 0.3432  loss_box_reg: 0.5032  loss_mask: 0.3018  loss_rpn_cls: 0.1152  loss_rpn_loc: 0.2424  time: 1.5147  data_time: 0.8506  lr: 0.001  max_mem: 6444M\n",
      "\u001b[32m[04/12 21:18:00 d2.utils.events]: \u001b[0m eta: 1:58:59  iter: 1439  total_loss: 1.575  loss_cls: 0.3687  loss_box_reg: 0.5594  loss_mask: 0.319  loss_rpn_cls: 0.09423  loss_rpn_loc: 0.2059  time: 1.5107  data_time: 0.6487  lr: 0.001  max_mem: 6444M\n",
      "\u001b[32m[04/12 21:18:17 d2.data.datasets.coco]: \u001b[0mLoaded 121 images in COCO format from ../input/sartoriusannotations/annotations_valid_f4.json\n",
      "\u001b[32m[04/12 21:18:18 d2.data.dataset_mapper]: \u001b[0m[DatasetMapper] Augmentations used in inference: [ResizeShortestEdge(short_edge_length=(800, 800), max_size=1333, sample_style='choice')]\n",
      "\u001b[32m[04/12 21:18:18 d2.data.common]: \u001b[0mSerializing 121 elements to byte tensors and concatenating them all ...\n",
      "\u001b[32m[04/12 21:18:18 d2.data.common]: \u001b[0mSerialized dataset takes 1.66 MiB\n",
      "\u001b[32m[04/12 21:18:18 d2.data.datasets.coco]: \u001b[0mLoaded 121 images in COCO format from ../input/sartoriusannotations/annotations_valid_f4.json\n",
      "\u001b[32m[04/12 21:18:18 d2.evaluation.evaluator]: \u001b[0mStart inference on 121 batches\n",
      "\u001b[32m[04/12 21:18:21 d2.evaluation.evaluator]: \u001b[0mInference done 11/121. Dataloading: 0.0015 s/iter. Inference: 0.0905 s/iter. Eval: 0.0350 s/iter. Total: 0.1271 s/iter. ETA=0:00:13\n",
      "\u001b[32m[04/12 21:18:26 d2.evaluation.evaluator]: \u001b[0mInference done 50/121. Dataloading: 0.0018 s/iter. Inference: 0.0909 s/iter. Eval: 0.0362 s/iter. Total: 0.1291 s/iter. ETA=0:00:09\n",
      "\u001b[32m[04/12 21:18:31 d2.evaluation.evaluator]: \u001b[0mInference done 86/121. Dataloading: 0.0023 s/iter. Inference: 0.0923 s/iter. Eval: 0.0390 s/iter. Total: 0.1337 s/iter. ETA=0:00:04\n",
      "\u001b[32m[04/12 21:18:35 d2.evaluation.evaluator]: \u001b[0mTotal inference time: 0:00:15.388522 (0.132660 s / iter per device, on 1 devices)\n",
      "\u001b[32m[04/12 21:18:35 d2.evaluation.evaluator]: \u001b[0mTotal inference pure compute time: 0:00:10 (0.091779 s / iter per device, on 1 devices)\n",
      "\u001b[32m[04/12 21:18:35 d2.engine.defaults]: \u001b[0mEvaluation results for sartorius_val in csv format:\n",
      "\u001b[32m[04/12 21:18:35 d2.evaluation.testing]: \u001b[0mcopypaste: MaP IoU=0.24197331535912736\n",
      "\u001b[32m[04/12 21:18:36 d2.engine.hooks]: \u001b[0mSaved best model as latest eval score for MaP IoU is 0.24197, better than last best score 0.22319 @ iteration 1209.\n",
      "\u001b[32m[04/12 21:18:46 d2.utils.events]: \u001b[0m eta: 1:57:27  iter: 1459  total_loss: 1.615  loss_cls: 0.3754  loss_box_reg: 0.5549  loss_mask: 0.3229  loss_rpn_cls: 0.1029  loss_rpn_loc: 0.2363  time: 1.5079  data_time: 0.6903  lr: 0.001  max_mem: 6444M\n",
      "\u001b[32m[04/12 21:19:20 d2.utils.events]: \u001b[0m eta: 1:57:03  iter: 1479  total_loss: 1.548  loss_cls: 0.3901  loss_box_reg: 0.5695  loss_mask: 0.3085  loss_rpn_cls: 0.123  loss_rpn_loc: 0.2217  time: 1.5108  data_time: 1.0443  lr: 0.001  max_mem: 6444M\n",
      "\u001b[32m[04/12 21:19:51 d2.utils.events]: \u001b[0m eta: 1:55:47  iter: 1499  total_loss: 1.528  loss_cls: 0.3493  loss_box_reg: 0.5445  loss_mask: 0.3023  loss_rpn_cls: 0.1112  loss_rpn_loc: 0.2172  time: 1.5110  data_time: 0.9196  lr: 0.001  max_mem: 6444M\n",
      "\u001b[32m[04/12 21:20:17 d2.utils.events]: \u001b[0m eta: 1:55:31  iter: 1519  total_loss: 1.561  loss_cls: 0.3532  loss_box_reg: 0.5762  loss_mask: 0.311  loss_rpn_cls: 0.08573  loss_rpn_loc: 0.2112  time: 1.5084  data_time: 0.6910  lr: 0.001  max_mem: 6444M\n",
      "\u001b[32m[04/12 21:20:45 d2.utils.events]: \u001b[0m eta: 1:54:12  iter: 1539  total_loss: 1.451  loss_cls: 0.3104  loss_box_reg: 0.5184  loss_mask: 0.3036  loss_rpn_cls: 0.08768  loss_rpn_loc: 0.2157  time: 1.5067  data_time: 0.7454  lr: 0.001  max_mem: 6444M\n",
      "\u001b[32m[04/12 21:21:15 d2.utils.events]: \u001b[0m eta: 1:53:25  iter: 1559  total_loss: 1.567  loss_cls: 0.3713  loss_box_reg: 0.5347  loss_mask: 0.3111  loss_rpn_cls: 0.09925  loss_rpn_loc: 0.2176  time: 1.5068  data_time: 0.8728  lr: 0.001  max_mem: 6444M\n",
      "\u001b[32m[04/12 21:21:48 d2.utils.events]: \u001b[0m eta: 1:53:26  iter: 1579  total_loss: 1.619  loss_cls: 0.3429  loss_box_reg: 0.5253  loss_mask: 0.3228  loss_rpn_cls: 0.1037  loss_rpn_loc: 0.2372  time: 1.5085  data_time: 0.9823  lr: 0.001  max_mem: 6444M\n",
      "\u001b[32m[04/12 21:22:23 d2.utils.events]: \u001b[0m eta: 1:53:41  iter: 1599  total_loss: 1.563  loss_cls: 0.3609  loss_box_reg: 0.5197  loss_mask: 0.3123  loss_rpn_cls: 0.1162  loss_rpn_loc: 0.2317  time: 1.5114  data_time: 1.0655  lr: 0.001  max_mem: 6444M\n",
      "\u001b[32m[04/12 21:22:51 d2.utils.events]: \u001b[0m eta: 1:53:59  iter: 1619  total_loss: 1.593  loss_cls: 0.4024  loss_box_reg: 0.5666  loss_mask: 0.3081  loss_rpn_cls: 0.1015  loss_rpn_loc: 0.1991  time: 1.5102  data_time: 0.7925  lr: 0.001  max_mem: 6444M\n",
      "\u001b[32m[04/12 21:23:18 d2.utils.events]: \u001b[0m eta: 1:53:30  iter: 1639  total_loss: 1.491  loss_cls: 0.3286  loss_box_reg: 0.5511  loss_mask: 0.3074  loss_rpn_cls: 0.08212  loss_rpn_loc: 0.2058  time: 1.5083  data_time: 0.7017  lr: 0.001  max_mem: 6444M\n",
      "\u001b[32m[04/12 21:24:01 d2.utils.events]: \u001b[0m eta: 1:54:23  iter: 1659  total_loss: 1.555  loss_cls: 0.3767  loss_box_reg: 0.4822  loss_mask: 0.3244  loss_rpn_cls: 0.1113  loss_rpn_loc: 0.2322  time: 1.5161  data_time: 1.4624  lr: 0.001  max_mem: 6515M\n",
      "\u001b[32m[04/12 21:24:30 d2.utils.events]: \u001b[0m eta: 1:53:22  iter: 1679  total_loss: 1.514  loss_cls: 0.3422  loss_box_reg: 0.5482  loss_mask: 0.3134  loss_rpn_cls: 0.0945  loss_rpn_loc: 0.2045  time: 1.5153  data_time: 0.8267  lr: 0.001  max_mem: 6515M\n",
      "\u001b[32m[04/12 21:24:48 d2.data.datasets.coco]: \u001b[0mLoaded 121 images in COCO format from ../input/sartoriusannotations/annotations_valid_f4.json\n",
      "\u001b[32m[04/12 21:24:48 d2.data.dataset_mapper]: \u001b[0m[DatasetMapper] Augmentations used in inference: [ResizeShortestEdge(short_edge_length=(800, 800), max_size=1333, sample_style='choice')]\n",
      "\u001b[32m[04/12 21:24:48 d2.data.common]: \u001b[0mSerializing 121 elements to byte tensors and concatenating them all ...\n",
      "\u001b[32m[04/12 21:24:48 d2.data.common]: \u001b[0mSerialized dataset takes 1.66 MiB\n",
      "\u001b[32m[04/12 21:24:48 d2.data.datasets.coco]: \u001b[0mLoaded 121 images in COCO format from ../input/sartoriusannotations/annotations_valid_f4.json\n",
      "\u001b[32m[04/12 21:24:49 d2.evaluation.evaluator]: \u001b[0mStart inference on 121 batches\n",
      "\u001b[32m[04/12 21:24:51 d2.evaluation.evaluator]: \u001b[0mInference done 11/121. Dataloading: 0.0014 s/iter. Inference: 0.0895 s/iter. Eval: 0.0333 s/iter. Total: 0.1242 s/iter. ETA=0:00:13\n",
      "\u001b[32m[04/12 21:24:56 d2.evaluation.evaluator]: \u001b[0mInference done 48/121. Dataloading: 0.0019 s/iter. Inference: 0.0922 s/iter. Eval: 0.0402 s/iter. Total: 0.1344 s/iter. ETA=0:00:09\n",
      "\u001b[32m[04/12 21:25:01 d2.evaluation.evaluator]: \u001b[0mInference done 87/121. Dataloading: 0.0021 s/iter. Inference: 0.0919 s/iter. Eval: 0.0393 s/iter. Total: 0.1334 s/iter. ETA=0:00:04\n",
      "\u001b[32m[04/12 21:25:06 d2.evaluation.evaluator]: \u001b[0mTotal inference time: 0:00:15.279545 (0.131720 s / iter per device, on 1 devices)\n",
      "\u001b[32m[04/12 21:25:06 d2.evaluation.evaluator]: \u001b[0mTotal inference pure compute time: 0:00:10 (0.091371 s / iter per device, on 1 devices)\n",
      "\u001b[32m[04/12 21:25:06 d2.engine.defaults]: \u001b[0mEvaluation results for sartorius_val in csv format:\n",
      "\u001b[32m[04/12 21:25:06 d2.evaluation.testing]: \u001b[0mcopypaste: MaP IoU=0.24028064531718013\n",
      "\u001b[32m[04/12 21:25:06 d2.engine.hooks]: \u001b[0mNot saving as latest eval score for MaP IoU is 0.24028, not better than best score 0.24197 @ iteration 1451.\n",
      "\u001b[32m[04/12 21:25:13 d2.utils.events]: \u001b[0m eta: 1:52:41  iter: 1699  total_loss: 1.484  loss_cls: 0.3099  loss_box_reg: 0.5159  loss_mask: 0.3045  loss_rpn_cls: 0.07288  loss_rpn_loc: 0.2062  time: 1.5118  data_time: 0.5826  lr: 0.001  max_mem: 6515M\n",
      "\u001b[32m[04/12 21:25:49 d2.utils.events]: \u001b[0m eta: 1:52:49  iter: 1719  total_loss: 1.6  loss_cls: 0.4056  loss_box_reg: 0.5297  loss_mask: 0.306  loss_rpn_cls: 0.1014  loss_rpn_loc: 0.2226  time: 1.5148  data_time: 1.1101  lr: 0.001  max_mem: 6515M\n",
      "\u001b[32m[04/12 21:26:19 d2.utils.events]: \u001b[0m eta: 1:52:21  iter: 1739  total_loss: 1.59  loss_cls: 0.3954  loss_box_reg: 0.5263  loss_mask: 0.3127  loss_rpn_cls: 0.1065  loss_rpn_loc: 0.2005  time: 1.5148  data_time: 0.8611  lr: 0.001  max_mem: 6515M\n",
      "\u001b[32m[04/12 21:26:45 d2.utils.events]: \u001b[0m eta: 1:51:02  iter: 1759  total_loss: 1.494  loss_cls: 0.3477  loss_box_reg: 0.5359  loss_mask: 0.2948  loss_rpn_cls: 0.1032  loss_rpn_loc: 0.2078  time: 1.5121  data_time: 0.6445  lr: 0.001  max_mem: 6515M\n",
      "\u001b[32m[04/12 21:27:13 d2.utils.events]: \u001b[0m eta: 1:50:32  iter: 1779  total_loss: 1.507  loss_cls: 0.3463  loss_box_reg: 0.5346  loss_mask: 0.3183  loss_rpn_cls: 0.08976  loss_rpn_loc: 0.1956  time: 1.5106  data_time: 0.7537  lr: 0.001  max_mem: 6515M\n",
      "\u001b[32m[04/12 21:27:43 d2.utils.events]: \u001b[0m eta: 1:49:19  iter: 1799  total_loss: 1.455  loss_cls: 0.3317  loss_box_reg: 0.4938  loss_mask: 0.2962  loss_rpn_cls: 0.08381  loss_rpn_loc: 0.1962  time: 1.5105  data_time: 0.9104  lr: 0.001  max_mem: 6515M\n",
      "\u001b[32m[04/12 21:28:17 d2.utils.events]: \u001b[0m eta: 1:49:54  iter: 1819  total_loss: 1.547  loss_cls: 0.3739  loss_box_reg: 0.549  loss_mask: 0.3258  loss_rpn_cls: 0.1219  loss_rpn_loc: 0.227  time: 1.5126  data_time: 1.0362  lr: 0.001  max_mem: 6515M\n",
      "\u001b[32m[04/12 21:28:47 d2.utils.events]: \u001b[0m eta: 1:50:20  iter: 1839  total_loss: 1.503  loss_cls: 0.3381  loss_box_reg: 0.535  loss_mask: 0.3187  loss_rpn_cls: 0.09026  loss_rpn_loc: 0.2071  time: 1.5124  data_time: 0.8514  lr: 0.001  max_mem: 6515M\n",
      "\u001b[32m[04/12 21:29:26 d2.utils.events]: \u001b[0m eta: 1:50:30  iter: 1859  total_loss: 1.493  loss_cls: 0.3827  loss_box_reg: 0.4985  loss_mask: 0.3073  loss_rpn_cls: 0.1277  loss_rpn_loc: 0.2415  time: 1.5174  data_time: 1.3138  lr: 0.001  max_mem: 6515M\n",
      "\u001b[32m[04/12 21:29:56 d2.utils.events]: \u001b[0m eta: 1:50:38  iter: 1879  total_loss: 1.505  loss_cls: 0.3694  loss_box_reg: 0.5305  loss_mask: 0.3152  loss_rpn_cls: 0.09297  loss_rpn_loc: 0.2163  time: 1.5171  data_time: 0.8414  lr: 0.001  max_mem: 6515M\n",
      "\u001b[32m[04/12 21:30:27 d2.utils.events]: \u001b[0m eta: 1:50:46  iter: 1899  total_loss: 1.525  loss_cls: 0.3698  loss_box_reg: 0.5371  loss_mask: 0.3139  loss_rpn_cls: 0.09826  loss_rpn_loc: 0.2292  time: 1.5174  data_time: 0.9090  lr: 0.001  max_mem: 6515M\n",
      "\u001b[32m[04/12 21:30:56 d2.utils.events]: \u001b[0m eta: 1:49:57  iter: 1919  total_loss: 1.491  loss_cls: 0.3378  loss_box_reg: 0.5488  loss_mask: 0.3063  loss_rpn_cls: 0.07722  loss_rpn_loc: 0.2001  time: 1.5170  data_time: 0.8584  lr: 0.001  max_mem: 6515M\n",
      "\u001b[32m[04/12 21:31:21 d2.data.datasets.coco]: \u001b[0mLoaded 121 images in COCO format from ../input/sartoriusannotations/annotations_valid_f4.json\n",
      "\u001b[32m[04/12 21:31:22 d2.data.dataset_mapper]: \u001b[0m[DatasetMapper] Augmentations used in inference: [ResizeShortestEdge(short_edge_length=(800, 800), max_size=1333, sample_style='choice')]\n",
      "\u001b[32m[04/12 21:31:22 d2.data.common]: \u001b[0mSerializing 121 elements to byte tensors and concatenating them all ...\n",
      "\u001b[32m[04/12 21:31:22 d2.data.common]: \u001b[0mSerialized dataset takes 1.66 MiB\n",
      "\u001b[32m[04/12 21:31:22 d2.data.datasets.coco]: \u001b[0mLoaded 121 images in COCO format from ../input/sartoriusannotations/annotations_valid_f4.json\n",
      "\u001b[32m[04/12 21:31:23 d2.evaluation.evaluator]: \u001b[0mStart inference on 121 batches\n",
      "\u001b[32m[04/12 21:31:24 d2.evaluation.evaluator]: \u001b[0mInference done 11/121. Dataloading: 0.0014 s/iter. Inference: 0.0903 s/iter. Eval: 0.0342 s/iter. Total: 0.1259 s/iter. ETA=0:00:13\n",
      "\u001b[32m[04/12 21:31:29 d2.evaluation.evaluator]: \u001b[0mInference done 50/121. Dataloading: 0.0022 s/iter. Inference: 0.0911 s/iter. Eval: 0.0364 s/iter. Total: 0.1297 s/iter. ETA=0:00:09\n",
      "\u001b[32m[04/12 21:31:35 d2.evaluation.evaluator]: \u001b[0mInference done 87/121. Dataloading: 0.0025 s/iter. Inference: 0.0922 s/iter. Eval: 0.0383 s/iter. Total: 0.1330 s/iter. ETA=0:00:04\n",
      "\u001b[32m[04/12 21:31:39 d2.evaluation.evaluator]: \u001b[0mTotal inference time: 0:00:15.260842 (0.131559 s / iter per device, on 1 devices)\n",
      "\u001b[32m[04/12 21:31:39 d2.evaluation.evaluator]: \u001b[0mTotal inference pure compute time: 0:00:10 (0.091503 s / iter per device, on 1 devices)\n",
      "\u001b[32m[04/12 21:31:39 d2.engine.defaults]: \u001b[0mEvaluation results for sartorius_val in csv format:\n",
      "\u001b[32m[04/12 21:31:39 d2.evaluation.testing]: \u001b[0mcopypaste: MaP IoU=0.2416311242129682\n",
      "\u001b[32m[04/12 21:31:39 d2.engine.hooks]: \u001b[0mNot saving as latest eval score for MaP IoU is 0.24163, not better than best score 0.24197 @ iteration 1451.\n",
      "\u001b[32m[04/12 21:31:42 d2.utils.events]: \u001b[0m eta: 1:49:07  iter: 1939  total_loss: 1.535  loss_cls: 0.3435  loss_box_reg: 0.5574  loss_mask: 0.3035  loss_rpn_cls: 0.1049  loss_rpn_loc: 0.2228  time: 1.5152  data_time: 0.7066  lr: 0.001  max_mem: 6515M\n",
      "\u001b[32m[04/12 21:32:05 d2.utils.events]: \u001b[0m eta: 1:48:32  iter: 1959  total_loss: 1.487  loss_cls: 0.3388  loss_box_reg: 0.546  loss_mask: 0.321  loss_rpn_cls: 0.08688  loss_rpn_loc: 0.2132  time: 1.5116  data_time: 0.5203  lr: 0.001  max_mem: 6515M\n",
      "\u001b[32m[04/12 21:32:29 d2.utils.events]: \u001b[0m eta: 1:47:53  iter: 1979  total_loss: 1.51  loss_cls: 0.3361  loss_box_reg: 0.5499  loss_mask: 0.3046  loss_rpn_cls: 0.0899  loss_rpn_loc: 0.2016  time: 1.5086  data_time: 0.5941  lr: 0.001  max_mem: 6515M\n",
      "\u001b[32m[04/12 21:33:01 d2.utils.events]: \u001b[0m eta: 1:47:59  iter: 1999  total_loss: 1.525  loss_cls: 0.3679  loss_box_reg: 0.5161  loss_mask: 0.3062  loss_rpn_cls: 0.09555  loss_rpn_loc: 0.2273  time: 1.5094  data_time: 0.9284  lr: 0.001  max_mem: 6515M\n",
      "\u001b[32m[04/12 21:33:42 d2.utils.events]: \u001b[0m eta: 1:48:14  iter: 2019  total_loss: 1.487  loss_cls: 0.3466  loss_box_reg: 0.5238  loss_mask: 0.3112  loss_rpn_cls: 0.1062  loss_rpn_loc: 0.2142  time: 1.5147  data_time: 1.3409  lr: 0.001  max_mem: 6515M\n",
      "\u001b[32m[04/12 21:34:04 d2.utils.events]: \u001b[0m eta: 1:47:27  iter: 2039  total_loss: 1.511  loss_cls: 0.3375  loss_box_reg: 0.5727  loss_mask: 0.3081  loss_rpn_cls: 0.08159  loss_rpn_loc: 0.1986  time: 1.5105  data_time: 0.4727  lr: 0.001  max_mem: 6515M\n",
      "\u001b[32m[04/12 21:34:27 d2.utils.events]: \u001b[0m eta: 1:46:40  iter: 2059  total_loss: 1.515  loss_cls: 0.3628  loss_box_reg: 0.5542  loss_mask: 0.3095  loss_rpn_cls: 0.07025  loss_rpn_loc: 0.2216  time: 1.5071  data_time: 0.5483  lr: 0.001  max_mem: 6515M\n",
      "\u001b[32m[04/12 21:35:04 d2.utils.events]: \u001b[0m eta: 1:46:42  iter: 2079  total_loss: 1.53  loss_cls: 0.3528  loss_box_reg: 0.5411  loss_mask: 0.3162  loss_rpn_cls: 0.1062  loss_rpn_loc: 0.2081  time: 1.5106  data_time: 1.2107  lr: 0.001  max_mem: 6515M\n",
      "\u001b[32m[04/12 21:35:34 d2.utils.events]: \u001b[0m eta: 1:45:43  iter: 2099  total_loss: 1.436  loss_cls: 0.3401  loss_box_reg: 0.5227  loss_mask: 0.3089  loss_rpn_cls: 0.08939  loss_rpn_loc: 0.1907  time: 1.5105  data_time: 0.8826  lr: 0.001  max_mem: 6515M\n",
      "\u001b[32m[04/12 21:36:00 d2.utils.events]: \u001b[0m eta: 1:45:17  iter: 2119  total_loss: 1.502  loss_cls: 0.3428  loss_box_reg: 0.5629  loss_mask: 0.2987  loss_rpn_cls: 0.1056  loss_rpn_loc: 0.1975  time: 1.5084  data_time: 0.6505  lr: 0.001  max_mem: 6515M\n",
      "\u001b[32m[04/12 21:36:37 d2.utils.events]: \u001b[0m eta: 1:45:41  iter: 2139  total_loss: 1.526  loss_cls: 0.3241  loss_box_reg: 0.5209  loss_mask: 0.3152  loss_rpn_cls: 0.1011  loss_rpn_loc: 0.2332  time: 1.5115  data_time: 1.2065  lr: 0.001  max_mem: 6594M\n",
      "\u001b[32m[04/12 21:37:07 d2.utils.events]: \u001b[0m eta: 1:45:38  iter: 2159  total_loss: 1.66  loss_cls: 0.3724  loss_box_reg: 0.5587  loss_mask: 0.3283  loss_rpn_cls: 0.1232  loss_rpn_loc: 0.2222  time: 1.5115  data_time: 0.8642  lr: 0.001  max_mem: 6594M\n",
      "\u001b[32m[04/12 21:37:42 d2.data.datasets.coco]: \u001b[0mLoaded 121 images in COCO format from ../input/sartoriusannotations/annotations_valid_f4.json\n",
      "\u001b[32m[04/12 21:37:43 d2.data.dataset_mapper]: \u001b[0m[DatasetMapper] Augmentations used in inference: [ResizeShortestEdge(short_edge_length=(800, 800), max_size=1333, sample_style='choice')]\n",
      "\u001b[32m[04/12 21:37:43 d2.data.common]: \u001b[0mSerializing 121 elements to byte tensors and concatenating them all ...\n",
      "\u001b[32m[04/12 21:37:43 d2.data.common]: \u001b[0mSerialized dataset takes 1.66 MiB\n",
      "\u001b[32m[04/12 21:37:44 d2.data.datasets.coco]: \u001b[0mLoaded 121 images in COCO format from ../input/sartoriusannotations/annotations_valid_f4.json\n",
      "\u001b[32m[04/12 21:37:44 d2.evaluation.evaluator]: \u001b[0mStart inference on 121 batches\n",
      "\u001b[32m[04/12 21:37:46 d2.evaluation.evaluator]: \u001b[0mInference done 11/121. Dataloading: 0.0052 s/iter. Inference: 0.0961 s/iter. Eval: 0.0471 s/iter. Total: 0.1484 s/iter. ETA=0:00:16\n",
      "\u001b[32m[04/12 21:37:51 d2.evaluation.evaluator]: \u001b[0mInference done 48/121. Dataloading: 0.0029 s/iter. Inference: 0.0941 s/iter. Eval: 0.0428 s/iter. Total: 0.1399 s/iter. ETA=0:00:10\n",
      "\u001b[32m[04/12 21:37:56 d2.evaluation.evaluator]: \u001b[0mInference done 87/121. Dataloading: 0.0027 s/iter. Inference: 0.0929 s/iter. Eval: 0.0410 s/iter. Total: 0.1367 s/iter. ETA=0:00:04\n",
      "\u001b[32m[04/12 21:38:01 d2.evaluation.evaluator]: \u001b[0mTotal inference time: 0:00:15.998669 (0.137920 s / iter per device, on 1 devices)\n",
      "\u001b[32m[04/12 21:38:01 d2.evaluation.evaluator]: \u001b[0mTotal inference pure compute time: 0:00:10 (0.093145 s / iter per device, on 1 devices)\n",
      "\u001b[32m[04/12 21:38:01 d2.engine.defaults]: \u001b[0mEvaluation results for sartorius_val in csv format:\n",
      "\u001b[32m[04/12 21:38:01 d2.evaluation.testing]: \u001b[0mcopypaste: MaP IoU=0.2536280642168237\n",
      "\u001b[32m[04/12 21:38:01 d2.engine.hooks]: \u001b[0mSaved best model as latest eval score for MaP IoU is 0.25363, better than last best score 0.24197 @ iteration 1451.\n",
      "\u001b[32m[04/12 21:38:03 d2.utils.events]: \u001b[0m eta: 1:45:38  iter: 2179  total_loss: 1.486  loss_cls: 0.3465  loss_box_reg: 0.5145  loss_mask: 0.3102  loss_rpn_cls: 0.09132  loss_rpn_loc: 0.2119  time: 1.5142  data_time: 1.1429  lr: 0.001  max_mem: 6594M\n",
      "\u001b[32m[04/12 21:38:38 d2.utils.events]: \u001b[0m eta: 1:45:03  iter: 2199  total_loss: 1.433  loss_cls: 0.3087  loss_box_reg: 0.5072  loss_mask: 0.3097  loss_rpn_cls: 0.09018  loss_rpn_loc: 0.2058  time: 1.5160  data_time: 1.0363  lr: 0.001  max_mem: 6594M\n",
      "\u001b[32m[04/12 21:39:04 d2.utils.events]: \u001b[0m eta: 1:44:39  iter: 2219  total_loss: 1.406  loss_cls: 0.3007  loss_box_reg: 0.5047  loss_mask: 0.3121  loss_rpn_cls: 0.06878  loss_rpn_loc: 0.2054  time: 1.5141  data_time: 0.6834  lr: 0.001  max_mem: 6594M\n",
      "\u001b[32m[04/12 21:39:38 d2.utils.events]: \u001b[0m eta: 1:44:23  iter: 2239  total_loss: 1.488  loss_cls: 0.351  loss_box_reg: 0.5162  loss_mask: 0.3153  loss_rpn_cls: 0.1006  loss_rpn_loc: 0.2218  time: 1.5160  data_time: 1.0867  lr: 0.001  max_mem: 6594M\n",
      "\u001b[32m[04/12 21:40:02 d2.utils.events]: \u001b[0m eta: 1:44:05  iter: 2259  total_loss: 1.504  loss_cls: 0.3443  loss_box_reg: 0.5328  loss_mask: 0.3138  loss_rpn_cls: 0.0688  loss_rpn_loc: 0.1949  time: 1.5131  data_time: 0.5748  lr: 0.001  max_mem: 6594M\n",
      "\u001b[32m[04/12 21:40:31 d2.utils.events]: \u001b[0m eta: 1:43:46  iter: 2279  total_loss: 1.565  loss_cls: 0.3763  loss_box_reg: 0.5251  loss_mask: 0.3138  loss_rpn_cls: 0.1103  loss_rpn_loc: 0.2191  time: 1.5126  data_time: 0.8310  lr: 0.001  max_mem: 6594M\n",
      "\u001b[32m[04/12 21:41:13 d2.utils.events]: \u001b[0m eta: 1:43:42  iter: 2299  total_loss: 1.644  loss_cls: 0.4124  loss_box_reg: 0.568  loss_mask: 0.3152  loss_rpn_cls: 0.1186  loss_rpn_loc: 0.2334  time: 1.5175  data_time: 1.3735  lr: 0.001  max_mem: 6594M\n",
      "\u001b[32m[04/12 21:41:43 d2.utils.events]: \u001b[0m eta: 1:44:04  iter: 2319  total_loss: 1.55  loss_cls: 0.3205  loss_box_reg: 0.5312  loss_mask: 0.3231  loss_rpn_cls: 0.09997  loss_rpn_loc: 0.2245  time: 1.5176  data_time: 0.8779  lr: 0.001  max_mem: 6594M\n",
      "\u001b[32m[04/12 21:42:17 d2.utils.events]: \u001b[0m eta: 1:43:28  iter: 2339  total_loss: 1.517  loss_cls: 0.3719  loss_box_reg: 0.541  loss_mask: 0.3152  loss_rpn_cls: 0.0994  loss_rpn_loc: 0.2026  time: 1.5189  data_time: 1.0104  lr: 0.001  max_mem: 6594M\n",
      "\u001b[32m[04/12 21:42:48 d2.utils.events]: \u001b[0m eta: 1:43:19  iter: 2359  total_loss: 1.422  loss_cls: 0.2933  loss_box_reg: 0.5067  loss_mask: 0.3018  loss_rpn_cls: 0.07947  loss_rpn_loc: 0.2214  time: 1.5193  data_time: 0.9004  lr: 0.001  max_mem: 6594M\n",
      "\u001b[32m[04/12 21:43:15 d2.utils.events]: \u001b[0m eta: 1:42:38  iter: 2379  total_loss: 1.515  loss_cls: 0.3423  loss_box_reg: 0.5345  loss_mask: 0.2946  loss_rpn_cls: 0.06903  loss_rpn_loc: 0.2099  time: 1.5180  data_time: 0.7108  lr: 0.001  max_mem: 6594M\n",
      "\u001b[32m[04/12 21:43:35 d2.utils.events]: \u001b[0m eta: 1:42:12  iter: 2399  total_loss: 1.534  loss_cls: 0.3509  loss_box_reg: 0.5352  loss_mask: 0.2971  loss_rpn_cls: 0.08726  loss_rpn_loc: 0.2122  time: 1.5135  data_time: 0.3733  lr: 0.001  max_mem: 6594M\n",
      "\u001b[32m[04/12 21:44:08 d2.data.datasets.coco]: \u001b[0mLoaded 121 images in COCO format from ../input/sartoriusannotations/annotations_valid_f4.json\n",
      "\u001b[32m[04/12 21:44:08 d2.data.dataset_mapper]: \u001b[0m[DatasetMapper] Augmentations used in inference: [ResizeShortestEdge(short_edge_length=(800, 800), max_size=1333, sample_style='choice')]\n",
      "\u001b[32m[04/12 21:44:08 d2.data.common]: \u001b[0mSerializing 121 elements to byte tensors and concatenating them all ...\n",
      "\u001b[32m[04/12 21:44:08 d2.data.common]: \u001b[0mSerialized dataset takes 1.66 MiB\n",
      "\u001b[32m[04/12 21:44:09 d2.data.datasets.coco]: \u001b[0mLoaded 121 images in COCO format from ../input/sartoriusannotations/annotations_valid_f4.json\n",
      "\u001b[32m[04/12 21:44:09 d2.evaluation.evaluator]: \u001b[0mStart inference on 121 batches\n",
      "\u001b[32m[04/12 21:44:11 d2.evaluation.evaluator]: \u001b[0mInference done 11/121. Dataloading: 0.0016 s/iter. Inference: 0.0911 s/iter. Eval: 0.0346 s/iter. Total: 0.1273 s/iter. ETA=0:00:14\n",
      "\u001b[32m[04/12 21:44:16 d2.evaluation.evaluator]: \u001b[0mInference done 51/121. Dataloading: 0.0020 s/iter. Inference: 0.0903 s/iter. Eval: 0.0342 s/iter. Total: 0.1266 s/iter. ETA=0:00:08\n",
      "\u001b[32m[04/12 21:44:21 d2.evaluation.evaluator]: \u001b[0mInference done 90/121. Dataloading: 0.0023 s/iter. Inference: 0.0912 s/iter. Eval: 0.0358 s/iter. Total: 0.1294 s/iter. ETA=0:00:04\n",
      "\u001b[32m[04/12 21:44:25 d2.evaluation.evaluator]: \u001b[0mTotal inference time: 0:00:14.873690 (0.128221 s / iter per device, on 1 devices)\n",
      "\u001b[32m[04/12 21:44:25 d2.evaluation.evaluator]: \u001b[0mTotal inference pure compute time: 0:00:10 (0.090732 s / iter per device, on 1 devices)\n",
      "\u001b[32m[04/12 21:44:25 d2.engine.defaults]: \u001b[0mEvaluation results for sartorius_val in csv format:\n",
      "\u001b[32m[04/12 21:44:25 d2.evaluation.testing]: \u001b[0mcopypaste: MaP IoU=0.252132643524199\n",
      "\u001b[32m[04/12 21:44:25 d2.engine.hooks]: \u001b[0mNot saving as latest eval score for MaP IoU is 0.25213, not better than best score 0.25363 @ iteration 2177.\n",
      "\u001b[32m[04/12 21:44:25 d2.utils.events]: \u001b[0m eta: 1:41:55  iter: 2419  total_loss: 1.559  loss_cls: 0.3729  loss_box_reg: 0.5391  loss_mask: 0.3032  loss_rpn_cls: 0.1087  loss_rpn_loc: 0.2294  time: 1.5141  data_time: 0.9448  lr: 0.001  max_mem: 6594M\n",
      "\u001b[32m[04/12 21:44:55 d2.utils.events]: \u001b[0m eta: 1:41:32  iter: 2439  total_loss: 1.38  loss_cls: 0.317  loss_box_reg: 0.5024  loss_mask: 0.3048  loss_rpn_cls: 0.0687  loss_rpn_loc: 0.1896  time: 1.5139  data_time: 0.8352  lr: 0.001  max_mem: 6594M\n",
      "\u001b[32m[04/12 21:45:31 d2.utils.events]: \u001b[0m eta: 1:41:23  iter: 2459  total_loss: 1.57  loss_cls: 0.3659  loss_box_reg: 0.5332  loss_mask: 0.3112  loss_rpn_cls: 0.09938  loss_rpn_loc: 0.2296  time: 1.5161  data_time: 1.1203  lr: 0.001  max_mem: 6594M\n",
      "\u001b[32m[04/12 21:46:07 d2.utils.events]: \u001b[0m eta: 1:41:07  iter: 2479  total_loss: 1.425  loss_cls: 0.3103  loss_box_reg: 0.505  loss_mask: 0.3006  loss_rpn_cls: 0.08342  loss_rpn_loc: 0.2191  time: 1.5184  data_time: 1.1056  lr: 0.001  max_mem: 6594M\n",
      "\u001b[32m[04/12 21:46:36 d2.utils.events]: \u001b[0m eta: 1:40:51  iter: 2499  total_loss: 1.523  loss_cls: 0.3277  loss_box_reg: 0.531  loss_mask: 0.299  loss_rpn_cls: 0.09762  loss_rpn_loc: 0.2156  time: 1.5179  data_time: 0.8384  lr: 0.001  max_mem: 6594M\n",
      "\u001b[32m[04/12 21:47:00 d2.utils.events]: \u001b[0m eta: 1:40:45  iter: 2519  total_loss: 1.498  loss_cls: 0.3614  loss_box_reg: 0.5635  loss_mask: 0.3263  loss_rpn_cls: 0.1031  loss_rpn_loc: 0.1955  time: 1.5155  data_time: 0.5974  lr: 0.001  max_mem: 6594M\n",
      "\u001b[32m[04/12 21:47:29 d2.utils.events]: \u001b[0m eta: 1:40:53  iter: 2539  total_loss: 1.38  loss_cls: 0.2958  loss_box_reg: 0.5257  loss_mask: 0.3022  loss_rpn_cls: 0.06221  loss_rpn_loc: 0.1884  time: 1.5149  data_time: 0.8024  lr: 0.001  max_mem: 6594M\n",
      "\u001b[32m[04/12 21:47:59 d2.utils.events]: \u001b[0m eta: 1:40:59  iter: 2559  total_loss: 1.498  loss_cls: 0.3513  loss_box_reg: 0.5494  loss_mask: 0.3101  loss_rpn_cls: 0.07805  loss_rpn_loc: 0.2116  time: 1.5148  data_time: 0.8838  lr: 0.001  max_mem: 6594M\n",
      "\u001b[32m[04/12 21:48:42 d2.utils.events]: \u001b[0m eta: 1:41:14  iter: 2579  total_loss: 1.554  loss_cls: 0.3655  loss_box_reg: 0.5292  loss_mask: 0.3258  loss_rpn_cls: 0.1178  loss_rpn_loc: 0.2159  time: 1.5197  data_time: 1.4518  lr: 0.001  max_mem: 6594M\n",
      "\u001b[32m[04/12 21:49:13 d2.utils.events]: \u001b[0m eta: 1:40:26  iter: 2599  total_loss: 1.537  loss_cls: 0.3631  loss_box_reg: 0.5328  loss_mask: 0.3149  loss_rpn_cls: 0.1079  loss_rpn_loc: 0.2312  time: 1.5198  data_time: 0.8761  lr: 0.001  max_mem: 6594M\n",
      "\u001b[32m[04/12 21:49:37 d2.utils.events]: \u001b[0m eta: 1:40:10  iter: 2619  total_loss: 1.57  loss_cls: 0.355  loss_box_reg: 0.5477  loss_mask: 0.3095  loss_rpn_cls: 0.09432  loss_rpn_loc: 0.2112  time: 1.5175  data_time: 0.5952  lr: 0.001  max_mem: 6594M\n",
      "\u001b[32m[04/12 21:50:06 d2.utils.events]: \u001b[0m eta: 1:39:58  iter: 2639  total_loss: 1.565  loss_cls: 0.4041  loss_box_reg: 0.5362  loss_mask: 0.3044  loss_rpn_cls: 0.08534  loss_rpn_loc: 0.2103  time: 1.5167  data_time: 0.7764  lr: 0.001  max_mem: 6594M\n",
      "\u001b[32m[04/12 21:50:43 d2.utils.events]: \u001b[0m eta: 1:39:17  iter: 2659  total_loss: 1.519  loss_cls: 0.3769  loss_box_reg: 0.5279  loss_mask: 0.3106  loss_rpn_cls: 0.1068  loss_rpn_loc: 0.2194  time: 1.5192  data_time: 1.1918  lr: 0.001  max_mem: 6594M\n",
      "\u001b[32m[04/12 21:50:45 d2.data.datasets.coco]: \u001b[0mLoaded 121 images in COCO format from ../input/sartoriusannotations/annotations_valid_f4.json\n",
      "\u001b[32m[04/12 21:50:45 d2.data.dataset_mapper]: \u001b[0m[DatasetMapper] Augmentations used in inference: [ResizeShortestEdge(short_edge_length=(800, 800), max_size=1333, sample_style='choice')]\n",
      "\u001b[32m[04/12 21:50:45 d2.data.common]: \u001b[0mSerializing 121 elements to byte tensors and concatenating them all ...\n",
      "\u001b[32m[04/12 21:50:45 d2.data.common]: \u001b[0mSerialized dataset takes 1.66 MiB\n",
      "\u001b[32m[04/12 21:50:45 d2.data.datasets.coco]: \u001b[0mLoaded 121 images in COCO format from ../input/sartoriusannotations/annotations_valid_f4.json\n",
      "\u001b[32m[04/12 21:50:46 d2.evaluation.evaluator]: \u001b[0mStart inference on 121 batches\n",
      "\u001b[32m[04/12 21:50:48 d2.evaluation.evaluator]: \u001b[0mInference done 11/121. Dataloading: 0.0020 s/iter. Inference: 0.0911 s/iter. Eval: 0.0346 s/iter. Total: 0.1278 s/iter. ETA=0:00:14\n",
      "\u001b[32m[04/12 21:50:53 d2.evaluation.evaluator]: \u001b[0mInference done 50/121. Dataloading: 0.0021 s/iter. Inference: 0.0910 s/iter. Eval: 0.0359 s/iter. Total: 0.1292 s/iter. ETA=0:00:09\n",
      "\u001b[32m[04/12 21:50:58 d2.evaluation.evaluator]: \u001b[0mInference done 87/121. Dataloading: 0.0021 s/iter. Inference: 0.0921 s/iter. Eval: 0.0378 s/iter. Total: 0.1321 s/iter. ETA=0:00:04\n",
      "\u001b[32m[04/12 21:51:02 d2.evaluation.evaluator]: \u001b[0mTotal inference time: 0:00:15.152701 (0.130627 s / iter per device, on 1 devices)\n",
      "\u001b[32m[04/12 21:51:02 d2.evaluation.evaluator]: \u001b[0mTotal inference pure compute time: 0:00:10 (0.091553 s / iter per device, on 1 devices)\n",
      "\u001b[32m[04/12 21:51:02 d2.engine.defaults]: \u001b[0mEvaluation results for sartorius_val in csv format:\n",
      "\u001b[32m[04/12 21:51:02 d2.evaluation.testing]: \u001b[0mcopypaste: MaP IoU=0.2561204292571362\n",
      "\u001b[32m[04/12 21:51:02 d2.engine.hooks]: \u001b[0mSaved best model as latest eval score for MaP IoU is 0.25612, better than last best score 0.25363 @ iteration 2177.\n",
      "\u001b[32m[04/12 21:51:33 d2.utils.events]: \u001b[0m eta: 1:39:21  iter: 2679  total_loss: 1.516  loss_cls: 0.3461  loss_box_reg: 0.5543  loss_mask: 0.3191  loss_rpn_cls: 0.09984  loss_rpn_loc: 0.218  time: 1.5198  data_time: 0.9683  lr: 0.001  max_mem: 6594M\n",
      "\u001b[32m[04/12 21:52:02 d2.utils.events]: \u001b[0m eta: 1:39:25  iter: 2699  total_loss: 1.544  loss_cls: 0.3792  loss_box_reg: 0.5356  loss_mask: 0.3108  loss_rpn_cls: 0.07992  loss_rpn_loc: 0.2177  time: 1.5194  data_time: 0.8072  lr: 0.001  max_mem: 6594M\n",
      "\u001b[32m[04/12 21:52:33 d2.utils.events]: \u001b[0m eta: 1:38:53  iter: 2719  total_loss: 1.518  loss_cls: 0.3646  loss_box_reg: 0.525  loss_mask: 0.3209  loss_rpn_cls: 0.1172  loss_rpn_loc: 0.2117  time: 1.5194  data_time: 0.8991  lr: 0.001  max_mem: 6594M\n",
      "\u001b[32m[04/12 21:53:09 d2.utils.events]: \u001b[0m eta: 1:39:10  iter: 2739  total_loss: 1.498  loss_cls: 0.3188  loss_box_reg: 0.4935  loss_mask: 0.3048  loss_rpn_cls: 0.1095  loss_rpn_loc: 0.2145  time: 1.5214  data_time: 1.1570  lr: 0.001  max_mem: 6594M\n",
      "\u001b[32m[04/12 21:53:46 d2.utils.events]: \u001b[0m eta: 1:38:36  iter: 2759  total_loss: 1.521  loss_cls: 0.3168  loss_box_reg: 0.5279  loss_mask: 0.3119  loss_rpn_cls: 0.08433  loss_rpn_loc: 0.2146  time: 1.5238  data_time: 1.1807  lr: 0.001  max_mem: 6594M\n",
      "\u001b[32m[04/12 21:54:07 d2.utils.events]: \u001b[0m eta: 1:38:04  iter: 2779  total_loss: 1.419  loss_cls: 0.3275  loss_box_reg: 0.5309  loss_mask: 0.2971  loss_rpn_cls: 0.07187  loss_rpn_loc: 0.1979  time: 1.5204  data_time: 0.4211  lr: 0.001  max_mem: 6594M\n",
      "\u001b[32m[04/12 21:54:32 d2.utils.events]: \u001b[0m eta: 1:38:21  iter: 2799  total_loss: 1.466  loss_cls: 0.3303  loss_box_reg: 0.5362  loss_mask: 0.3053  loss_rpn_cls: 0.08774  loss_rpn_loc: 0.2247  time: 1.5185  data_time: 0.6382  lr: 0.001  max_mem: 6594M\n",
      "\u001b[32m[04/12 21:55:02 d2.utils.events]: \u001b[0m eta: 1:38:05  iter: 2819  total_loss: 1.606  loss_cls: 0.3475  loss_box_reg: 0.544  loss_mask: 0.3181  loss_rpn_cls: 0.09935  loss_rpn_loc: 0.2074  time: 1.5184  data_time: 0.8553  lr: 0.001  max_mem: 6594M\n",
      "\u001b[32m[04/12 21:55:46 d2.utils.events]: \u001b[0m eta: 1:38:03  iter: 2839  total_loss: 1.586  loss_cls: 0.389  loss_box_reg: 0.5254  loss_mask: 0.312  loss_rpn_cls: 0.1087  loss_rpn_loc: 0.2111  time: 1.5233  data_time: 1.5096  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 21:56:26 d2.utils.events]: \u001b[0m eta: 1:37:58  iter: 2859  total_loss: 1.543  loss_cls: 0.3765  loss_box_reg: 0.5309  loss_mask: 0.31  loss_rpn_cls: 0.1089  loss_rpn_loc: 0.2297  time: 1.5266  data_time: 1.3351  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 21:56:48 d2.utils.events]: \u001b[0m eta: 1:36:43  iter: 2879  total_loss: 1.454  loss_cls: 0.3185  loss_box_reg: 0.5247  loss_mask: 0.3035  loss_rpn_cls: 0.06072  loss_rpn_loc: 0.1868  time: 1.5236  data_time: 0.4854  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 21:57:07 d2.utils.events]: \u001b[0m eta: 1:35:50  iter: 2899  total_loss: 1.349  loss_cls: 0.2594  loss_box_reg: 0.5247  loss_mask: 0.2859  loss_rpn_cls: 0.0637  loss_rpn_loc: 0.2125  time: 1.5197  data_time: 0.3485  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 21:57:12 d2.data.datasets.coco]: \u001b[0mLoaded 121 images in COCO format from ../input/sartoriusannotations/annotations_valid_f4.json\n",
      "\u001b[32m[04/12 21:57:12 d2.data.dataset_mapper]: \u001b[0m[DatasetMapper] Augmentations used in inference: [ResizeShortestEdge(short_edge_length=(800, 800), max_size=1333, sample_style='choice')]\n",
      "\u001b[32m[04/12 21:57:12 d2.data.common]: \u001b[0mSerializing 121 elements to byte tensors and concatenating them all ...\n",
      "\u001b[32m[04/12 21:57:12 d2.data.common]: \u001b[0mSerialized dataset takes 1.66 MiB\n",
      "\u001b[32m[04/12 21:57:13 d2.data.datasets.coco]: \u001b[0mLoaded 121 images in COCO format from ../input/sartoriusannotations/annotations_valid_f4.json\n",
      "\u001b[32m[04/12 21:57:13 d2.evaluation.evaluator]: \u001b[0mStart inference on 121 batches\n",
      "\u001b[32m[04/12 21:57:15 d2.evaluation.evaluator]: \u001b[0mInference done 11/121. Dataloading: 0.0015 s/iter. Inference: 0.0889 s/iter. Eval: 0.0331 s/iter. Total: 0.1235 s/iter. ETA=0:00:13\n",
      "\u001b[32m[04/12 21:57:20 d2.evaluation.evaluator]: \u001b[0mInference done 50/121. Dataloading: 0.0021 s/iter. Inference: 0.0908 s/iter. Eval: 0.0353 s/iter. Total: 0.1283 s/iter. ETA=0:00:09\n",
      "\u001b[32m[04/12 21:57:25 d2.evaluation.evaluator]: \u001b[0mInference done 88/121. Dataloading: 0.0024 s/iter. Inference: 0.0914 s/iter. Eval: 0.0365 s/iter. Total: 0.1303 s/iter. ETA=0:00:04\n",
      "\u001b[32m[04/12 21:57:29 d2.evaluation.evaluator]: \u001b[0mTotal inference time: 0:00:14.912370 (0.128555 s / iter per device, on 1 devices)\n",
      "\u001b[32m[04/12 21:57:29 d2.evaluation.evaluator]: \u001b[0mTotal inference pure compute time: 0:00:10 (0.090816 s / iter per device, on 1 devices)\n",
      "\u001b[32m[04/12 21:57:29 d2.engine.defaults]: \u001b[0mEvaluation results for sartorius_val in csv format:\n",
      "\u001b[32m[04/12 21:57:29 d2.evaluation.testing]: \u001b[0mcopypaste: MaP IoU=0.25295589826531556\n",
      "\u001b[32m[04/12 21:57:29 d2.engine.hooks]: \u001b[0mNot saving as latest eval score for MaP IoU is 0.25296, not better than best score 0.25612 @ iteration 2661.\n",
      "\u001b[32m[04/12 21:57:50 d2.utils.events]: \u001b[0m eta: 1:35:30  iter: 2919  total_loss: 1.413  loss_cls: 0.3102  loss_box_reg: 0.509  loss_mask: 0.298  loss_rpn_cls: 0.09967  loss_rpn_loc: 0.1992  time: 1.5176  data_time: 0.5902  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 21:58:24 d2.utils.events]: \u001b[0m eta: 1:35:24  iter: 2939  total_loss: 1.453  loss_cls: 0.3266  loss_box_reg: 0.4997  loss_mask: 0.3059  loss_rpn_cls: 0.0864  loss_rpn_loc: 0.1938  time: 1.5189  data_time: 1.0348  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 21:59:05 d2.utils.events]: \u001b[0m eta: 1:35:53  iter: 2959  total_loss: 1.394  loss_cls: 0.3139  loss_box_reg: 0.4932  loss_mask: 0.2992  loss_rpn_cls: 0.09717  loss_rpn_loc: 0.2138  time: 1.5224  data_time: 1.3480  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 21:59:33 d2.utils.events]: \u001b[0m eta: 1:35:54  iter: 2979  total_loss: 1.507  loss_cls: 0.3186  loss_box_reg: 0.5334  loss_mask: 0.3156  loss_rpn_cls: 0.1021  loss_rpn_loc: 0.2167  time: 1.5218  data_time: 0.8069  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 21:59:58 d2.utils.events]: \u001b[0m eta: 1:34:54  iter: 2999  total_loss: 1.432  loss_cls: 0.3169  loss_box_reg: 0.5349  loss_mask: 0.2992  loss_rpn_cls: 0.05782  loss_rpn_loc: 0.1951  time: 1.5200  data_time: 0.6521  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 22:00:29 d2.utils.events]: \u001b[0m eta: 1:34:24  iter: 3019  total_loss: 1.545  loss_cls: 0.3686  loss_box_reg: 0.531  loss_mask: 0.2928  loss_rpn_cls: 0.1052  loss_rpn_loc: 0.2327  time: 1.5200  data_time: 0.8961  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 22:00:56 d2.utils.events]: \u001b[0m eta: 1:34:08  iter: 3039  total_loss: 1.343  loss_cls: 0.287  loss_box_reg: 0.504  loss_mask: 0.3054  loss_rpn_cls: 0.06263  loss_rpn_loc: 0.2048  time: 1.5189  data_time: 0.7315  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 22:01:40 d2.utils.events]: \u001b[0m eta: 1:35:10  iter: 3059  total_loss: 1.637  loss_cls: 0.3774  loss_box_reg: 0.5199  loss_mask: 0.3241  loss_rpn_cls: 0.1343  loss_rpn_loc: 0.2411  time: 1.5233  data_time: 1.5016  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 22:02:12 d2.utils.events]: \u001b[0m eta: 1:33:49  iter: 3079  total_loss: 1.489  loss_cls: 0.3587  loss_box_reg: 0.5207  loss_mask: 0.3086  loss_rpn_cls: 0.09251  loss_rpn_loc: 0.2095  time: 1.5239  data_time: 0.9708  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 22:02:35 d2.utils.events]: \u001b[0m eta: 1:33:24  iter: 3099  total_loss: 1.478  loss_cls: 0.3325  loss_box_reg: 0.5391  loss_mask: 0.2967  loss_rpn_cls: 0.06519  loss_rpn_loc: 0.192  time: 1.5215  data_time: 0.5206  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 22:03:03 d2.utils.events]: \u001b[0m eta: 1:33:31  iter: 3119  total_loss: 1.458  loss_cls: 0.3268  loss_box_reg: 0.5347  loss_mask: 0.3072  loss_rpn_cls: 0.08744  loss_rpn_loc: 0.222  time: 1.5207  data_time: 0.7713  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 22:03:38 d2.utils.events]: \u001b[0m eta: 1:33:00  iter: 3139  total_loss: 1.555  loss_cls: 0.3612  loss_box_reg: 0.5458  loss_mask: 0.322  loss_rpn_cls: 0.08983  loss_rpn_loc: 0.2131  time: 1.5222  data_time: 1.0611  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 22:03:46 d2.data.datasets.coco]: \u001b[0mLoaded 121 images in COCO format from ../input/sartoriusannotations/annotations_valid_f4.json\n",
      "\u001b[32m[04/12 22:03:47 d2.data.dataset_mapper]: \u001b[0m[DatasetMapper] Augmentations used in inference: [ResizeShortestEdge(short_edge_length=(800, 800), max_size=1333, sample_style='choice')]\n",
      "\u001b[32m[04/12 22:03:47 d2.data.common]: \u001b[0mSerializing 121 elements to byte tensors and concatenating them all ...\n",
      "\u001b[32m[04/12 22:03:47 d2.data.common]: \u001b[0mSerialized dataset takes 1.66 MiB\n",
      "\u001b[32m[04/12 22:03:47 d2.data.datasets.coco]: \u001b[0mLoaded 121 images in COCO format from ../input/sartoriusannotations/annotations_valid_f4.json\n",
      "\u001b[32m[04/12 22:03:48 d2.evaluation.evaluator]: \u001b[0mStart inference on 121 batches\n",
      "\u001b[32m[04/12 22:03:49 d2.evaluation.evaluator]: \u001b[0mInference done 11/121. Dataloading: 0.0017 s/iter. Inference: 0.0891 s/iter. Eval: 0.0322 s/iter. Total: 0.1231 s/iter. ETA=0:00:13\n",
      "\u001b[32m[04/12 22:03:54 d2.evaluation.evaluator]: \u001b[0mInference done 48/121. Dataloading: 0.0025 s/iter. Inference: 0.0928 s/iter. Eval: 0.0391 s/iter. Total: 0.1344 s/iter. ETA=0:00:09\n",
      "\u001b[32m[04/12 22:03:59 d2.evaluation.evaluator]: \u001b[0mInference done 87/121. Dataloading: 0.0025 s/iter. Inference: 0.0921 s/iter. Eval: 0.0385 s/iter. Total: 0.1331 s/iter. ETA=0:00:04\n",
      "\u001b[32m[04/12 22:04:04 d2.evaluation.evaluator]: \u001b[0mTotal inference time: 0:00:15.147468 (0.130582 s / iter per device, on 1 devices)\n",
      "\u001b[32m[04/12 22:04:04 d2.evaluation.evaluator]: \u001b[0mTotal inference pure compute time: 0:00:10 (0.091173 s / iter per device, on 1 devices)\n",
      "\u001b[32m[04/12 22:04:04 d2.engine.defaults]: \u001b[0mEvaluation results for sartorius_val in csv format:\n",
      "\u001b[32m[04/12 22:04:04 d2.evaluation.testing]: \u001b[0mcopypaste: MaP IoU=0.23756197181298952\n",
      "\u001b[32m[04/12 22:04:04 d2.engine.hooks]: \u001b[0mNot saving as latest eval score for MaP IoU is 0.23756, not better than best score 0.25612 @ iteration 2661.\n",
      "\u001b[32m[04/12 22:04:24 d2.utils.events]: \u001b[0m eta: 1:32:44  iter: 3159  total_loss: 1.516  loss_cls: 0.3129  loss_box_reg: 0.5082  loss_mask: 0.3099  loss_rpn_cls: 0.1195  loss_rpn_loc: 0.2311  time: 1.5216  data_time: 0.8016  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 22:05:05 d2.utils.events]: \u001b[0m eta: 1:33:10  iter: 3179  total_loss: 1.646  loss_cls: 0.4249  loss_box_reg: 0.4927  loss_mask: 0.3215  loss_rpn_cls: 0.1466  loss_rpn_loc: 0.2316  time: 1.5249  data_time: 1.3623  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 22:05:28 d2.utils.events]: \u001b[0m eta: 1:32:36  iter: 3199  total_loss: 1.574  loss_cls: 0.3982  loss_box_reg: 0.538  loss_mask: 0.2928  loss_rpn_cls: 0.1045  loss_rpn_loc: 0.2159  time: 1.5225  data_time: 0.5292  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 22:06:04 d2.utils.events]: \u001b[0m eta: 1:33:29  iter: 3219  total_loss: 1.447  loss_cls: 0.3294  loss_box_reg: 0.4945  loss_mask: 0.3004  loss_rpn_cls: 0.1031  loss_rpn_loc: 0.2169  time: 1.5240  data_time: 1.1251  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 22:06:33 d2.utils.events]: \u001b[0m eta: 1:32:42  iter: 3239  total_loss: 1.52  loss_cls: 0.3434  loss_box_reg: 0.5118  loss_mask: 0.3038  loss_rpn_cls: 0.06526  loss_rpn_loc: 0.2059  time: 1.5237  data_time: 0.8052  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 22:07:03 d2.utils.events]: \u001b[0m eta: 1:31:48  iter: 3259  total_loss: 1.494  loss_cls: 0.3285  loss_box_reg: 0.5313  loss_mask: 0.3185  loss_rpn_cls: 0.07788  loss_rpn_loc: 0.2153  time: 1.5237  data_time: 0.8716  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 22:07:34 d2.utils.events]: \u001b[0m eta: 1:32:09  iter: 3279  total_loss: 1.398  loss_cls: 0.3326  loss_box_reg: 0.4981  loss_mask: 0.2975  loss_rpn_cls: 0.08986  loss_rpn_loc: 0.2075  time: 1.5238  data_time: 0.8722  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 22:07:59 d2.utils.events]: \u001b[0m eta: 1:30:50  iter: 3299  total_loss: 1.463  loss_cls: 0.3048  loss_box_reg: 0.5322  loss_mask: 0.2996  loss_rpn_cls: 0.06783  loss_rpn_loc: 0.1797  time: 1.5221  data_time: 0.6274  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 22:08:24 d2.utils.events]: \u001b[0m eta: 1:30:21  iter: 3319  total_loss: 1.471  loss_cls: 0.3312  loss_box_reg: 0.5259  loss_mask: 0.3089  loss_rpn_cls: 0.08667  loss_rpn_loc: 0.2093  time: 1.5205  data_time: 0.6283  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 22:08:55 d2.utils.events]: \u001b[0m eta: 1:30:05  iter: 3339  total_loss: 1.475  loss_cls: 0.3195  loss_box_reg: 0.5074  loss_mask: 0.3036  loss_rpn_cls: 0.08498  loss_rpn_loc: 0.1943  time: 1.5204  data_time: 0.8560  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 22:09:34 d2.utils.events]: \u001b[0m eta: 1:30:06  iter: 3359  total_loss: 1.538  loss_cls: 0.3569  loss_box_reg: 0.5108  loss_mask: 0.312  loss_rpn_cls: 0.1062  loss_rpn_loc: 0.2102  time: 1.5230  data_time: 1.2871  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 22:10:11 d2.utils.events]: \u001b[0m eta: 1:30:10  iter: 3379  total_loss: 1.572  loss_cls: 0.3587  loss_box_reg: 0.5343  loss_mask: 0.3239  loss_rpn_cls: 0.1031  loss_rpn_loc: 0.2074  time: 1.5249  data_time: 1.1701  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 22:10:25 d2.data.datasets.coco]: \u001b[0mLoaded 121 images in COCO format from ../input/sartoriusannotations/annotations_valid_f4.json\n",
      "\u001b[32m[04/12 22:10:25 d2.data.dataset_mapper]: \u001b[0m[DatasetMapper] Augmentations used in inference: [ResizeShortestEdge(short_edge_length=(800, 800), max_size=1333, sample_style='choice')]\n",
      "\u001b[32m[04/12 22:10:25 d2.data.common]: \u001b[0mSerializing 121 elements to byte tensors and concatenating them all ...\n",
      "\u001b[32m[04/12 22:10:25 d2.data.common]: \u001b[0mSerialized dataset takes 1.66 MiB\n",
      "\u001b[32m[04/12 22:10:26 d2.data.datasets.coco]: \u001b[0mLoaded 121 images in COCO format from ../input/sartoriusannotations/annotations_valid_f4.json\n",
      "\u001b[32m[04/12 22:10:26 d2.evaluation.evaluator]: \u001b[0mStart inference on 121 batches\n",
      "\u001b[32m[04/12 22:10:28 d2.evaluation.evaluator]: \u001b[0mInference done 11/121. Dataloading: 0.0021 s/iter. Inference: 0.0921 s/iter. Eval: 0.0443 s/iter. Total: 0.1385 s/iter. ETA=0:00:15\n",
      "\u001b[32m[04/12 22:10:33 d2.evaluation.evaluator]: \u001b[0mInference done 48/121. Dataloading: 0.0029 s/iter. Inference: 0.0936 s/iter. Eval: 0.0418 s/iter. Total: 0.1384 s/iter. ETA=0:00:10\n",
      "\u001b[32m[04/12 22:10:38 d2.evaluation.evaluator]: \u001b[0mInference done 85/121. Dataloading: 0.0028 s/iter. Inference: 0.0932 s/iter. Eval: 0.0408 s/iter. Total: 0.1370 s/iter. ETA=0:00:04\n",
      "\u001b[32m[04/12 22:10:42 d2.evaluation.evaluator]: \u001b[0mTotal inference time: 0:00:15.609417 (0.134564 s / iter per device, on 1 devices)\n",
      "\u001b[32m[04/12 22:10:42 d2.evaluation.evaluator]: \u001b[0mTotal inference pure compute time: 0:00:10 (0.092408 s / iter per device, on 1 devices)\n",
      "\u001b[32m[04/12 22:10:42 d2.engine.defaults]: \u001b[0mEvaluation results for sartorius_val in csv format:\n",
      "\u001b[32m[04/12 22:10:42 d2.evaluation.testing]: \u001b[0mcopypaste: MaP IoU=0.2525057651861968\n",
      "\u001b[32m[04/12 22:10:43 d2.engine.hooks]: \u001b[0mNot saving as latest eval score for MaP IoU is 0.25251, not better than best score 0.25612 @ iteration 2661.\n",
      "\u001b[32m[04/12 22:10:54 d2.utils.events]: \u001b[0m eta: 1:30:17  iter: 3399  total_loss: 1.369  loss_cls: 0.2915  loss_box_reg: 0.5073  loss_mask: 0.2927  loss_rpn_cls: 0.07299  loss_rpn_loc: 0.1932  time: 1.5234  data_time: 0.6541  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 22:11:25 d2.utils.events]: \u001b[0m eta: 1:30:49  iter: 3419  total_loss: 1.497  loss_cls: 0.3408  loss_box_reg: 0.5255  loss_mask: 0.3238  loss_rpn_cls: 0.09249  loss_rpn_loc: 0.2182  time: 1.5237  data_time: 0.9135  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 22:12:00 d2.utils.events]: \u001b[0m eta: 1:31:07  iter: 3439  total_loss: 1.557  loss_cls: 0.3686  loss_box_reg: 0.5205  loss_mask: 0.3193  loss_rpn_cls: 0.09237  loss_rpn_loc: 0.2198  time: 1.5248  data_time: 1.0506  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 22:12:29 d2.utils.events]: \u001b[0m eta: 1:30:08  iter: 3459  total_loss: 1.455  loss_cls: 0.3286  loss_box_reg: 0.4959  loss_mask: 0.3104  loss_rpn_cls: 0.08178  loss_rpn_loc: 0.2146  time: 1.5246  data_time: 0.8455  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 22:12:55 d2.utils.events]: \u001b[0m eta: 1:29:23  iter: 3479  total_loss: 1.441  loss_cls: 0.3147  loss_box_reg: 0.5252  loss_mask: 0.2996  loss_rpn_cls: 0.07659  loss_rpn_loc: 0.2013  time: 1.5230  data_time: 0.6277  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 22:13:26 d2.utils.events]: \u001b[0m eta: 1:29:08  iter: 3499  total_loss: 1.478  loss_cls: 0.3232  loss_box_reg: 0.5344  loss_mask: 0.3094  loss_rpn_cls: 0.08829  loss_rpn_loc: 0.2113  time: 1.5232  data_time: 0.8992  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 22:14:03 d2.utils.events]: \u001b[0m eta: 1:29:19  iter: 3519  total_loss: 1.451  loss_cls: 0.3066  loss_box_reg: 0.5014  loss_mask: 0.3085  loss_rpn_cls: 0.09275  loss_rpn_loc: 0.2017  time: 1.5251  data_time: 1.1719  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 22:14:39 d2.utils.events]: \u001b[0m eta: 1:29:19  iter: 3539  total_loss: 1.458  loss_cls: 0.3124  loss_box_reg: 0.529  loss_mask: 0.3141  loss_rpn_cls: 0.09941  loss_rpn_loc: 0.2251  time: 1.5267  data_time: 1.1557  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 22:15:05 d2.utils.events]: \u001b[0m eta: 1:29:10  iter: 3559  total_loss: 1.537  loss_cls: 0.3522  loss_box_reg: 0.561  loss_mask: 0.3021  loss_rpn_cls: 0.1014  loss_rpn_loc: 0.2179  time: 1.5255  data_time: 0.6913  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 22:15:29 d2.utils.events]: \u001b[0m eta: 1:27:47  iter: 3579  total_loss: 1.247  loss_cls: 0.2967  loss_box_reg: 0.5112  loss_mask: 0.2839  loss_rpn_cls: 0.0703  loss_rpn_loc: 0.1757  time: 1.5237  data_time: 0.5715  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 22:16:05 d2.utils.events]: \u001b[0m eta: 1:27:46  iter: 3599  total_loss: 1.505  loss_cls: 0.3503  loss_box_reg: 0.5305  loss_mask: 0.3091  loss_rpn_cls: 0.0918  loss_rpn_loc: 0.2219  time: 1.5250  data_time: 1.0579  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 22:17:02 d2.utils.events]: \u001b[0m eta: 1:27:56  iter: 3619  total_loss: 1.551  loss_cls: 0.3978  loss_box_reg: 0.5435  loss_mask: 0.3164  loss_rpn_cls: 0.1073  loss_rpn_loc: 0.2271  time: 1.5323  data_time: 2.2043  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 22:17:20 d2.data.datasets.coco]: \u001b[0mLoaded 121 images in COCO format from ../input/sartoriusannotations/annotations_valid_f4.json\n",
      "\u001b[32m[04/12 22:17:21 d2.data.dataset_mapper]: \u001b[0m[DatasetMapper] Augmentations used in inference: [ResizeShortestEdge(short_edge_length=(800, 800), max_size=1333, sample_style='choice')]\n",
      "\u001b[32m[04/12 22:17:21 d2.data.common]: \u001b[0mSerializing 121 elements to byte tensors and concatenating them all ...\n",
      "\u001b[32m[04/12 22:17:21 d2.data.common]: \u001b[0mSerialized dataset takes 1.66 MiB\n",
      "\u001b[32m[04/12 22:17:22 d2.data.datasets.coco]: \u001b[0mLoaded 121 images in COCO format from ../input/sartoriusannotations/annotations_valid_f4.json\n",
      "\u001b[32m[04/12 22:17:22 d2.evaluation.evaluator]: \u001b[0mStart inference on 121 batches\n",
      "\u001b[32m[04/12 22:17:24 d2.evaluation.evaluator]: \u001b[0mInference done 11/121. Dataloading: 0.0018 s/iter. Inference: 0.0901 s/iter. Eval: 0.0455 s/iter. Total: 0.1374 s/iter. ETA=0:00:15\n",
      "\u001b[32m[04/12 22:17:29 d2.evaluation.evaluator]: \u001b[0mInference done 50/121. Dataloading: 0.0022 s/iter. Inference: 0.0905 s/iter. Eval: 0.0370 s/iter. Total: 0.1298 s/iter. ETA=0:00:09\n",
      "\u001b[32m[04/12 22:17:34 d2.evaluation.evaluator]: \u001b[0mInference done 88/121. Dataloading: 0.0030 s/iter. Inference: 0.0914 s/iter. Eval: 0.0370 s/iter. Total: 0.1315 s/iter. ETA=0:00:04\n",
      "\u001b[32m[04/12 22:17:38 d2.evaluation.evaluator]: \u001b[0mTotal inference time: 0:00:15.122629 (0.130367 s / iter per device, on 1 devices)\n",
      "\u001b[32m[04/12 22:17:38 d2.evaluation.evaluator]: \u001b[0mTotal inference pure compute time: 0:00:10 (0.090878 s / iter per device, on 1 devices)\n",
      "\u001b[32m[04/12 22:17:38 d2.engine.defaults]: \u001b[0mEvaluation results for sartorius_val in csv format:\n",
      "\u001b[32m[04/12 22:17:38 d2.evaluation.testing]: \u001b[0mcopypaste: MaP IoU=0.24863626950220175\n",
      "\u001b[32m[04/12 22:17:38 d2.engine.hooks]: \u001b[0mNot saving as latest eval score for MaP IoU is 0.24864, not better than best score 0.25612 @ iteration 2661.\n",
      "\u001b[32m[04/12 22:17:58 d2.utils.events]: \u001b[0m eta: 1:27:24  iter: 3639  total_loss: 1.434  loss_cls: 0.3461  loss_box_reg: 0.4922  loss_mask: 0.3007  loss_rpn_cls: 0.09861  loss_rpn_loc: 0.2019  time: 1.5342  data_time: 1.1676  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 22:18:36 d2.utils.events]: \u001b[0m eta: 1:27:08  iter: 3659  total_loss: 1.42  loss_cls: 0.3624  loss_box_reg: 0.4777  loss_mask: 0.306  loss_rpn_cls: 0.1106  loss_rpn_loc: 0.2177  time: 1.5362  data_time: 1.2179  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 22:19:05 d2.utils.events]: \u001b[0m eta: 1:25:47  iter: 3679  total_loss: 1.361  loss_cls: 0.2998  loss_box_reg: 0.5031  loss_mask: 0.3057  loss_rpn_cls: 0.06264  loss_rpn_loc: 0.1854  time: 1.5358  data_time: 0.8171  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 22:19:41 d2.utils.events]: \u001b[0m eta: 1:25:19  iter: 3699  total_loss: 1.463  loss_cls: 0.3361  loss_box_reg: 0.4926  loss_mask: 0.2983  loss_rpn_cls: 0.07554  loss_rpn_loc: 0.2222  time: 1.5372  data_time: 1.1615  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 22:20:04 d2.utils.events]: \u001b[0m eta: 1:24:46  iter: 3719  total_loss: 1.472  loss_cls: 0.3279  loss_box_reg: 0.5255  loss_mask: 0.3053  loss_rpn_cls: 0.07372  loss_rpn_loc: 0.1924  time: 1.5351  data_time: 0.5250  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 22:20:38 d2.utils.events]: \u001b[0m eta: 1:24:35  iter: 3739  total_loss: 1.641  loss_cls: 0.3841  loss_box_reg: 0.5485  loss_mask: 0.3163  loss_rpn_cls: 0.1035  loss_rpn_loc: 0.2199  time: 1.5360  data_time: 1.0448  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 22:21:08 d2.utils.events]: \u001b[0m eta: 1:24:13  iter: 3759  total_loss: 1.379  loss_cls: 0.2936  loss_box_reg: 0.5353  loss_mask: 0.3113  loss_rpn_cls: 0.06068  loss_rpn_loc: 0.2012  time: 1.5357  data_time: 0.8268  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 22:21:42 d2.utils.events]: \u001b[0m eta: 1:24:52  iter: 3779  total_loss: 1.42  loss_cls: 0.332  loss_box_reg: 0.4967  loss_mask: 0.2931  loss_rpn_cls: 0.08138  loss_rpn_loc: 0.2082  time: 1.5367  data_time: 1.0397  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 22:22:11 d2.utils.events]: \u001b[0m eta: 1:24:36  iter: 3799  total_loss: 1.423  loss_cls: 0.3048  loss_box_reg: 0.4989  loss_mask: 0.2968  loss_rpn_cls: 0.1006  loss_rpn_loc: 0.2107  time: 1.5362  data_time: 0.8023  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 22:22:49 d2.utils.events]: \u001b[0m eta: 1:24:19  iter: 3819  total_loss: 1.471  loss_cls: 0.3436  loss_box_reg: 0.5035  loss_mask: 0.3042  loss_rpn_cls: 0.1199  loss_rpn_loc: 0.2165  time: 1.5382  data_time: 1.2226  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 22:23:18 d2.utils.events]: \u001b[0m eta: 1:23:20  iter: 3839  total_loss: 1.462  loss_cls: 0.3525  loss_box_reg: 0.5534  loss_mask: 0.3239  loss_rpn_cls: 0.08159  loss_rpn_loc: 0.1972  time: 1.5375  data_time: 0.7807  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 22:23:40 d2.utils.events]: \u001b[0m eta: 1:22:20  iter: 3859  total_loss: 1.354  loss_cls: 0.2882  loss_box_reg: 0.5246  loss_mask: 0.3081  loss_rpn_cls: 0.06233  loss_rpn_loc: 0.1965  time: 1.5352  data_time: 0.4824  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 22:23:53 d2.data.datasets.coco]: \u001b[0mLoaded 121 images in COCO format from ../input/sartoriusannotations/annotations_valid_f4.json\n",
      "\u001b[32m[04/12 22:23:53 d2.data.dataset_mapper]: \u001b[0m[DatasetMapper] Augmentations used in inference: [ResizeShortestEdge(short_edge_length=(800, 800), max_size=1333, sample_style='choice')]\n",
      "\u001b[32m[04/12 22:23:53 d2.data.common]: \u001b[0mSerializing 121 elements to byte tensors and concatenating them all ...\n",
      "\u001b[32m[04/12 22:23:53 d2.data.common]: \u001b[0mSerialized dataset takes 1.66 MiB\n",
      "\u001b[32m[04/12 22:23:53 d2.data.datasets.coco]: \u001b[0mLoaded 121 images in COCO format from ../input/sartoriusannotations/annotations_valid_f4.json\n",
      "\u001b[32m[04/12 22:23:54 d2.evaluation.evaluator]: \u001b[0mStart inference on 121 batches\n",
      "\u001b[32m[04/12 22:23:56 d2.evaluation.evaluator]: \u001b[0mInference done 11/121. Dataloading: 0.0014 s/iter. Inference: 0.0890 s/iter. Eval: 0.0323 s/iter. Total: 0.1228 s/iter. ETA=0:00:13\n",
      "\u001b[32m[04/12 22:24:01 d2.evaluation.evaluator]: \u001b[0mInference done 48/121. Dataloading: 0.0024 s/iter. Inference: 0.0935 s/iter. Eval: 0.0402 s/iter. Total: 0.1362 s/iter. ETA=0:00:09\n",
      "\u001b[32m[04/12 22:24:06 d2.evaluation.evaluator]: \u001b[0mInference done 87/121. Dataloading: 0.0020 s/iter. Inference: 0.0922 s/iter. Eval: 0.0383 s/iter. Total: 0.1327 s/iter. ETA=0:00:04\n",
      "\u001b[32m[04/12 22:24:11 d2.evaluation.evaluator]: \u001b[0mTotal inference time: 0:00:15.354635 (0.132368 s / iter per device, on 1 devices)\n",
      "\u001b[32m[04/12 22:24:11 d2.evaluation.evaluator]: \u001b[0mTotal inference pure compute time: 0:00:10 (0.091869 s / iter per device, on 1 devices)\n",
      "\u001b[32m[04/12 22:24:11 d2.engine.defaults]: \u001b[0mEvaluation results for sartorius_val in csv format:\n",
      "\u001b[32m[04/12 22:24:11 d2.evaluation.testing]: \u001b[0mcopypaste: MaP IoU=0.2560849826852373\n",
      "\u001b[32m[04/12 22:24:11 d2.engine.hooks]: \u001b[0mNot saving as latest eval score for MaP IoU is 0.25608, not better than best score 0.25612 @ iteration 2661.\n",
      "\u001b[32m[04/12 22:24:22 d2.utils.events]: \u001b[0m eta: 1:22:04  iter: 3879  total_loss: 1.46  loss_cls: 0.3417  loss_box_reg: 0.5334  loss_mask: 0.3006  loss_rpn_cls: 0.07352  loss_rpn_loc: 0.189  time: 1.5334  data_time: 0.5317  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 22:24:54 d2.utils.events]: \u001b[0m eta: 1:22:23  iter: 3899  total_loss: 1.582  loss_cls: 0.3653  loss_box_reg: 0.5234  loss_mask: 0.3127  loss_rpn_cls: 0.09956  loss_rpn_loc: 0.208  time: 1.5336  data_time: 0.9298  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 22:25:23 d2.utils.events]: \u001b[0m eta: 1:22:57  iter: 3919  total_loss: 1.559  loss_cls: 0.3696  loss_box_reg: 0.5426  loss_mask: 0.3086  loss_rpn_cls: 0.07617  loss_rpn_loc: 0.2128  time: 1.5333  data_time: 0.8137  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 22:25:55 d2.utils.events]: \u001b[0m eta: 1:22:39  iter: 3939  total_loss: 1.46  loss_cls: 0.3285  loss_box_reg: 0.4975  loss_mask: 0.3019  loss_rpn_cls: 0.08823  loss_rpn_loc: 0.1968  time: 1.5337  data_time: 0.9771  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 22:26:37 d2.utils.events]: \u001b[0m eta: 1:22:22  iter: 3959  total_loss: 1.609  loss_cls: 0.3866  loss_box_reg: 0.5385  loss_mask: 0.306  loss_rpn_cls: 0.1093  loss_rpn_loc: 0.2278  time: 1.5365  data_time: 1.4200  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 22:27:02 d2.utils.events]: \u001b[0m eta: 1:22:06  iter: 3979  total_loss: 1.382  loss_cls: 0.3428  loss_box_reg: 0.5201  loss_mask: 0.2885  loss_rpn_cls: 0.05916  loss_rpn_loc: 0.1947  time: 1.5351  data_time: 0.6396  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 22:27:26 d2.utils.events]: \u001b[0m eta: 1:21:15  iter: 3999  total_loss: 1.337  loss_cls: 0.2658  loss_box_reg: 0.5019  loss_mask: 0.2911  loss_rpn_cls: 0.06166  loss_rpn_loc: 0.189  time: 1.5333  data_time: 0.5682  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 22:27:55 d2.utils.events]: \u001b[0m eta: 1:20:33  iter: 4019  total_loss: 1.352  loss_cls: 0.3198  loss_box_reg: 0.4767  loss_mask: 0.2958  loss_rpn_cls: 0.07732  loss_rpn_loc: 0.196  time: 1.5330  data_time: 0.8337  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 22:28:26 d2.utils.events]: \u001b[0m eta: 1:20:54  iter: 4039  total_loss: 1.422  loss_cls: 0.2967  loss_box_reg: 0.4911  loss_mask: 0.2948  loss_rpn_cls: 0.07575  loss_rpn_loc: 0.1924  time: 1.5331  data_time: 0.9035  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 22:29:00 d2.utils.events]: \u001b[0m eta: 1:19:53  iter: 4059  total_loss: 1.376  loss_cls: 0.281  loss_box_reg: 0.4928  loss_mask: 0.3057  loss_rpn_cls: 0.07934  loss_rpn_loc: 0.2025  time: 1.5339  data_time: 1.0440  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 22:29:34 d2.utils.events]: \u001b[0m eta: 1:19:37  iter: 4079  total_loss: 1.542  loss_cls: 0.3846  loss_box_reg: 0.5394  loss_mask: 0.3006  loss_rpn_cls: 0.09441  loss_rpn_loc: 0.2147  time: 1.5346  data_time: 1.0053  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 22:30:03 d2.utils.events]: \u001b[0m eta: 1:19:54  iter: 4099  total_loss: 1.544  loss_cls: 0.3644  loss_box_reg: 0.5519  loss_mask: 0.3152  loss_rpn_cls: 0.09829  loss_rpn_loc: 0.211  time: 1.5343  data_time: 0.8388  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 22:30:25 d2.data.datasets.coco]: \u001b[0mLoaded 121 images in COCO format from ../input/sartoriusannotations/annotations_valid_f4.json\n",
      "\u001b[32m[04/12 22:30:25 d2.data.dataset_mapper]: \u001b[0m[DatasetMapper] Augmentations used in inference: [ResizeShortestEdge(short_edge_length=(800, 800), max_size=1333, sample_style='choice')]\n",
      "\u001b[32m[04/12 22:30:25 d2.data.common]: \u001b[0mSerializing 121 elements to byte tensors and concatenating them all ...\n",
      "\u001b[32m[04/12 22:30:25 d2.data.common]: \u001b[0mSerialized dataset takes 1.66 MiB\n",
      "\u001b[32m[04/12 22:30:26 d2.data.datasets.coco]: \u001b[0mLoaded 121 images in COCO format from ../input/sartoriusannotations/annotations_valid_f4.json\n",
      "\u001b[32m[04/12 22:30:26 d2.evaluation.evaluator]: \u001b[0mStart inference on 121 batches\n",
      "\u001b[32m[04/12 22:30:28 d2.evaluation.evaluator]: \u001b[0mInference done 11/121. Dataloading: 0.0015 s/iter. Inference: 0.0906 s/iter. Eval: 0.0347 s/iter. Total: 0.1268 s/iter. ETA=0:00:13\n",
      "\u001b[32m[04/12 22:30:33 d2.evaluation.evaluator]: \u001b[0mInference done 49/121. Dataloading: 0.0020 s/iter. Inference: 0.0918 s/iter. Eval: 0.0376 s/iter. Total: 0.1314 s/iter. ETA=0:00:09\n",
      "\u001b[32m[04/12 22:30:38 d2.evaluation.evaluator]: \u001b[0mInference done 87/121. Dataloading: 0.0023 s/iter. Inference: 0.0917 s/iter. Eval: 0.0379 s/iter. Total: 0.1320 s/iter. ETA=0:00:04\n",
      "\u001b[32m[04/12 22:30:42 d2.evaluation.evaluator]: \u001b[0mTotal inference time: 0:00:15.145822 (0.130567 s / iter per device, on 1 devices)\n",
      "\u001b[32m[04/12 22:30:42 d2.evaluation.evaluator]: \u001b[0mTotal inference pure compute time: 0:00:10 (0.091189 s / iter per device, on 1 devices)\n",
      "\u001b[32m[04/12 22:30:42 d2.engine.defaults]: \u001b[0mEvaluation results for sartorius_val in csv format:\n",
      "\u001b[32m[04/12 22:30:42 d2.evaluation.testing]: \u001b[0mcopypaste: MaP IoU=0.25871111997371177\n",
      "\u001b[32m[04/12 22:30:43 d2.engine.hooks]: \u001b[0mSaved best model as latest eval score for MaP IoU is 0.25871, better than last best score 0.25612 @ iteration 2661.\n",
      "\u001b[32m[04/12 22:30:50 d2.utils.events]: \u001b[0m eta: 1:19:36  iter: 4119  total_loss: 1.446  loss_cls: 0.2959  loss_box_reg: 0.5236  loss_mask: 0.3124  loss_rpn_cls: 0.07758  loss_rpn_loc: 0.2015  time: 1.5339  data_time: 0.8069  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 22:31:19 d2.utils.events]: \u001b[0m eta: 1:19:10  iter: 4139  total_loss: 1.472  loss_cls: 0.3399  loss_box_reg: 0.4848  loss_mask: 0.3105  loss_rpn_cls: 0.0919  loss_rpn_loc: 0.2129  time: 1.5334  data_time: 0.7777  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 22:31:51 d2.utils.events]: \u001b[0m eta: 1:19:03  iter: 4159  total_loss: 1.563  loss_cls: 0.3569  loss_box_reg: 0.51  loss_mask: 0.307  loss_rpn_cls: 0.1102  loss_rpn_loc: 0.2237  time: 1.5338  data_time: 0.9626  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 22:32:22 d2.utils.events]: \u001b[0m eta: 1:18:09  iter: 4179  total_loss: 1.378  loss_cls: 0.3009  loss_box_reg: 0.5116  loss_mask: 0.2989  loss_rpn_cls: 0.07996  loss_rpn_loc: 0.2085  time: 1.5338  data_time: 0.8766  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 22:32:49 d2.utils.events]: \u001b[0m eta: 1:18:00  iter: 4199  total_loss: 1.483  loss_cls: 0.3438  loss_box_reg: 0.5228  loss_mask: 0.2972  loss_rpn_cls: 0.09066  loss_rpn_loc: 0.2182  time: 1.5330  data_time: 0.7221  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 22:33:16 d2.utils.events]: \u001b[0m eta: 1:16:42  iter: 4219  total_loss: 1.304  loss_cls: 0.3105  loss_box_reg: 0.5052  loss_mask: 0.2999  loss_rpn_cls: 0.07762  loss_rpn_loc: 0.1915  time: 1.5321  data_time: 0.7146  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 22:33:42 d2.utils.events]: \u001b[0m eta: 1:16:48  iter: 4239  total_loss: 1.398  loss_cls: 0.3416  loss_box_reg: 0.5171  loss_mask: 0.2898  loss_rpn_cls: 0.0678  loss_rpn_loc: 0.1887  time: 1.5309  data_time: 0.6452  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 22:34:07 d2.utils.events]: \u001b[0m eta: 1:16:52  iter: 4259  total_loss: 1.448  loss_cls: 0.3125  loss_box_reg: 0.5261  loss_mask: 0.3016  loss_rpn_cls: 0.0776  loss_rpn_loc: 0.1922  time: 1.5297  data_time: 0.6688  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 22:34:43 d2.utils.events]: \u001b[0m eta: 1:16:40  iter: 4279  total_loss: 1.44  loss_cls: 0.3343  loss_box_reg: 0.482  loss_mask: 0.3012  loss_rpn_cls: 0.08836  loss_rpn_loc: 0.2087  time: 1.5309  data_time: 1.1384  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 22:35:14 d2.utils.events]: \u001b[0m eta: 1:16:32  iter: 4299  total_loss: 1.489  loss_cls: 0.3647  loss_box_reg: 0.4824  loss_mask: 0.3059  loss_rpn_cls: 0.08492  loss_rpn_loc: 0.1907  time: 1.5309  data_time: 0.9047  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 22:35:43 d2.utils.events]: \u001b[0m eta: 1:16:27  iter: 4319  total_loss: 1.42  loss_cls: 0.3268  loss_box_reg: 0.5336  loss_mask: 0.3061  loss_rpn_cls: 0.06391  loss_rpn_loc: 0.1961  time: 1.5305  data_time: 0.7687  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 22:36:22 d2.utils.events]: \u001b[0m eta: 1:16:27  iter: 4339  total_loss: 1.486  loss_cls: 0.3098  loss_box_reg: 0.4997  loss_mask: 0.316  loss_rpn_cls: 0.1065  loss_rpn_loc: 0.2111  time: 1.5325  data_time: 1.2932  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 22:36:53 d2.data.datasets.coco]: \u001b[0mLoaded 121 images in COCO format from ../input/sartoriusannotations/annotations_valid_f4.json\n",
      "\u001b[32m[04/12 22:36:54 d2.data.dataset_mapper]: \u001b[0m[DatasetMapper] Augmentations used in inference: [ResizeShortestEdge(short_edge_length=(800, 800), max_size=1333, sample_style='choice')]\n",
      "\u001b[32m[04/12 22:36:54 d2.data.common]: \u001b[0mSerializing 121 elements to byte tensors and concatenating them all ...\n",
      "\u001b[32m[04/12 22:36:54 d2.data.common]: \u001b[0mSerialized dataset takes 1.66 MiB\n",
      "\u001b[32m[04/12 22:36:55 d2.data.datasets.coco]: \u001b[0mLoaded 121 images in COCO format from ../input/sartoriusannotations/annotations_valid_f4.json\n",
      "\u001b[32m[04/12 22:36:55 d2.evaluation.evaluator]: \u001b[0mStart inference on 121 batches\n",
      "\u001b[32m[04/12 22:36:57 d2.evaluation.evaluator]: \u001b[0mInference done 11/121. Dataloading: 0.0019 s/iter. Inference: 0.0914 s/iter. Eval: 0.0356 s/iter. Total: 0.1289 s/iter. ETA=0:00:14\n",
      "\u001b[32m[04/12 22:37:02 d2.evaluation.evaluator]: \u001b[0mInference done 44/121. Dataloading: 0.0041 s/iter. Inference: 0.0986 s/iter. Eval: 0.0459 s/iter. Total: 0.1486 s/iter. ETA=0:00:11\n",
      "\u001b[32m[04/12 22:37:07 d2.evaluation.evaluator]: \u001b[0mInference done 84/121. Dataloading: 0.0030 s/iter. Inference: 0.0946 s/iter. Eval: 0.0409 s/iter. Total: 0.1385 s/iter. ETA=0:00:05\n",
      "\u001b[32m[04/12 22:37:12 d2.evaluation.evaluator]: \u001b[0mTotal inference time: 0:00:15.793868 (0.136154 s / iter per device, on 1 devices)\n",
      "\u001b[32m[04/12 22:37:12 d2.evaluation.evaluator]: \u001b[0mTotal inference pure compute time: 0:00:10 (0.093497 s / iter per device, on 1 devices)\n",
      "\u001b[32m[04/12 22:37:12 d2.engine.defaults]: \u001b[0mEvaluation results for sartorius_val in csv format:\n",
      "\u001b[32m[04/12 22:37:12 d2.evaluation.testing]: \u001b[0mcopypaste: MaP IoU=0.26054665738449573\n",
      "\u001b[32m[04/12 22:37:13 d2.engine.hooks]: \u001b[0mSaved best model as latest eval score for MaP IoU is 0.26055, better than last best score 0.25871 @ iteration 4113.\n",
      "\u001b[32m[04/12 22:37:16 d2.utils.events]: \u001b[0m eta: 1:15:31  iter: 4359  total_loss: 1.428  loss_cls: 0.3153  loss_box_reg: 0.5051  loss_mask: 0.3071  loss_rpn_cls: 0.09101  loss_rpn_loc: 0.1958  time: 1.5333  data_time: 1.0172  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 22:37:48 d2.utils.events]: \u001b[0m eta: 1:15:15  iter: 4379  total_loss: 1.461  loss_cls: 0.3229  loss_box_reg: 0.5131  loss_mask: 0.3118  loss_rpn_cls: 0.08456  loss_rpn_loc: 0.2128  time: 1.5337  data_time: 0.9741  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 22:38:24 d2.utils.events]: \u001b[0m eta: 1:15:11  iter: 4399  total_loss: 1.351  loss_cls: 0.3122  loss_box_reg: 0.4744  loss_mask: 0.2895  loss_rpn_cls: 0.1002  loss_rpn_loc: 0.2065  time: 1.5347  data_time: 1.1172  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 22:38:50 d2.utils.events]: \u001b[0m eta: 1:13:57  iter: 4419  total_loss: 1.538  loss_cls: 0.3277  loss_box_reg: 0.5493  loss_mask: 0.3074  loss_rpn_cls: 0.08555  loss_rpn_loc: 0.2274  time: 1.5337  data_time: 0.6708  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 22:39:31 d2.utils.events]: \u001b[0m eta: 1:13:45  iter: 4439  total_loss: 1.493  loss_cls: 0.3563  loss_box_reg: 0.5001  loss_mask: 0.3053  loss_rpn_cls: 0.09925  loss_rpn_loc: 0.2106  time: 1.5360  data_time: 1.3589  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 22:39:55 d2.utils.events]: \u001b[0m eta: 1:13:20  iter: 4459  total_loss: 1.332  loss_cls: 0.2761  loss_box_reg: 0.4873  loss_mask: 0.2774  loss_rpn_cls: 0.03598  loss_rpn_loc: 0.1706  time: 1.5345  data_time: 0.6250  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 22:40:24 d2.utils.events]: \u001b[0m eta: 1:12:50  iter: 4479  total_loss: 1.356  loss_cls: 0.3116  loss_box_reg: 0.4935  loss_mask: 0.3014  loss_rpn_cls: 0.06079  loss_rpn_loc: 0.1919  time: 1.5341  data_time: 0.7949  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 22:40:55 d2.utils.events]: \u001b[0m eta: 1:12:22  iter: 4499  total_loss: 1.44  loss_cls: 0.3249  loss_box_reg: 0.5041  loss_mask: 0.3068  loss_rpn_cls: 0.07966  loss_rpn_loc: 0.1979  time: 1.5342  data_time: 0.9145  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 22:41:34 d2.utils.events]: \u001b[0m eta: 1:12:27  iter: 4519  total_loss: 1.51  loss_cls: 0.3453  loss_box_reg: 0.5145  loss_mask: 0.3072  loss_rpn_cls: 0.1164  loss_rpn_loc: 0.2191  time: 1.5361  data_time: 1.2599  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 22:42:08 d2.utils.events]: \u001b[0m eta: 1:12:02  iter: 4539  total_loss: 1.419  loss_cls: 0.355  loss_box_reg: 0.4908  loss_mask: 0.298  loss_rpn_cls: 0.09035  loss_rpn_loc: 0.1972  time: 1.5367  data_time: 1.0309  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 22:42:38 d2.utils.events]: \u001b[0m eta: 1:11:35  iter: 4559  total_loss: 1.409  loss_cls: 0.3032  loss_box_reg: 0.4969  loss_mask: 0.3086  loss_rpn_cls: 0.06703  loss_rpn_loc: 0.2112  time: 1.5366  data_time: 0.8406  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 22:43:00 d2.utils.events]: \u001b[0m eta: 1:11:40  iter: 4579  total_loss: 1.465  loss_cls: 0.3363  loss_box_reg: 0.5638  loss_mask: 0.3191  loss_rpn_cls: 0.08455  loss_rpn_loc: 0.1997  time: 1.5346  data_time: 0.4813  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 22:43:22 d2.data.datasets.coco]: \u001b[0mLoaded 121 images in COCO format from ../input/sartoriusannotations/annotations_valid_f4.json\n",
      "\u001b[32m[04/12 22:43:23 d2.data.dataset_mapper]: \u001b[0m[DatasetMapper] Augmentations used in inference: [ResizeShortestEdge(short_edge_length=(800, 800), max_size=1333, sample_style='choice')]\n",
      "\u001b[32m[04/12 22:43:23 d2.data.common]: \u001b[0mSerializing 121 elements to byte tensors and concatenating them all ...\n",
      "\u001b[32m[04/12 22:43:23 d2.data.common]: \u001b[0mSerialized dataset takes 1.66 MiB\n",
      "\u001b[32m[04/12 22:43:24 d2.data.datasets.coco]: \u001b[0mLoading ../input/sartoriusannotations/annotations_valid_f4.json takes 1.24 seconds.\n",
      "\u001b[32m[04/12 22:43:24 d2.data.datasets.coco]: \u001b[0mLoaded 121 images in COCO format from ../input/sartoriusannotations/annotations_valid_f4.json\n",
      "\u001b[32m[04/12 22:43:25 d2.evaluation.evaluator]: \u001b[0mStart inference on 121 batches\n",
      "\u001b[32m[04/12 22:43:27 d2.evaluation.evaluator]: \u001b[0mInference done 11/121. Dataloading: 0.0036 s/iter. Inference: 0.0966 s/iter. Eval: 0.0513 s/iter. Total: 0.1516 s/iter. ETA=0:00:16\n",
      "\u001b[32m[04/12 22:43:32 d2.evaluation.evaluator]: \u001b[0mInference done 50/121. Dataloading: 0.0022 s/iter. Inference: 0.0915 s/iter. Eval: 0.0379 s/iter. Total: 0.1318 s/iter. ETA=0:00:09\n",
      "\u001b[32m[04/12 22:43:37 d2.evaluation.evaluator]: \u001b[0mInference done 83/121. Dataloading: 0.0025 s/iter. Inference: 0.0939 s/iter. Eval: 0.0440 s/iter. Total: 0.1405 s/iter. ETA=0:00:05\n",
      "\u001b[32m[04/12 22:43:42 d2.evaluation.evaluator]: \u001b[0mTotal inference time: 0:00:15.830199 (0.136467 s / iter per device, on 1 devices)\n",
      "\u001b[32m[04/12 22:43:42 d2.evaluation.evaluator]: \u001b[0mTotal inference pure compute time: 0:00:10 (0.092696 s / iter per device, on 1 devices)\n",
      "\u001b[32m[04/12 22:43:42 d2.engine.defaults]: \u001b[0mEvaluation results for sartorius_val in csv format:\n",
      "\u001b[32m[04/12 22:43:42 d2.evaluation.testing]: \u001b[0mcopypaste: MaP IoU=0.25097113042375435\n",
      "\u001b[32m[04/12 22:43:42 d2.engine.hooks]: \u001b[0mNot saving as latest eval score for MaP IoU is 0.25097, not better than best score 0.26055 @ iteration 4355.\n",
      "\u001b[32m[04/12 22:43:44 d2.utils.events]: \u001b[0m eta: 1:10:53  iter: 4599  total_loss: 1.352  loss_cls: 0.3102  loss_box_reg: 0.5193  loss_mask: 0.2911  loss_rpn_cls: 0.05608  loss_rpn_loc: 0.1885  time: 1.5331  data_time: 0.5101  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 22:44:06 d2.utils.events]: \u001b[0m eta: 1:10:12  iter: 4619  total_loss: 1.378  loss_cls: 0.3158  loss_box_reg: 0.4893  loss_mask: 0.2974  loss_rpn_cls: 0.07076  loss_rpn_loc: 0.1986  time: 1.5312  data_time: 0.4729  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 22:44:35 d2.utils.events]: \u001b[0m eta: 1:09:52  iter: 4639  total_loss: 1.369  loss_cls: 0.3156  loss_box_reg: 0.4894  loss_mask: 0.2937  loss_rpn_cls: 0.07754  loss_rpn_loc: 0.1876  time: 1.5308  data_time: 0.8281  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 22:44:51 d2.utils.events]: \u001b[0m eta: 1:08:40  iter: 4659  total_loss: 1.395  loss_cls: 0.3271  loss_box_reg: 0.5317  loss_mask: 0.2915  loss_rpn_cls: 0.06705  loss_rpn_loc: 0.1861  time: 1.5278  data_time: 0.2346  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 22:45:19 d2.utils.events]: \u001b[0m eta: 1:08:42  iter: 4679  total_loss: 1.421  loss_cls: 0.3306  loss_box_reg: 0.5294  loss_mask: 0.2991  loss_rpn_cls: 0.06976  loss_rpn_loc: 0.1969  time: 1.5271  data_time: 0.7150  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 22:45:52 d2.utils.events]: \u001b[0m eta: 1:08:29  iter: 4699  total_loss: 1.428  loss_cls: 0.3223  loss_box_reg: 0.5083  loss_mask: 0.3047  loss_rpn_cls: 0.08894  loss_rpn_loc: 0.2026  time: 1.5277  data_time: 1.0214  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 22:46:16 d2.utils.events]: \u001b[0m eta: 1:08:22  iter: 4719  total_loss: 1.309  loss_cls: 0.2957  loss_box_reg: 0.5131  loss_mask: 0.2904  loss_rpn_cls: 0.06037  loss_rpn_loc: 0.1789  time: 1.5263  data_time: 0.5719  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 22:46:44 d2.utils.events]: \u001b[0m eta: 1:07:38  iter: 4739  total_loss: 1.44  loss_cls: 0.3017  loss_box_reg: 0.5232  loss_mask: 0.3072  loss_rpn_cls: 0.06528  loss_rpn_loc: 0.2005  time: 1.5257  data_time: 0.7844  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 22:47:30 d2.utils.events]: \u001b[0m eta: 1:07:42  iter: 4759  total_loss: 1.556  loss_cls: 0.378  loss_box_reg: 0.5389  loss_mask: 0.3176  loss_rpn_cls: 0.09753  loss_rpn_loc: 0.2353  time: 1.5290  data_time: 1.6085  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 22:48:02 d2.utils.events]: \u001b[0m eta: 1:07:17  iter: 4779  total_loss: 1.435  loss_cls: 0.3393  loss_box_reg: 0.5047  loss_mask: 0.311  loss_rpn_cls: 0.08793  loss_rpn_loc: 0.2018  time: 1.5292  data_time: 0.9380  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 22:48:41 d2.utils.events]: \u001b[0m eta: 1:07:09  iter: 4799  total_loss: 1.472  loss_cls: 0.3422  loss_box_reg: 0.4966  loss_mask: 0.305  loss_rpn_cls: 0.1006  loss_rpn_loc: 0.2231  time: 1.5310  data_time: 1.2996  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 22:49:16 d2.utils.events]: \u001b[0m eta: 1:06:56  iter: 4819  total_loss: 1.437  loss_cls: 0.3116  loss_box_reg: 0.497  loss_mask: 0.3168  loss_rpn_cls: 0.09431  loss_rpn_loc: 0.2005  time: 1.5320  data_time: 1.1255  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 22:49:49 d2.data.datasets.coco]: \u001b[0mLoading ../input/sartoriusannotations/annotations_valid_f4.json takes 1.20 seconds.\n",
      "\u001b[32m[04/12 22:49:49 d2.data.datasets.coco]: \u001b[0mLoaded 121 images in COCO format from ../input/sartoriusannotations/annotations_valid_f4.json\n",
      "\u001b[32m[04/12 22:49:50 d2.data.dataset_mapper]: \u001b[0m[DatasetMapper] Augmentations used in inference: [ResizeShortestEdge(short_edge_length=(800, 800), max_size=1333, sample_style='choice')]\n",
      "\u001b[32m[04/12 22:49:50 d2.data.common]: \u001b[0mSerializing 121 elements to byte tensors and concatenating them all ...\n",
      "\u001b[32m[04/12 22:49:50 d2.data.common]: \u001b[0mSerialized dataset takes 1.66 MiB\n",
      "\u001b[32m[04/12 22:49:50 d2.data.datasets.coco]: \u001b[0mLoaded 121 images in COCO format from ../input/sartoriusannotations/annotations_valid_f4.json\n",
      "\u001b[32m[04/12 22:49:51 d2.evaluation.evaluator]: \u001b[0mStart inference on 121 batches\n",
      "\u001b[32m[04/12 22:49:53 d2.evaluation.evaluator]: \u001b[0mInference done 11/121. Dataloading: 0.0044 s/iter. Inference: 0.0975 s/iter. Eval: 0.0506 s/iter. Total: 0.1524 s/iter. ETA=0:00:16\n",
      "\u001b[32m[04/12 22:49:58 d2.evaluation.evaluator]: \u001b[0mInference done 49/121. Dataloading: 0.0023 s/iter. Inference: 0.0926 s/iter. Eval: 0.0395 s/iter. Total: 0.1345 s/iter. ETA=0:00:09\n",
      "\u001b[32m[04/12 22:50:03 d2.evaluation.evaluator]: \u001b[0mInference done 86/121. Dataloading: 0.0026 s/iter. Inference: 0.0932 s/iter. Eval: 0.0396 s/iter. Total: 0.1355 s/iter. ETA=0:00:04\n",
      "\u001b[32m[04/12 22:50:07 d2.evaluation.evaluator]: \u001b[0mTotal inference time: 0:00:15.527553 (0.133858 s / iter per device, on 1 devices)\n",
      "\u001b[32m[04/12 22:50:07 d2.evaluation.evaluator]: \u001b[0mTotal inference pure compute time: 0:00:10 (0.092586 s / iter per device, on 1 devices)\n",
      "\u001b[32m[04/12 22:50:07 d2.engine.defaults]: \u001b[0mEvaluation results for sartorius_val in csv format:\n",
      "\u001b[32m[04/12 22:50:07 d2.evaluation.testing]: \u001b[0mcopypaste: MaP IoU=0.2526711206792321\n",
      "\u001b[32m[04/12 22:50:07 d2.engine.hooks]: \u001b[0mNot saving as latest eval score for MaP IoU is 0.25267, not better than best score 0.26055 @ iteration 4355.\n",
      "\u001b[32m[04/12 22:50:07 d2.utils.events]: \u001b[0m eta: 1:07:10  iter: 4839  total_loss: 1.367  loss_cls: 0.3093  loss_box_reg: 0.4743  loss_mask: 0.2856  loss_rpn_cls: 0.1014  loss_rpn_loc: 0.2132  time: 1.5322  data_time: 0.9974  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 22:50:36 d2.utils.events]: \u001b[0m eta: 1:07:29  iter: 4859  total_loss: 1.465  loss_cls: 0.3179  loss_box_reg: 0.5304  loss_mask: 0.337  loss_rpn_cls: 0.08374  loss_rpn_loc: 0.2111  time: 1.5317  data_time: 0.7874  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 22:51:08 d2.utils.events]: \u001b[0m eta: 1:07:42  iter: 4879  total_loss: 1.343  loss_cls: 0.3053  loss_box_reg: 0.4789  loss_mask: 0.2986  loss_rpn_cls: 0.07196  loss_rpn_loc: 0.2096  time: 1.5321  data_time: 0.9692  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 22:51:47 d2.utils.events]: \u001b[0m eta: 1:07:26  iter: 4899  total_loss: 1.44  loss_cls: 0.3615  loss_box_reg: 0.4932  loss_mask: 0.3183  loss_rpn_cls: 0.08823  loss_rpn_loc: 0.2275  time: 1.5338  data_time: 1.2787  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 22:52:11 d2.utils.events]: \u001b[0m eta: 1:06:13  iter: 4919  total_loss: 1.33  loss_cls: 0.3144  loss_box_reg: 0.506  loss_mask: 0.2961  loss_rpn_cls: 0.05489  loss_rpn_loc: 0.182  time: 1.5323  data_time: 0.5419  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 22:52:43 d2.utils.events]: \u001b[0m eta: 1:06:38  iter: 4939  total_loss: 1.427  loss_cls: 0.3151  loss_box_reg: 0.5241  loss_mask: 0.3071  loss_rpn_cls: 0.08347  loss_rpn_loc: 0.1926  time: 1.5327  data_time: 0.9590  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 22:53:20 d2.utils.events]: \u001b[0m eta: 1:05:42  iter: 4959  total_loss: 1.373  loss_cls: 0.3009  loss_box_reg: 0.4782  loss_mask: 0.2953  loss_rpn_cls: 0.07949  loss_rpn_loc: 0.1855  time: 1.5340  data_time: 1.1673  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 22:53:48 d2.utils.events]: \u001b[0m eta: 1:05:57  iter: 4979  total_loss: 1.36  loss_cls: 0.3118  loss_box_reg: 0.5132  loss_mask: 0.2957  loss_rpn_cls: 0.07486  loss_rpn_loc: 0.1947  time: 1.5335  data_time: 0.7606  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 22:54:23 d2.utils.events]: \u001b[0m eta: 1:06:10  iter: 4999  total_loss: 1.507  loss_cls: 0.3368  loss_box_reg: 0.504  loss_mask: 0.3109  loss_rpn_cls: 0.1096  loss_rpn_loc: 0.2148  time: 1.5342  data_time: 1.0765  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 22:54:51 d2.utils.events]: \u001b[0m eta: 1:05:53  iter: 5019  total_loss: 1.277  loss_cls: 0.2724  loss_box_reg: 0.5048  loss_mask: 0.2927  loss_rpn_cls: 0.0751  loss_rpn_loc: 0.1793  time: 1.5337  data_time: 0.7779  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 22:55:15 d2.utils.events]: \u001b[0m eta: 1:04:58  iter: 5039  total_loss: 1.365  loss_cls: 0.3014  loss_box_reg: 0.5158  loss_mask: 0.2985  loss_rpn_cls: 0.0722  loss_rpn_loc: 0.186  time: 1.5323  data_time: 0.5762  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 22:55:47 d2.utils.events]: \u001b[0m eta: 1:04:29  iter: 5059  total_loss: 1.52  loss_cls: 0.3586  loss_box_reg: 0.5031  loss_mask: 0.3113  loss_rpn_cls: 0.06977  loss_rpn_loc: 0.2166  time: 1.5325  data_time: 0.9297  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 22:56:17 d2.utils.events]: \u001b[0m eta: 1:04:13  iter: 5079  total_loss: 1.467  loss_cls: 0.3419  loss_box_reg: 0.516  loss_mask: 0.3135  loss_rpn_cls: 0.09106  loss_rpn_loc: 0.2107  time: 1.5325  data_time: 0.8859  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 22:56:22 d2.data.datasets.coco]: \u001b[0mLoaded 121 images in COCO format from ../input/sartoriusannotations/annotations_valid_f4.json\n",
      "\u001b[32m[04/12 22:56:22 d2.data.dataset_mapper]: \u001b[0m[DatasetMapper] Augmentations used in inference: [ResizeShortestEdge(short_edge_length=(800, 800), max_size=1333, sample_style='choice')]\n",
      "\u001b[32m[04/12 22:56:22 d2.data.common]: \u001b[0mSerializing 121 elements to byte tensors and concatenating them all ...\n",
      "\u001b[32m[04/12 22:56:22 d2.data.common]: \u001b[0mSerialized dataset takes 1.66 MiB\n",
      "\u001b[32m[04/12 22:56:22 d2.data.datasets.coco]: \u001b[0mLoaded 121 images in COCO format from ../input/sartoriusannotations/annotations_valid_f4.json\n",
      "\u001b[32m[04/12 22:56:23 d2.evaluation.evaluator]: \u001b[0mStart inference on 121 batches\n",
      "\u001b[32m[04/12 22:56:26 d2.evaluation.evaluator]: \u001b[0mInference done 11/121. Dataloading: 0.0062 s/iter. Inference: 0.1034 s/iter. Eval: 0.0648 s/iter. Total: 0.1744 s/iter. ETA=0:00:19\n",
      "\u001b[32m[04/12 22:56:31 d2.evaluation.evaluator]: \u001b[0mInference done 42/121. Dataloading: 0.0053 s/iter. Inference: 0.1024 s/iter. Eval: 0.0591 s/iter. Total: 0.1669 s/iter. ETA=0:00:13\n",
      "\u001b[32m[04/12 22:56:36 d2.evaluation.evaluator]: \u001b[0mInference done 81/121. Dataloading: 0.0039 s/iter. Inference: 0.0967 s/iter. Eval: 0.0478 s/iter. Total: 0.1486 s/iter. ETA=0:00:05\n",
      "\u001b[32m[04/12 22:56:41 d2.evaluation.evaluator]: \u001b[0mInference done 121/121. Dataloading: 0.0032 s/iter. Inference: 0.0946 s/iter. Eval: 0.0435 s/iter. Total: 0.1413 s/iter. ETA=0:00:00\n",
      "\u001b[32m[04/12 22:56:41 d2.evaluation.evaluator]: \u001b[0mTotal inference time: 0:00:16.451598 (0.141824 s / iter per device, on 1 devices)\n",
      "\u001b[32m[04/12 22:56:41 d2.evaluation.evaluator]: \u001b[0mTotal inference pure compute time: 0:00:10 (0.094576 s / iter per device, on 1 devices)\n",
      "\u001b[32m[04/12 22:56:41 d2.engine.defaults]: \u001b[0mEvaluation results for sartorius_val in csv format:\n",
      "\u001b[32m[04/12 22:56:41 d2.evaluation.testing]: \u001b[0mcopypaste: MaP IoU=0.24951530917116982\n",
      "\u001b[32m[04/12 22:56:41 d2.engine.hooks]: \u001b[0mNot saving as latest eval score for MaP IoU is 0.24952, not better than best score 0.26055 @ iteration 4355.\n",
      "\u001b[32m[04/12 22:57:13 d2.utils.events]: \u001b[0m eta: 1:04:11  iter: 5099  total_loss: 1.419  loss_cls: 0.3427  loss_box_reg: 0.4978  loss_mask: 0.3033  loss_rpn_cls: 0.09725  loss_rpn_loc: 0.2124  time: 1.5335  data_time: 1.1167  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 22:57:36 d2.utils.events]: \u001b[0m eta: 1:03:42  iter: 5119  total_loss: 1.291  loss_cls: 0.2781  loss_box_reg: 0.5022  loss_mask: 0.286  loss_rpn_cls: 0.06327  loss_rpn_loc: 0.1938  time: 1.5320  data_time: 0.5169  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 22:58:03 d2.utils.events]: \u001b[0m eta: 1:03:21  iter: 5139  total_loss: 1.41  loss_cls: 0.3224  loss_box_reg: 0.5194  loss_mask: 0.3091  loss_rpn_cls: 0.08039  loss_rpn_loc: 0.1981  time: 1.5311  data_time: 0.7098  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 22:58:31 d2.utils.events]: \u001b[0m eta: 1:03:03  iter: 5159  total_loss: 1.383  loss_cls: 0.323  loss_box_reg: 0.514  loss_mask: 0.2936  loss_rpn_cls: 0.06233  loss_rpn_loc: 0.1865  time: 1.5307  data_time: 0.7973  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 22:59:08 d2.utils.events]: \u001b[0m eta: 1:03:40  iter: 5179  total_loss: 1.452  loss_cls: 0.3548  loss_box_reg: 0.4855  loss_mask: 0.2938  loss_rpn_cls: 0.1027  loss_rpn_loc: 0.2112  time: 1.5320  data_time: 1.1824  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 22:59:40 d2.utils.events]: \u001b[0m eta: 1:03:24  iter: 5199  total_loss: 1.274  loss_cls: 0.2623  loss_box_reg: 0.4769  loss_mask: 0.296  loss_rpn_cls: 0.07162  loss_rpn_loc: 0.1948  time: 1.5322  data_time: 0.9399  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 23:00:06 d2.utils.events]: \u001b[0m eta: 1:03:13  iter: 5219  total_loss: 1.533  loss_cls: 0.3201  loss_box_reg: 0.5749  loss_mask: 0.3142  loss_rpn_cls: 0.08192  loss_rpn_loc: 0.2046  time: 1.5313  data_time: 0.6809  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 23:00:47 d2.utils.events]: \u001b[0m eta: 1:03:13  iter: 5239  total_loss: 1.511  loss_cls: 0.3589  loss_box_reg: 0.506  loss_mask: 0.3038  loss_rpn_cls: 0.09345  loss_rpn_loc: 0.2107  time: 1.5331  data_time: 1.3382  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 23:01:18 d2.utils.events]: \u001b[0m eta: 1:03:30  iter: 5259  total_loss: 1.465  loss_cls: 0.3041  loss_box_reg: 0.507  loss_mask: 0.3102  loss_rpn_cls: 0.07965  loss_rpn_loc: 0.2045  time: 1.5333  data_time: 0.9452  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 23:01:36 d2.utils.events]: \u001b[0m eta: 1:02:26  iter: 5279  total_loss: 1.398  loss_cls: 0.2821  loss_box_reg: 0.5305  loss_mask: 0.3302  loss_rpn_cls: 0.069  loss_rpn_loc: 0.1937  time: 1.5308  data_time: 0.2875  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 23:02:05 d2.utils.events]: \u001b[0m eta: 1:02:05  iter: 5299  total_loss: 1.408  loss_cls: 0.3176  loss_box_reg: 0.4994  loss_mask: 0.2966  loss_rpn_cls: 0.06796  loss_rpn_loc: 0.1947  time: 1.5306  data_time: 0.8302  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 23:02:46 d2.utils.events]: \u001b[0m eta: 1:01:56  iter: 5319  total_loss: 1.462  loss_cls: 0.3594  loss_box_reg: 0.5012  loss_mask: 0.3068  loss_rpn_cls: 0.09083  loss_rpn_loc: 0.2134  time: 1.5325  data_time: 1.3484  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 23:02:55 d2.data.datasets.coco]: \u001b[0mLoaded 121 images in COCO format from ../input/sartoriusannotations/annotations_valid_f4.json\n",
      "\u001b[32m[04/12 23:02:55 d2.data.dataset_mapper]: \u001b[0m[DatasetMapper] Augmentations used in inference: [ResizeShortestEdge(short_edge_length=(800, 800), max_size=1333, sample_style='choice')]\n",
      "\u001b[32m[04/12 23:02:55 d2.data.common]: \u001b[0mSerializing 121 elements to byte tensors and concatenating them all ...\n",
      "\u001b[32m[04/12 23:02:55 d2.data.common]: \u001b[0mSerialized dataset takes 1.66 MiB\n",
      "\u001b[32m[04/12 23:02:56 d2.data.datasets.coco]: \u001b[0mLoaded 121 images in COCO format from ../input/sartoriusannotations/annotations_valid_f4.json\n",
      "\u001b[32m[04/12 23:02:56 d2.evaluation.evaluator]: \u001b[0mStart inference on 121 batches\n",
      "\u001b[32m[04/12 23:02:58 d2.evaluation.evaluator]: \u001b[0mInference done 11/121. Dataloading: 0.0014 s/iter. Inference: 0.0886 s/iter. Eval: 0.0315 s/iter. Total: 0.1215 s/iter. ETA=0:00:13\n",
      "\u001b[32m[04/12 23:03:03 d2.evaluation.evaluator]: \u001b[0mInference done 49/121. Dataloading: 0.0023 s/iter. Inference: 0.0910 s/iter. Eval: 0.0375 s/iter. Total: 0.1309 s/iter. ETA=0:00:09\n",
      "\u001b[32m[04/12 23:03:08 d2.evaluation.evaluator]: \u001b[0mInference done 88/121. Dataloading: 0.0022 s/iter. Inference: 0.0910 s/iter. Eval: 0.0371 s/iter. Total: 0.1305 s/iter. ETA=0:00:04\n",
      "\u001b[32m[04/12 23:03:12 d2.evaluation.evaluator]: \u001b[0mTotal inference time: 0:00:15.149939 (0.130603 s / iter per device, on 1 devices)\n",
      "\u001b[32m[04/12 23:03:12 d2.evaluation.evaluator]: \u001b[0mTotal inference pure compute time: 0:00:10 (0.090946 s / iter per device, on 1 devices)\n",
      "\u001b[32m[04/12 23:03:12 d2.engine.defaults]: \u001b[0mEvaluation results for sartorius_val in csv format:\n",
      "\u001b[32m[04/12 23:03:12 d2.evaluation.testing]: \u001b[0mcopypaste: MaP IoU=0.2636671245612495\n",
      "\u001b[32m[04/12 23:03:12 d2.engine.hooks]: \u001b[0mSaved best model as latest eval score for MaP IoU is 0.26367, better than last best score 0.26055 @ iteration 4355.\n",
      "\u001b[32m[04/12 23:03:37 d2.utils.events]: \u001b[0m eta: 1:01:42  iter: 5339  total_loss: 1.374  loss_cls: 0.3091  loss_box_reg: 0.4821  loss_mask: 0.2861  loss_rpn_cls: 0.08353  loss_rpn_loc: 0.1995  time: 1.5330  data_time: 0.9862  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 23:04:11 d2.utils.events]: \u001b[0m eta: 1:01:40  iter: 5359  total_loss: 1.455  loss_cls: 0.3085  loss_box_reg: 0.4997  loss_mask: 0.3209  loss_rpn_cls: 0.09595  loss_rpn_loc: 0.2093  time: 1.5335  data_time: 1.0431  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 23:04:32 d2.utils.events]: \u001b[0m eta: 1:01:11  iter: 5379  total_loss: 1.323  loss_cls: 0.3099  loss_box_reg: 0.5114  loss_mask: 0.3063  loss_rpn_cls: 0.06631  loss_rpn_loc: 0.177  time: 1.5318  data_time: 0.4643  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 23:05:07 d2.utils.events]: \u001b[0m eta: 1:00:53  iter: 5399  total_loss: 1.383  loss_cls: 0.2979  loss_box_reg: 0.5007  loss_mask: 0.3074  loss_rpn_cls: 0.09481  loss_rpn_loc: 0.2111  time: 1.5325  data_time: 1.0856  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 23:05:46 d2.utils.events]: \u001b[0m eta: 1:01:35  iter: 5419  total_loss: 1.483  loss_cls: 0.3524  loss_box_reg: 0.4959  loss_mask: 0.3035  loss_rpn_cls: 0.08933  loss_rpn_loc: 0.2009  time: 1.5340  data_time: 1.2805  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 23:06:16 d2.utils.events]: \u001b[0m eta: 1:01:04  iter: 5439  total_loss: 1.459  loss_cls: 0.3388  loss_box_reg: 0.5084  loss_mask: 0.2912  loss_rpn_cls: 0.08965  loss_rpn_loc: 0.2107  time: 1.5340  data_time: 0.9113  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 23:06:48 d2.utils.events]: \u001b[0m eta: 1:01:07  iter: 5459  total_loss: 1.328  loss_cls: 0.2989  loss_box_reg: 0.4909  loss_mask: 0.2952  loss_rpn_cls: 0.06314  loss_rpn_loc: 0.1901  time: 1.5342  data_time: 0.9288  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 23:07:15 d2.utils.events]: \u001b[0m eta: 1:00:43  iter: 5479  total_loss: 1.441  loss_cls: 0.287  loss_box_reg: 0.514  loss_mask: 0.3051  loss_rpn_cls: 0.07637  loss_rpn_loc: 0.2075  time: 1.5335  data_time: 0.7498  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 23:07:54 d2.utils.events]: \u001b[0m eta: 1:00:39  iter: 5499  total_loss: 1.466  loss_cls: 0.3702  loss_box_reg: 0.4983  loss_mask: 0.3104  loss_rpn_cls: 0.1269  loss_rpn_loc: 0.2053  time: 1.5350  data_time: 1.2844  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 23:08:25 d2.utils.events]: \u001b[0m eta: 1:00:00  iter: 5519  total_loss: 1.361  loss_cls: 0.3103  loss_box_reg: 0.5183  loss_mask: 0.2948  loss_rpn_cls: 0.07462  loss_rpn_loc: 0.1859  time: 1.5350  data_time: 0.9076  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 23:08:46 d2.utils.events]: \u001b[0m eta: 0:59:44  iter: 5539  total_loss: 1.392  loss_cls: 0.3079  loss_box_reg: 0.5022  loss_mask: 0.29  loss_rpn_cls: 0.06034  loss_rpn_loc: 0.1904  time: 1.5334  data_time: 0.4668  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 23:09:18 d2.utils.events]: \u001b[0m eta: 0:58:46  iter: 5559  total_loss: 1.334  loss_cls: 0.2883  loss_box_reg: 0.4833  loss_mask: 0.2792  loss_rpn_cls: 0.06959  loss_rpn_loc: 0.1879  time: 1.5335  data_time: 0.9018  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 23:09:24 d2.data.datasets.coco]: \u001b[0mLoaded 121 images in COCO format from ../input/sartoriusannotations/annotations_valid_f4.json\n",
      "\u001b[32m[04/12 23:09:25 d2.data.dataset_mapper]: \u001b[0m[DatasetMapper] Augmentations used in inference: [ResizeShortestEdge(short_edge_length=(800, 800), max_size=1333, sample_style='choice')]\n",
      "\u001b[32m[04/12 23:09:25 d2.data.common]: \u001b[0mSerializing 121 elements to byte tensors and concatenating them all ...\n",
      "\u001b[32m[04/12 23:09:25 d2.data.common]: \u001b[0mSerialized dataset takes 1.66 MiB\n",
      "\u001b[32m[04/12 23:09:26 d2.data.datasets.coco]: \u001b[0mLoading ../input/sartoriusannotations/annotations_valid_f4.json takes 1.71 seconds.\n",
      "\u001b[32m[04/12 23:09:26 d2.data.datasets.coco]: \u001b[0mLoaded 121 images in COCO format from ../input/sartoriusannotations/annotations_valid_f4.json\n",
      "\u001b[32m[04/12 23:09:27 d2.evaluation.evaluator]: \u001b[0mStart inference on 121 batches\n",
      "\u001b[32m[04/12 23:09:29 d2.evaluation.evaluator]: \u001b[0mInference done 11/121. Dataloading: 0.0017 s/iter. Inference: 0.0897 s/iter. Eval: 0.0334 s/iter. Total: 0.1247 s/iter. ETA=0:00:13\n",
      "\u001b[32m[04/12 23:09:34 d2.evaluation.evaluator]: \u001b[0mInference done 50/121. Dataloading: 0.0019 s/iter. Inference: 0.0908 s/iter. Eval: 0.0361 s/iter. Total: 0.1288 s/iter. ETA=0:00:09\n",
      "\u001b[32m[04/12 23:09:39 d2.evaluation.evaluator]: \u001b[0mInference done 88/121. Dataloading: 0.0022 s/iter. Inference: 0.0918 s/iter. Eval: 0.0373 s/iter. Total: 0.1313 s/iter. ETA=0:00:04\n",
      "\u001b[32m[04/12 23:09:43 d2.evaluation.evaluator]: \u001b[0mTotal inference time: 0:00:15.102738 (0.130196 s / iter per device, on 1 devices)\n",
      "\u001b[32m[04/12 23:09:43 d2.evaluation.evaluator]: \u001b[0mTotal inference pure compute time: 0:00:10 (0.091155 s / iter per device, on 1 devices)\n",
      "\u001b[32m[04/12 23:09:43 d2.engine.defaults]: \u001b[0mEvaluation results for sartorius_val in csv format:\n",
      "\u001b[32m[04/12 23:09:43 d2.evaluation.testing]: \u001b[0mcopypaste: MaP IoU=0.2588110274536544\n",
      "\u001b[32m[04/12 23:09:43 d2.engine.hooks]: \u001b[0mNot saving as latest eval score for MaP IoU is 0.25881, not better than best score 0.26367 @ iteration 5323.\n",
      "\u001b[32m[04/12 23:10:00 d2.utils.events]: \u001b[0m eta: 0:58:39  iter: 5579  total_loss: 1.431  loss_cls: 0.3032  loss_box_reg: 0.5149  loss_mask: 0.2834  loss_rpn_cls: 0.0815  loss_rpn_loc: 0.2041  time: 1.5322  data_time: 0.5356  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 23:10:33 d2.utils.events]: \u001b[0m eta: 0:58:56  iter: 5599  total_loss: 1.377  loss_cls: 0.3135  loss_box_reg: 0.5228  loss_mask: 0.3061  loss_rpn_cls: 0.0739  loss_rpn_loc: 0.1866  time: 1.5325  data_time: 0.9628  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 23:11:02 d2.utils.events]: \u001b[0m eta: 0:58:21  iter: 5619  total_loss: 1.386  loss_cls: 0.3251  loss_box_reg: 0.5275  loss_mask: 0.3027  loss_rpn_cls: 0.07892  loss_rpn_loc: 0.1931  time: 1.5323  data_time: 0.8492  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 23:11:27 d2.utils.events]: \u001b[0m eta: 0:58:31  iter: 5639  total_loss: 1.46  loss_cls: 0.3309  loss_box_reg: 0.5439  loss_mask: 0.2964  loss_rpn_cls: 0.07036  loss_rpn_loc: 0.1947  time: 1.5313  data_time: 0.6392  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 23:11:55 d2.utils.events]: \u001b[0m eta: 0:58:29  iter: 5659  total_loss: 1.474  loss_cls: 0.282  loss_box_reg: 0.5282  loss_mask: 0.3221  loss_rpn_cls: 0.05017  loss_rpn_loc: 0.2026  time: 1.5308  data_time: 0.7661  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 23:12:28 d2.utils.events]: \u001b[0m eta: 0:58:15  iter: 5679  total_loss: 1.365  loss_cls: 0.2996  loss_box_reg: 0.478  loss_mask: 0.2822  loss_rpn_cls: 0.08322  loss_rpn_loc: 0.2002  time: 1.5312  data_time: 0.9943  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 23:12:53 d2.utils.events]: \u001b[0m eta: 0:57:43  iter: 5699  total_loss: 1.345  loss_cls: 0.2978  loss_box_reg: 0.5083  loss_mask: 0.2919  loss_rpn_cls: 0.05339  loss_rpn_loc: 0.1995  time: 1.5303  data_time: 0.6429  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 23:13:25 d2.utils.events]: \u001b[0m eta: 0:57:27  iter: 5719  total_loss: 1.394  loss_cls: 0.3353  loss_box_reg: 0.4962  loss_mask: 0.2977  loss_rpn_cls: 0.09599  loss_rpn_loc: 0.2114  time: 1.5305  data_time: 0.9619  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 23:13:52 d2.utils.events]: \u001b[0m eta: 0:57:11  iter: 5739  total_loss: 1.447  loss_cls: 0.3372  loss_box_reg: 0.5078  loss_mask: 0.2988  loss_rpn_cls: 0.07203  loss_rpn_loc: 0.2071  time: 1.5298  data_time: 0.7031  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 23:14:23 d2.utils.events]: \u001b[0m eta: 0:56:16  iter: 5759  total_loss: 1.463  loss_cls: 0.3311  loss_box_reg: 0.5285  loss_mask: 0.3152  loss_rpn_cls: 0.09676  loss_rpn_loc: 0.2051  time: 1.5298  data_time: 0.8916  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 23:15:05 d2.utils.events]: \u001b[0m eta: 0:56:44  iter: 5779  total_loss: 1.458  loss_cls: 0.3391  loss_box_reg: 0.4833  loss_mask: 0.3049  loss_rpn_cls: 0.09996  loss_rpn_loc: 0.2074  time: 1.5319  data_time: 1.4481  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 23:15:42 d2.utils.events]: \u001b[0m eta: 0:56:37  iter: 5799  total_loss: 1.424  loss_cls: 0.348  loss_box_reg: 0.4826  loss_mask: 0.3054  loss_rpn_cls: 0.08667  loss_rpn_loc: 0.2032  time: 1.5329  data_time: 1.1542  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 23:15:52 d2.data.datasets.coco]: \u001b[0mLoaded 121 images in COCO format from ../input/sartoriusannotations/annotations_valid_f4.json\n",
      "\u001b[32m[04/12 23:15:53 d2.data.dataset_mapper]: \u001b[0m[DatasetMapper] Augmentations used in inference: [ResizeShortestEdge(short_edge_length=(800, 800), max_size=1333, sample_style='choice')]\n",
      "\u001b[32m[04/12 23:15:53 d2.data.common]: \u001b[0mSerializing 121 elements to byte tensors and concatenating them all ...\n",
      "\u001b[32m[04/12 23:15:53 d2.data.common]: \u001b[0mSerialized dataset takes 1.66 MiB\n",
      "\u001b[32m[04/12 23:15:53 d2.data.datasets.coco]: \u001b[0mLoaded 121 images in COCO format from ../input/sartoriusannotations/annotations_valid_f4.json\n",
      "\u001b[32m[04/12 23:15:54 d2.evaluation.evaluator]: \u001b[0mStart inference on 121 batches\n",
      "\u001b[32m[04/12 23:15:55 d2.evaluation.evaluator]: \u001b[0mInference done 11/121. Dataloading: 0.0013 s/iter. Inference: 0.0908 s/iter. Eval: 0.0349 s/iter. Total: 0.1269 s/iter. ETA=0:00:13\n",
      "\u001b[32m[04/12 23:16:01 d2.evaluation.evaluator]: \u001b[0mInference done 50/121. Dataloading: 0.0018 s/iter. Inference: 0.0917 s/iter. Eval: 0.0373 s/iter. Total: 0.1308 s/iter. ETA=0:00:09\n",
      "\u001b[32m[04/12 23:16:06 d2.evaluation.evaluator]: \u001b[0mInference done 83/121. Dataloading: 0.0025 s/iter. Inference: 0.0952 s/iter. Eval: 0.0421 s/iter. Total: 0.1399 s/iter. ETA=0:00:05\n",
      "\u001b[32m[04/12 23:16:10 d2.evaluation.evaluator]: \u001b[0mTotal inference time: 0:00:15.859525 (0.136720 s / iter per device, on 1 devices)\n",
      "\u001b[32m[04/12 23:16:10 d2.evaluation.evaluator]: \u001b[0mTotal inference pure compute time: 0:00:10 (0.093736 s / iter per device, on 1 devices)\n",
      "\u001b[32m[04/12 23:16:10 d2.engine.defaults]: \u001b[0mEvaluation results for sartorius_val in csv format:\n",
      "\u001b[32m[04/12 23:16:10 d2.evaluation.testing]: \u001b[0mcopypaste: MaP IoU=0.2472822286179261\n",
      "\u001b[32m[04/12 23:16:11 d2.engine.hooks]: \u001b[0mNot saving as latest eval score for MaP IoU is 0.24728, not better than best score 0.26367 @ iteration 5323.\n",
      "\u001b[32m[04/12 23:16:27 d2.utils.events]: \u001b[0m eta: 0:55:41  iter: 5819  total_loss: 1.315  loss_cls: 0.2967  loss_box_reg: 0.5062  loss_mask: 0.2853  loss_rpn_cls: 0.06467  loss_rpn_loc: 0.1817  time: 1.5321  data_time: 0.6448  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 23:17:01 d2.utils.events]: \u001b[0m eta: 0:55:15  iter: 5839  total_loss: 1.402  loss_cls: 0.3306  loss_box_reg: 0.5034  loss_mask: 0.3047  loss_rpn_cls: 0.09182  loss_rpn_loc: 0.1983  time: 1.5327  data_time: 1.0714  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 23:17:29 d2.utils.events]: \u001b[0m eta: 0:55:09  iter: 5859  total_loss: 1.408  loss_cls: 0.3197  loss_box_reg: 0.5032  loss_mask: 0.3043  loss_rpn_cls: 0.08574  loss_rpn_loc: 0.1921  time: 1.5323  data_time: 0.7588  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 23:17:54 d2.utils.events]: \u001b[0m eta: 0:54:39  iter: 5879  total_loss: 1.421  loss_cls: 0.3265  loss_box_reg: 0.5029  loss_mask: 0.2949  loss_rpn_cls: 0.05849  loss_rpn_loc: 0.1837  time: 1.5313  data_time: 0.6249  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 23:18:20 d2.utils.events]: \u001b[0m eta: 0:54:20  iter: 5899  total_loss: 1.426  loss_cls: 0.326  loss_box_reg: 0.5288  loss_mask: 0.3045  loss_rpn_cls: 0.08092  loss_rpn_loc: 0.2044  time: 1.5304  data_time: 0.6410  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 23:18:49 d2.utils.events]: \u001b[0m eta: 0:54:07  iter: 5919  total_loss: 1.366  loss_cls: 0.2808  loss_box_reg: 0.5154  loss_mask: 0.2963  loss_rpn_cls: 0.06686  loss_rpn_loc: 0.1987  time: 1.5302  data_time: 0.8149  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 23:19:18 d2.utils.events]: \u001b[0m eta: 0:53:47  iter: 5939  total_loss: 1.402  loss_cls: 0.3168  loss_box_reg: 0.4839  loss_mask: 0.3048  loss_rpn_cls: 0.05906  loss_rpn_loc: 0.1767  time: 1.5299  data_time: 0.8298  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 23:19:51 d2.utils.events]: \u001b[0m eta: 0:53:31  iter: 5959  total_loss: 1.472  loss_cls: 0.326  loss_box_reg: 0.5106  loss_mask: 0.2998  loss_rpn_cls: 0.07939  loss_rpn_loc: 0.2109  time: 1.5304  data_time: 1.0161  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 23:20:23 d2.utils.events]: \u001b[0m eta: 0:53:17  iter: 5979  total_loss: 1.404  loss_cls: 0.2877  loss_box_reg: 0.5173  loss_mask: 0.3027  loss_rpn_cls: 0.07041  loss_rpn_loc: 0.1806  time: 1.5306  data_time: 0.9164  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 23:21:11 d2.utils.events]: \u001b[0m eta: 0:53:05  iter: 5999  total_loss: 1.501  loss_cls: 0.3473  loss_box_reg: 0.4788  loss_mask: 0.3172  loss_rpn_cls: 0.09444  loss_rpn_loc: 0.2376  time: 1.5334  data_time: 1.7263  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 23:21:40 d2.utils.events]: \u001b[0m eta: 0:53:00  iter: 6019  total_loss: 1.385  loss_cls: 0.3184  loss_box_reg: 0.4923  loss_mask: 0.2976  loss_rpn_cls: 0.06551  loss_rpn_loc: 0.1812  time: 1.5332  data_time: 0.8440  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 23:22:11 d2.utils.events]: \u001b[0m eta: 0:52:44  iter: 6039  total_loss: 1.353  loss_cls: 0.2977  loss_box_reg: 0.5154  loss_mask: 0.2912  loss_rpn_cls: 0.06486  loss_rpn_loc: 0.1944  time: 1.5333  data_time: 0.9054  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 23:22:29 d2.data.datasets.coco]: \u001b[0mLoaded 121 images in COCO format from ../input/sartoriusannotations/annotations_valid_f4.json\n",
      "\u001b[32m[04/12 23:22:29 d2.data.dataset_mapper]: \u001b[0m[DatasetMapper] Augmentations used in inference: [ResizeShortestEdge(short_edge_length=(800, 800), max_size=1333, sample_style='choice')]\n",
      "\u001b[32m[04/12 23:22:29 d2.data.common]: \u001b[0mSerializing 121 elements to byte tensors and concatenating them all ...\n",
      "\u001b[32m[04/12 23:22:29 d2.data.common]: \u001b[0mSerialized dataset takes 1.66 MiB\n",
      "\u001b[32m[04/12 23:22:29 d2.data.datasets.coco]: \u001b[0mLoaded 121 images in COCO format from ../input/sartoriusannotations/annotations_valid_f4.json\n",
      "\u001b[32m[04/12 23:22:30 d2.evaluation.evaluator]: \u001b[0mStart inference on 121 batches\n",
      "\u001b[32m[04/12 23:22:31 d2.evaluation.evaluator]: \u001b[0mInference done 11/121. Dataloading: 0.0014 s/iter. Inference: 0.0890 s/iter. Eval: 0.0326 s/iter. Total: 0.1231 s/iter. ETA=0:00:13\n",
      "\u001b[32m[04/12 23:22:36 d2.evaluation.evaluator]: \u001b[0mInference done 50/121. Dataloading: 0.0020 s/iter. Inference: 0.0905 s/iter. Eval: 0.0356 s/iter. Total: 0.1281 s/iter. ETA=0:00:09\n",
      "\u001b[32m[04/12 23:22:41 d2.evaluation.evaluator]: \u001b[0mInference done 84/121. Dataloading: 0.0026 s/iter. Inference: 0.0940 s/iter. Eval: 0.0398 s/iter. Total: 0.1364 s/iter. ETA=0:00:05\n",
      "\u001b[32m[04/12 23:22:46 d2.evaluation.evaluator]: \u001b[0mTotal inference time: 0:00:15.433257 (0.133045 s / iter per device, on 1 devices)\n",
      "\u001b[32m[04/12 23:22:46 d2.evaluation.evaluator]: \u001b[0mTotal inference pure compute time: 0:00:10 (0.092582 s / iter per device, on 1 devices)\n",
      "\u001b[32m[04/12 23:22:46 d2.engine.defaults]: \u001b[0mEvaluation results for sartorius_val in csv format:\n",
      "\u001b[32m[04/12 23:22:46 d2.evaluation.testing]: \u001b[0mcopypaste: MaP IoU=0.25739022694474156\n",
      "\u001b[32m[04/12 23:22:46 d2.engine.hooks]: \u001b[0mNot saving as latest eval score for MaP IoU is 0.25739, not better than best score 0.26367 @ iteration 5323.\n",
      "\u001b[32m[04/12 23:23:00 d2.utils.events]: \u001b[0m eta: 0:52:53  iter: 6059  total_loss: 1.347  loss_cls: 0.3041  loss_box_reg: 0.4815  loss_mask: 0.2861  loss_rpn_cls: 0.1058  loss_rpn_loc: 0.2152  time: 1.5333  data_time: 0.8801  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 23:23:30 d2.utils.events]: \u001b[0m eta: 0:52:58  iter: 6079  total_loss: 1.436  loss_cls: 0.3427  loss_box_reg: 0.4952  loss_mask: 0.3015  loss_rpn_cls: 0.08333  loss_rpn_loc: 0.1989  time: 1.5332  data_time: 0.8549  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 23:24:11 d2.utils.events]: \u001b[0m eta: 0:52:34  iter: 6099  total_loss: 1.308  loss_cls: 0.2835  loss_box_reg: 0.4817  loss_mask: 0.2999  loss_rpn_cls: 0.07442  loss_rpn_loc: 0.2078  time: 1.5349  data_time: 1.3602  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 23:24:48 d2.utils.events]: \u001b[0m eta: 0:52:53  iter: 6119  total_loss: 1.477  loss_cls: 0.3086  loss_box_reg: 0.4851  loss_mask: 0.3165  loss_rpn_cls: 0.09462  loss_rpn_loc: 0.2104  time: 1.5360  data_time: 1.2207  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 23:25:15 d2.utils.events]: \u001b[0m eta: 0:52:46  iter: 6139  total_loss: 1.429  loss_cls: 0.3591  loss_box_reg: 0.5074  loss_mask: 0.3068  loss_rpn_cls: 0.07257  loss_rpn_loc: 0.1945  time: 1.5354  data_time: 0.6889  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 23:25:44 d2.utils.events]: \u001b[0m eta: 0:52:20  iter: 6159  total_loss: 1.447  loss_cls: 0.3168  loss_box_reg: 0.5228  loss_mask: 0.3118  loss_rpn_cls: 0.06405  loss_rpn_loc: 0.203  time: 1.5351  data_time: 0.8072  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 23:26:12 d2.utils.events]: \u001b[0m eta: 0:51:21  iter: 6179  total_loss: 1.42  loss_cls: 0.3106  loss_box_reg: 0.4995  loss_mask: 0.2956  loss_rpn_cls: 0.08311  loss_rpn_loc: 0.1897  time: 1.5346  data_time: 0.7433  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 23:26:37 d2.utils.events]: \u001b[0m eta: 0:50:59  iter: 6199  total_loss: 1.455  loss_cls: 0.3225  loss_box_reg: 0.51  loss_mask: 0.2956  loss_rpn_cls: 0.0519  loss_rpn_loc: 0.1974  time: 1.5337  data_time: 0.6005  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 23:27:07 d2.utils.events]: \u001b[0m eta: 0:50:45  iter: 6219  total_loss: 1.312  loss_cls: 0.2975  loss_box_reg: 0.4576  loss_mask: 0.2988  loss_rpn_cls: 0.06821  loss_rpn_loc: 0.1904  time: 1.5336  data_time: 0.8497  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 23:27:32 d2.utils.events]: \u001b[0m eta: 0:49:50  iter: 6239  total_loss: 1.399  loss_cls: 0.275  loss_box_reg: 0.5069  loss_mask: 0.3025  loss_rpn_cls: 0.05027  loss_rpn_loc: 0.1912  time: 1.5327  data_time: 0.6496  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 23:28:04 d2.utils.events]: \u001b[0m eta: 0:49:27  iter: 6259  total_loss: 1.461  loss_cls: 0.3574  loss_box_reg: 0.5064  loss_mask: 0.2971  loss_rpn_cls: 0.08711  loss_rpn_loc: 0.1942  time: 1.5329  data_time: 0.9418  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 23:28:38 d2.utils.events]: \u001b[0m eta: 0:49:56  iter: 6279  total_loss: 1.384  loss_cls: 0.3006  loss_box_reg: 0.4687  loss_mask: 0.29  loss_rpn_cls: 0.07582  loss_rpn_loc: 0.2015  time: 1.5335  data_time: 1.0696  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 23:28:55 d2.data.datasets.coco]: \u001b[0mLoading ../input/sartoriusannotations/annotations_valid_f4.json takes 1.10 seconds.\n",
      "\u001b[32m[04/12 23:28:55 d2.data.datasets.coco]: \u001b[0mLoaded 121 images in COCO format from ../input/sartoriusannotations/annotations_valid_f4.json\n",
      "\u001b[32m[04/12 23:28:55 d2.data.dataset_mapper]: \u001b[0m[DatasetMapper] Augmentations used in inference: [ResizeShortestEdge(short_edge_length=(800, 800), max_size=1333, sample_style='choice')]\n",
      "\u001b[32m[04/12 23:28:55 d2.data.common]: \u001b[0mSerializing 121 elements to byte tensors and concatenating them all ...\n",
      "\u001b[32m[04/12 23:28:55 d2.data.common]: \u001b[0mSerialized dataset takes 1.66 MiB\n",
      "\u001b[32m[04/12 23:28:56 d2.data.datasets.coco]: \u001b[0mLoaded 121 images in COCO format from ../input/sartoriusannotations/annotations_valid_f4.json\n",
      "\u001b[32m[04/12 23:28:57 d2.evaluation.evaluator]: \u001b[0mStart inference on 121 batches\n",
      "\u001b[32m[04/12 23:29:00 d2.evaluation.evaluator]: \u001b[0mInference done 11/121. Dataloading: 0.0073 s/iter. Inference: 0.1200 s/iter. Eval: 0.0900 s/iter. Total: 0.2174 s/iter. ETA=0:00:23\n",
      "\u001b[32m[04/12 23:29:05 d2.evaluation.evaluator]: \u001b[0mInference done 48/121. Dataloading: 0.0028 s/iter. Inference: 0.0968 s/iter. Eval: 0.0486 s/iter. Total: 0.1483 s/iter. ETA=0:00:10\n",
      "\u001b[32m[04/12 23:29:10 d2.evaluation.evaluator]: \u001b[0mInference done 87/121. Dataloading: 0.0024 s/iter. Inference: 0.0941 s/iter. Eval: 0.0433 s/iter. Total: 0.1399 s/iter. ETA=0:00:04\n",
      "\u001b[32m[04/12 23:29:15 d2.evaluation.evaluator]: \u001b[0mTotal inference time: 0:00:16.524137 (0.142449 s / iter per device, on 1 devices)\n",
      "\u001b[32m[04/12 23:29:15 d2.evaluation.evaluator]: \u001b[0mTotal inference pure compute time: 0:00:10 (0.094750 s / iter per device, on 1 devices)\n",
      "\u001b[32m[04/12 23:29:15 d2.engine.defaults]: \u001b[0mEvaluation results for sartorius_val in csv format:\n",
      "\u001b[32m[04/12 23:29:15 d2.evaluation.testing]: \u001b[0mcopypaste: MaP IoU=0.26095423395089534\n",
      "\u001b[32m[04/12 23:29:15 d2.engine.hooks]: \u001b[0mNot saving as latest eval score for MaP IoU is 0.26095, not better than best score 0.26367 @ iteration 5323.\n",
      "\u001b[32m[04/12 23:29:25 d2.utils.events]: \u001b[0m eta: 0:49:44  iter: 6299  total_loss: 1.393  loss_cls: 0.3221  loss_box_reg: 0.4938  loss_mask: 0.2953  loss_rpn_cls: 0.08293  loss_rpn_loc: 0.1844  time: 1.5325  data_time: 0.5843  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 23:29:54 d2.utils.events]: \u001b[0m eta: 0:49:06  iter: 6319  total_loss: 1.321  loss_cls: 0.2702  loss_box_reg: 0.4794  loss_mask: 0.2905  loss_rpn_cls: 0.08255  loss_rpn_loc: 0.1851  time: 1.5323  data_time: 0.8370  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 23:30:28 d2.utils.events]: \u001b[0m eta: 0:48:30  iter: 6339  total_loss: 1.392  loss_cls: 0.2945  loss_box_reg: 0.4761  loss_mask: 0.2936  loss_rpn_cls: 0.06667  loss_rpn_loc: 0.2051  time: 1.5328  data_time: 1.0552  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 23:30:58 d2.utils.events]: \u001b[0m eta: 0:48:12  iter: 6359  total_loss: 1.387  loss_cls: 0.2943  loss_box_reg: 0.5115  loss_mask: 0.2969  loss_rpn_cls: 0.05651  loss_rpn_loc: 0.1951  time: 1.5327  data_time: 0.8971  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 23:31:24 d2.utils.events]: \u001b[0m eta: 0:47:48  iter: 6379  total_loss: 1.516  loss_cls: 0.374  loss_box_reg: 0.5282  loss_mask: 0.3012  loss_rpn_cls: 0.06672  loss_rpn_loc: 0.1823  time: 1.5320  data_time: 0.6769  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 23:31:54 d2.utils.events]: \u001b[0m eta: 0:47:32  iter: 6399  total_loss: 1.354  loss_cls: 0.331  loss_box_reg: 0.5246  loss_mask: 0.2987  loss_rpn_cls: 0.07291  loss_rpn_loc: 0.1836  time: 1.5319  data_time: 0.8226  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 23:32:26 d2.utils.events]: \u001b[0m eta: 0:47:13  iter: 6419  total_loss: 1.403  loss_cls: 0.3123  loss_box_reg: 0.4845  loss_mask: 0.2846  loss_rpn_cls: 0.09581  loss_rpn_loc: 0.2045  time: 1.5320  data_time: 0.9151  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 23:32:56 d2.utils.events]: \u001b[0m eta: 0:47:08  iter: 6439  total_loss: 1.364  loss_cls: 0.3032  loss_box_reg: 0.4836  loss_mask: 0.2907  loss_rpn_cls: 0.05406  loss_rpn_loc: 0.1917  time: 1.5319  data_time: 0.8407  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 23:33:24 d2.utils.events]: \u001b[0m eta: 0:46:55  iter: 6459  total_loss: 1.332  loss_cls: 0.3016  loss_box_reg: 0.5022  loss_mask: 0.2989  loss_rpn_cls: 0.06017  loss_rpn_loc: 0.1875  time: 1.5315  data_time: 0.7338  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 23:33:55 d2.utils.events]: \u001b[0m eta: 0:46:43  iter: 6479  total_loss: 1.4  loss_cls: 0.3331  loss_box_reg: 0.4941  loss_mask: 0.2954  loss_rpn_cls: 0.07673  loss_rpn_loc: 0.2023  time: 1.5316  data_time: 0.9003  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 23:34:29 d2.utils.events]: \u001b[0m eta: 0:46:33  iter: 6499  total_loss: 1.457  loss_cls: 0.3302  loss_box_reg: 0.5167  loss_mask: 0.3112  loss_rpn_cls: 0.08112  loss_rpn_loc: 0.1939  time: 1.5322  data_time: 1.0785  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 23:35:04 d2.utils.events]: \u001b[0m eta: 0:46:23  iter: 6519  total_loss: 1.386  loss_cls: 0.3176  loss_box_reg: 0.5144  loss_mask: 0.3115  loss_rpn_cls: 0.08478  loss_rpn_loc: 0.1905  time: 1.5328  data_time: 1.0868  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 23:35:27 d2.data.datasets.coco]: \u001b[0mLoaded 121 images in COCO format from ../input/sartoriusannotations/annotations_valid_f4.json\n",
      "\u001b[32m[04/12 23:35:27 d2.data.dataset_mapper]: \u001b[0m[DatasetMapper] Augmentations used in inference: [ResizeShortestEdge(short_edge_length=(800, 800), max_size=1333, sample_style='choice')]\n",
      "\u001b[32m[04/12 23:35:27 d2.data.common]: \u001b[0mSerializing 121 elements to byte tensors and concatenating them all ...\n",
      "\u001b[32m[04/12 23:35:28 d2.data.common]: \u001b[0mSerialized dataset takes 1.66 MiB\n",
      "\u001b[32m[04/12 23:35:28 d2.data.datasets.coco]: \u001b[0mLoaded 121 images in COCO format from ../input/sartoriusannotations/annotations_valid_f4.json\n",
      "\u001b[32m[04/12 23:35:29 d2.evaluation.evaluator]: \u001b[0mStart inference on 121 batches\n",
      "\u001b[32m[04/12 23:35:31 d2.evaluation.evaluator]: \u001b[0mInference done 11/121. Dataloading: 0.0019 s/iter. Inference: 0.1005 s/iter. Eval: 0.0491 s/iter. Total: 0.1515 s/iter. ETA=0:00:16\n",
      "\u001b[32m[04/12 23:35:36 d2.evaluation.evaluator]: \u001b[0mInference done 50/121. Dataloading: 0.0019 s/iter. Inference: 0.0922 s/iter. Eval: 0.0381 s/iter. Total: 0.1323 s/iter. ETA=0:00:09\n",
      "\u001b[32m[04/12 23:35:41 d2.evaluation.evaluator]: \u001b[0mInference done 86/121. Dataloading: 0.0022 s/iter. Inference: 0.0932 s/iter. Eval: 0.0401 s/iter. Total: 0.1356 s/iter. ETA=0:00:04\n",
      "\u001b[32m[04/12 23:35:45 d2.evaluation.evaluator]: \u001b[0mTotal inference time: 0:00:15.524124 (0.133829 s / iter per device, on 1 devices)\n",
      "\u001b[32m[04/12 23:35:45 d2.evaluation.evaluator]: \u001b[0mTotal inference pure compute time: 0:00:10 (0.092455 s / iter per device, on 1 devices)\n",
      "\u001b[32m[04/12 23:35:45 d2.engine.defaults]: \u001b[0mEvaluation results for sartorius_val in csv format:\n",
      "\u001b[32m[04/12 23:35:45 d2.evaluation.testing]: \u001b[0mcopypaste: MaP IoU=0.2572072271468069\n",
      "\u001b[32m[04/12 23:35:45 d2.engine.hooks]: \u001b[0mNot saving as latest eval score for MaP IoU is 0.25721, not better than best score 0.26367 @ iteration 5323.\n",
      "\u001b[32m[04/12 23:35:58 d2.utils.events]: \u001b[0m eta: 0:46:16  iter: 6539  total_loss: 1.433  loss_cls: 0.3573  loss_box_reg: 0.5078  loss_mask: 0.3015  loss_rpn_cls: 0.09504  loss_rpn_loc: 0.2255  time: 1.5335  data_time: 1.0942  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 23:36:34 d2.utils.events]: \u001b[0m eta: 0:46:27  iter: 6559  total_loss: 1.366  loss_cls: 0.3052  loss_box_reg: 0.4928  loss_mask: 0.2999  loss_rpn_cls: 0.07164  loss_rpn_loc: 0.1937  time: 1.5342  data_time: 1.0923  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 23:37:04 d2.utils.events]: \u001b[0m eta: 0:46:11  iter: 6579  total_loss: 1.421  loss_cls: 0.3583  loss_box_reg: 0.5009  loss_mask: 0.2894  loss_rpn_cls: 0.07398  loss_rpn_loc: 0.208  time: 1.5342  data_time: 0.8687  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 23:37:34 d2.utils.events]: \u001b[0m eta: 0:45:44  iter: 6599  total_loss: 1.288  loss_cls: 0.3008  loss_box_reg: 0.4804  loss_mask: 0.2965  loss_rpn_cls: 0.06029  loss_rpn_loc: 0.1946  time: 1.5340  data_time: 0.8308  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 23:38:00 d2.utils.events]: \u001b[0m eta: 0:45:46  iter: 6619  total_loss: 1.397  loss_cls: 0.3182  loss_box_reg: 0.5042  loss_mask: 0.2941  loss_rpn_cls: 0.06297  loss_rpn_loc: 0.1973  time: 1.5333  data_time: 0.6792  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 23:38:24 d2.utils.events]: \u001b[0m eta: 0:45:11  iter: 6639  total_loss: 1.399  loss_cls: 0.315  loss_box_reg: 0.5257  loss_mask: 0.3001  loss_rpn_cls: 0.05578  loss_rpn_loc: 0.1924  time: 1.5323  data_time: 0.5828  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 23:38:55 d2.utils.events]: \u001b[0m eta: 0:44:52  iter: 6659  total_loss: 1.425  loss_cls: 0.3521  loss_box_reg: 0.5056  loss_mask: 0.2939  loss_rpn_cls: 0.07053  loss_rpn_loc: 0.1878  time: 1.5324  data_time: 0.9189  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 23:39:24 d2.utils.events]: \u001b[0m eta: 0:44:15  iter: 6679  total_loss: 1.317  loss_cls: 0.2864  loss_box_reg: 0.5124  loss_mask: 0.2848  loss_rpn_cls: 0.04652  loss_rpn_loc: 0.1735  time: 1.5321  data_time: 0.8125  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 23:39:52 d2.utils.events]: \u001b[0m eta: 0:44:20  iter: 6699  total_loss: 1.463  loss_cls: 0.3316  loss_box_reg: 0.5146  loss_mask: 0.3101  loss_rpn_cls: 0.08568  loss_rpn_loc: 0.2124  time: 1.5318  data_time: 0.7795  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 23:40:22 d2.utils.events]: \u001b[0m eta: 0:44:07  iter: 6719  total_loss: 1.404  loss_cls: 0.3051  loss_box_reg: 0.4944  loss_mask: 0.3172  loss_rpn_cls: 0.06394  loss_rpn_loc: 0.1819  time: 1.5316  data_time: 0.8244  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 23:40:53 d2.utils.events]: \u001b[0m eta: 0:43:46  iter: 6739  total_loss: 1.288  loss_cls: 0.2776  loss_box_reg: 0.4441  loss_mask: 0.2779  loss_rpn_cls: 0.03708  loss_rpn_loc: 0.1791  time: 1.5316  data_time: 0.9101  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 23:41:32 d2.utils.events]: \u001b[0m eta: 0:43:44  iter: 6759  total_loss: 1.415  loss_cls: 0.3173  loss_box_reg: 0.481  loss_mask: 0.3071  loss_rpn_cls: 0.07822  loss_rpn_loc: 0.2223  time: 1.5329  data_time: 1.2846  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 23:41:56 d2.data.datasets.coco]: \u001b[0mLoaded 121 images in COCO format from ../input/sartoriusannotations/annotations_valid_f4.json\n",
      "\u001b[32m[04/12 23:41:57 d2.data.dataset_mapper]: \u001b[0m[DatasetMapper] Augmentations used in inference: [ResizeShortestEdge(short_edge_length=(800, 800), max_size=1333, sample_style='choice')]\n",
      "\u001b[32m[04/12 23:41:57 d2.data.common]: \u001b[0mSerializing 121 elements to byte tensors and concatenating them all ...\n",
      "\u001b[32m[04/12 23:41:57 d2.data.common]: \u001b[0mSerialized dataset takes 1.66 MiB\n",
      "\u001b[32m[04/12 23:41:58 d2.data.datasets.coco]: \u001b[0mLoaded 121 images in COCO format from ../input/sartoriusannotations/annotations_valid_f4.json\n",
      "\u001b[32m[04/12 23:41:58 d2.evaluation.evaluator]: \u001b[0mStart inference on 121 batches\n",
      "\u001b[32m[04/12 23:42:00 d2.evaluation.evaluator]: \u001b[0mInference done 11/121. Dataloading: 0.0022 s/iter. Inference: 0.0976 s/iter. Eval: 0.0603 s/iter. Total: 0.1601 s/iter. ETA=0:00:17\n",
      "\u001b[32m[04/12 23:42:06 d2.evaluation.evaluator]: \u001b[0mInference done 47/121. Dataloading: 0.0024 s/iter. Inference: 0.0944 s/iter. Eval: 0.0465 s/iter. Total: 0.1434 s/iter. ETA=0:00:10\n",
      "\u001b[32m[04/12 23:42:11 d2.evaluation.evaluator]: \u001b[0mInference done 85/121. Dataloading: 0.0022 s/iter. Inference: 0.0933 s/iter. Eval: 0.0425 s/iter. Total: 0.1382 s/iter. ETA=0:00:04\n",
      "\u001b[32m[04/12 23:42:15 d2.evaluation.evaluator]: \u001b[0mTotal inference time: 0:00:15.778507 (0.136022 s / iter per device, on 1 devices)\n",
      "\u001b[32m[04/12 23:42:15 d2.evaluation.evaluator]: \u001b[0mTotal inference pure compute time: 0:00:10 (0.092663 s / iter per device, on 1 devices)\n",
      "\u001b[32m[04/12 23:42:15 d2.engine.defaults]: \u001b[0mEvaluation results for sartorius_val in csv format:\n",
      "\u001b[32m[04/12 23:42:15 d2.evaluation.testing]: \u001b[0mcopypaste: MaP IoU=0.24404489885777253\n",
      "\u001b[32m[04/12 23:42:15 d2.engine.hooks]: \u001b[0mNot saving as latest eval score for MaP IoU is 0.24404, not better than best score 0.26367 @ iteration 5323.\n",
      "\u001b[32m[04/12 23:42:19 d2.utils.events]: \u001b[0m eta: 0:42:59  iter: 6779  total_loss: 1.422  loss_cls: 0.3179  loss_box_reg: 0.5099  loss_mask: 0.3017  loss_rpn_cls: 0.07846  loss_rpn_loc: 0.2042  time: 1.5324  data_time: 0.7188  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 23:42:46 d2.utils.events]: \u001b[0m eta: 0:42:25  iter: 6799  total_loss: 1.301  loss_cls: 0.2842  loss_box_reg: 0.4712  loss_mask: 0.291  loss_rpn_cls: 0.05169  loss_rpn_loc: 0.1804  time: 1.5319  data_time: 0.7233  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 23:43:19 d2.utils.events]: \u001b[0m eta: 0:42:22  iter: 6819  total_loss: 1.39  loss_cls: 0.3091  loss_box_reg: 0.5111  loss_mask: 0.316  loss_rpn_cls: 0.0734  loss_rpn_loc: 0.1983  time: 1.5323  data_time: 1.0069  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 23:43:55 d2.utils.events]: \u001b[0m eta: 0:42:07  iter: 6839  total_loss: 1.441  loss_cls: 0.3325  loss_box_reg: 0.5046  loss_mask: 0.2979  loss_rpn_cls: 0.08166  loss_rpn_loc: 0.1859  time: 1.5330  data_time: 1.1379  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 23:44:22 d2.utils.events]: \u001b[0m eta: 0:41:33  iter: 6859  total_loss: 1.444  loss_cls: 0.3014  loss_box_reg: 0.4773  loss_mask: 0.3055  loss_rpn_cls: 0.07453  loss_rpn_loc: 0.1933  time: 1.5326  data_time: 0.7301  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 23:44:56 d2.utils.events]: \u001b[0m eta: 0:41:27  iter: 6879  total_loss: 1.311  loss_cls: 0.3014  loss_box_reg: 0.4808  loss_mask: 0.2803  loss_rpn_cls: 0.08856  loss_rpn_loc: 0.1742  time: 1.5330  data_time: 0.9934  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 23:45:26 d2.utils.events]: \u001b[0m eta: 0:41:18  iter: 6899  total_loss: 1.396  loss_cls: 0.3121  loss_box_reg: 0.5153  loss_mask: 0.2911  loss_rpn_cls: 0.06952  loss_rpn_loc: 0.1946  time: 1.5328  data_time: 0.8524  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 23:46:05 d2.utils.events]: \u001b[0m eta: 0:41:18  iter: 6919  total_loss: 1.383  loss_cls: 0.3364  loss_box_reg: 0.4908  loss_mask: 0.3018  loss_rpn_cls: 0.06813  loss_rpn_loc: 0.2133  time: 1.5340  data_time: 1.2726  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 23:46:41 d2.utils.events]: \u001b[0m eta: 0:41:16  iter: 6939  total_loss: 1.446  loss_cls: 0.325  loss_box_reg: 0.5096  loss_mask: 0.2981  loss_rpn_cls: 0.08231  loss_rpn_loc: 0.2253  time: 1.5349  data_time: 1.1609  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 23:47:21 d2.utils.events]: \u001b[0m eta: 0:41:27  iter: 6959  total_loss: 1.466  loss_cls: 0.3383  loss_box_reg: 0.4946  loss_mask: 0.2993  loss_rpn_cls: 0.09141  loss_rpn_loc: 0.2179  time: 1.5362  data_time: 1.3227  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 23:48:00 d2.utils.events]: \u001b[0m eta: 0:41:23  iter: 6979  total_loss: 1.466  loss_cls: 0.3363  loss_box_reg: 0.478  loss_mask: 0.3025  loss_rpn_cls: 0.08377  loss_rpn_loc: 0.1946  time: 1.5374  data_time: 1.2552  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 23:48:29 d2.utils.events]: \u001b[0m eta: 0:40:43  iter: 6999  total_loss: 1.328  loss_cls: 0.2819  loss_box_reg: 0.4765  loss_mask: 0.3012  loss_rpn_cls: 0.05157  loss_rpn_loc: 0.1804  time: 1.5371  data_time: 0.8141  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 23:48:50 d2.data.datasets.coco]: \u001b[0mLoaded 121 images in COCO format from ../input/sartoriusannotations/annotations_valid_f4.json\n",
      "\u001b[32m[04/12 23:48:50 d2.data.dataset_mapper]: \u001b[0m[DatasetMapper] Augmentations used in inference: [ResizeShortestEdge(short_edge_length=(800, 800), max_size=1333, sample_style='choice')]\n",
      "\u001b[32m[04/12 23:48:50 d2.data.common]: \u001b[0mSerializing 121 elements to byte tensors and concatenating them all ...\n",
      "\u001b[32m[04/12 23:48:51 d2.data.common]: \u001b[0mSerialized dataset takes 1.66 MiB\n",
      "\u001b[32m[04/12 23:48:51 d2.data.datasets.coco]: \u001b[0mLoaded 121 images in COCO format from ../input/sartoriusannotations/annotations_valid_f4.json\n",
      "\u001b[32m[04/12 23:48:51 d2.evaluation.evaluator]: \u001b[0mStart inference on 121 batches\n",
      "\u001b[32m[04/12 23:48:53 d2.evaluation.evaluator]: \u001b[0mInference done 11/121. Dataloading: 0.0015 s/iter. Inference: 0.0892 s/iter. Eval: 0.0338 s/iter. Total: 0.1245 s/iter. ETA=0:00:13\n",
      "\u001b[32m[04/12 23:48:58 d2.evaluation.evaluator]: \u001b[0mInference done 50/121. Dataloading: 0.0021 s/iter. Inference: 0.0910 s/iter. Eval: 0.0364 s/iter. Total: 0.1296 s/iter. ETA=0:00:09\n",
      "\u001b[32m[04/12 23:49:03 d2.evaluation.evaluator]: \u001b[0mInference done 84/121. Dataloading: 0.0024 s/iter. Inference: 0.0935 s/iter. Eval: 0.0423 s/iter. Total: 0.1383 s/iter. ETA=0:00:05\n",
      "\u001b[32m[04/12 23:49:08 d2.evaluation.evaluator]: \u001b[0mTotal inference time: 0:00:15.696678 (0.135316 s / iter per device, on 1 devices)\n",
      "\u001b[32m[04/12 23:49:08 d2.evaluation.evaluator]: \u001b[0mTotal inference pure compute time: 0:00:10 (0.092524 s / iter per device, on 1 devices)\n",
      "\u001b[32m[04/12 23:49:08 d2.engine.defaults]: \u001b[0mEvaluation results for sartorius_val in csv format:\n",
      "\u001b[32m[04/12 23:49:08 d2.evaluation.testing]: \u001b[0mcopypaste: MaP IoU=0.25450331124644227\n",
      "\u001b[32m[04/12 23:49:08 d2.engine.hooks]: \u001b[0mNot saving as latest eval score for MaP IoU is 0.25450, not better than best score 0.26367 @ iteration 5323.\n",
      "\u001b[32m[04/12 23:49:09 d2.utils.events]: \u001b[0m eta: 0:40:11  iter: 7019  total_loss: 1.277  loss_cls: 0.2988  loss_box_reg: 0.483  loss_mask: 0.278  loss_rpn_cls: 0.0544  loss_rpn_loc: 0.1678  time: 1.5358  data_time: 0.4522  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 23:49:39 d2.utils.events]: \u001b[0m eta: 0:39:55  iter: 7039  total_loss: 1.317  loss_cls: 0.2963  loss_box_reg: 0.4595  loss_mask: 0.295  loss_rpn_cls: 0.04533  loss_rpn_loc: 0.1676  time: 1.5357  data_time: 0.8774  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 23:50:11 d2.utils.events]: \u001b[0m eta: 0:39:39  iter: 7059  total_loss: 1.438  loss_cls: 0.3481  loss_box_reg: 0.4995  loss_mask: 0.3149  loss_rpn_cls: 0.08921  loss_rpn_loc: 0.198  time: 1.5359  data_time: 0.9507  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 23:50:49 d2.utils.events]: \u001b[0m eta: 0:39:12  iter: 7079  total_loss: 1.374  loss_cls: 0.3176  loss_box_reg: 0.4932  loss_mask: 0.2922  loss_rpn_cls: 0.08724  loss_rpn_loc: 0.208  time: 1.5369  data_time: 1.2628  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 23:51:20 d2.utils.events]: \u001b[0m eta: 0:38:54  iter: 7099  total_loss: 1.3  loss_cls: 0.305  loss_box_reg: 0.4972  loss_mask: 0.2903  loss_rpn_cls: 0.05942  loss_rpn_loc: 0.1778  time: 1.5369  data_time: 0.8899  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 23:51:53 d2.utils.events]: \u001b[0m eta: 0:38:25  iter: 7119  total_loss: 1.383  loss_cls: 0.3271  loss_box_reg: 0.4695  loss_mask: 0.3022  loss_rpn_cls: 0.0921  loss_rpn_loc: 0.1971  time: 1.5372  data_time: 0.9885  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 23:52:15 d2.utils.events]: \u001b[0m eta: 0:37:58  iter: 7139  total_loss: 1.308  loss_cls: 0.2838  loss_box_reg: 0.5009  loss_mask: 0.2903  loss_rpn_cls: 0.05161  loss_rpn_loc: 0.166  time: 1.5360  data_time: 0.5019  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 23:52:43 d2.utils.events]: \u001b[0m eta: 0:37:50  iter: 7159  total_loss: 1.396  loss_cls: 0.294  loss_box_reg: 0.4831  loss_mask: 0.2911  loss_rpn_cls: 0.06144  loss_rpn_loc: 0.1846  time: 1.5356  data_time: 0.7228  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 23:53:11 d2.utils.events]: \u001b[0m eta: 0:37:35  iter: 7179  total_loss: 1.287  loss_cls: 0.255  loss_box_reg: 0.4645  loss_mask: 0.2881  loss_rpn_cls: 0.06011  loss_rpn_loc: 0.1686  time: 1.5353  data_time: 0.7672  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 23:53:38 d2.utils.events]: \u001b[0m eta: 0:37:21  iter: 7199  total_loss: 1.419  loss_cls: 0.3379  loss_box_reg: 0.4845  loss_mask: 0.294  loss_rpn_cls: 0.0554  loss_rpn_loc: 0.2049  time: 1.5347  data_time: 0.7125  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 23:54:13 d2.utils.events]: \u001b[0m eta: 0:37:08  iter: 7219  total_loss: 1.355  loss_cls: 0.3128  loss_box_reg: 0.4729  loss_mask: 0.3176  loss_rpn_cls: 0.0744  loss_rpn_loc: 0.2029  time: 1.5353  data_time: 1.1151  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 23:54:41 d2.utils.events]: \u001b[0m eta: 0:37:00  iter: 7239  total_loss: 1.385  loss_cls: 0.3171  loss_box_reg: 0.5005  loss_mask: 0.297  loss_rpn_cls: 0.0755  loss_rpn_loc: 0.2013  time: 1.5350  data_time: 0.7843  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 23:55:21 d2.data.datasets.coco]: \u001b[0mLoaded 121 images in COCO format from ../input/sartoriusannotations/annotations_valid_f4.json\n",
      "\u001b[32m[04/12 23:55:22 d2.data.dataset_mapper]: \u001b[0m[DatasetMapper] Augmentations used in inference: [ResizeShortestEdge(short_edge_length=(800, 800), max_size=1333, sample_style='choice')]\n",
      "\u001b[32m[04/12 23:55:22 d2.data.common]: \u001b[0mSerializing 121 elements to byte tensors and concatenating them all ...\n",
      "\u001b[32m[04/12 23:55:22 d2.data.common]: \u001b[0mSerialized dataset takes 1.66 MiB\n",
      "\u001b[32m[04/12 23:55:22 d2.data.datasets.coco]: \u001b[0mLoaded 121 images in COCO format from ../input/sartoriusannotations/annotations_valid_f4.json\n",
      "\u001b[32m[04/12 23:55:23 d2.evaluation.evaluator]: \u001b[0mStart inference on 121 batches\n",
      "\u001b[32m[04/12 23:55:25 d2.evaluation.evaluator]: \u001b[0mInference done 11/121. Dataloading: 0.0018 s/iter. Inference: 0.0894 s/iter. Eval: 0.0331 s/iter. Total: 0.1243 s/iter. ETA=0:00:13\n",
      "\u001b[32m[04/12 23:55:30 d2.evaluation.evaluator]: \u001b[0mInference done 49/121. Dataloading: 0.0025 s/iter. Inference: 0.0915 s/iter. Eval: 0.0383 s/iter. Total: 0.1323 s/iter. ETA=0:00:09\n",
      "\u001b[32m[04/12 23:55:35 d2.evaluation.evaluator]: \u001b[0mInference done 88/121. Dataloading: 0.0023 s/iter. Inference: 0.0912 s/iter. Eval: 0.0378 s/iter. Total: 0.1314 s/iter. ETA=0:00:04\n",
      "\u001b[32m[04/12 23:55:40 d2.evaluation.evaluator]: \u001b[0mTotal inference time: 0:00:15.819097 (0.136372 s / iter per device, on 1 devices)\n",
      "\u001b[32m[04/12 23:55:40 d2.evaluation.evaluator]: \u001b[0mTotal inference pure compute time: 0:00:10 (0.093155 s / iter per device, on 1 devices)\n",
      "\u001b[32m[04/12 23:55:40 d2.engine.defaults]: \u001b[0mEvaluation results for sartorius_val in csv format:\n",
      "\u001b[32m[04/12 23:55:40 d2.evaluation.testing]: \u001b[0mcopypaste: MaP IoU=0.25867983724495314\n",
      "\u001b[32m[04/12 23:55:40 d2.engine.hooks]: \u001b[0mNot saving as latest eval score for MaP IoU is 0.25868, not better than best score 0.26367 @ iteration 5323.\n",
      "\u001b[32m[04/12 23:55:40 d2.utils.events]: \u001b[0m eta: 0:36:54  iter: 7259  total_loss: 1.477  loss_cls: 0.3435  loss_box_reg: 0.4905  loss_mask: 0.3049  loss_rpn_cls: 0.09059  loss_rpn_loc: 0.2017  time: 1.5361  data_time: 1.2746  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 23:56:06 d2.utils.events]: \u001b[0m eta: 0:36:25  iter: 7279  total_loss: 1.414  loss_cls: 0.3495  loss_box_reg: 0.5183  loss_mask: 0.2977  loss_rpn_cls: 0.06315  loss_rpn_loc: 0.1943  time: 1.5355  data_time: 0.6771  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 23:56:39 d2.utils.events]: \u001b[0m eta: 0:36:13  iter: 7299  total_loss: 1.362  loss_cls: 0.2892  loss_box_reg: 0.471  loss_mask: 0.2899  loss_rpn_cls: 0.05718  loss_rpn_loc: 0.1915  time: 1.5358  data_time: 1.0077  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 23:57:09 d2.utils.events]: \u001b[0m eta: 0:35:53  iter: 7319  total_loss: 1.369  loss_cls: 0.2986  loss_box_reg: 0.4748  loss_mask: 0.2991  loss_rpn_cls: 0.07245  loss_rpn_loc: 0.1847  time: 1.5357  data_time: 0.8846  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 23:57:53 d2.utils.events]: \u001b[0m eta: 0:35:44  iter: 7339  total_loss: 1.441  loss_cls: 0.357  loss_box_reg: 0.4738  loss_mask: 0.3062  loss_rpn_cls: 0.09347  loss_rpn_loc: 0.1968  time: 1.5375  data_time: 1.4787  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 23:58:17 d2.utils.events]: \u001b[0m eta: 0:35:26  iter: 7359  total_loss: 1.393  loss_cls: 0.3245  loss_box_reg: 0.5187  loss_mask: 0.3078  loss_rpn_cls: 0.0677  loss_rpn_loc: 0.193  time: 1.5366  data_time: 0.5842  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 23:58:43 d2.utils.events]: \u001b[0m eta: 0:35:12  iter: 7379  total_loss: 1.383  loss_cls: 0.2846  loss_box_reg: 0.5069  loss_mask: 0.3008  loss_rpn_cls: 0.05836  loss_rpn_loc: 0.1876  time: 1.5359  data_time: 0.6296  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 23:59:21 d2.utils.events]: \u001b[0m eta: 0:35:03  iter: 7399  total_loss: 1.504  loss_cls: 0.3635  loss_box_reg: 0.5093  loss_mask: 0.3117  loss_rpn_cls: 0.09855  loss_rpn_loc: 0.1913  time: 1.5369  data_time: 1.2784  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/12 23:59:55 d2.utils.events]: \u001b[0m eta: 0:34:47  iter: 7419  total_loss: 1.319  loss_cls: 0.2848  loss_box_reg: 0.4813  loss_mask: 0.2957  loss_rpn_cls: 0.06321  loss_rpn_loc: 0.1807  time: 1.5374  data_time: 1.0574  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/13 00:00:27 d2.utils.events]: \u001b[0m eta: 0:34:24  iter: 7439  total_loss: 1.358  loss_cls: 0.3162  loss_box_reg: 0.4736  loss_mask: 0.287  loss_rpn_cls: 0.0765  loss_rpn_loc: 0.1848  time: 1.5375  data_time: 0.9483  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/13 00:00:50 d2.utils.events]: \u001b[0m eta: 0:34:10  iter: 7459  total_loss: 1.329  loss_cls: 0.2965  loss_box_reg: 0.4823  loss_mask: 0.2924  loss_rpn_cls: 0.05009  loss_rpn_loc: 0.1772  time: 1.5365  data_time: 0.5231  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/13 00:01:20 d2.utils.events]: \u001b[0m eta: 0:33:50  iter: 7479  total_loss: 1.388  loss_cls: 0.3148  loss_box_reg: 0.4933  loss_mask: 0.2882  loss_rpn_cls: 0.06897  loss_rpn_loc: 0.1853  time: 1.5363  data_time: 0.8571  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/13 00:01:59 d2.utils.events]: \u001b[0m eta: 0:33:32  iter: 7499  total_loss: 1.418  loss_cls: 0.3315  loss_box_reg: 0.4821  loss_mask: 0.2993  loss_rpn_cls: 0.095  loss_rpn_loc: 0.2116  time: 1.5375  data_time: 1.2619  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/13 00:02:05 d2.data.datasets.coco]: \u001b[0mLoaded 121 images in COCO format from ../input/sartoriusannotations/annotations_valid_f4.json\n",
      "\u001b[32m[04/13 00:02:05 d2.data.dataset_mapper]: \u001b[0m[DatasetMapper] Augmentations used in inference: [ResizeShortestEdge(short_edge_length=(800, 800), max_size=1333, sample_style='choice')]\n",
      "\u001b[32m[04/13 00:02:05 d2.data.common]: \u001b[0mSerializing 121 elements to byte tensors and concatenating them all ...\n",
      "\u001b[32m[04/13 00:02:05 d2.data.common]: \u001b[0mSerialized dataset takes 1.66 MiB\n",
      "\u001b[32m[04/13 00:02:05 d2.data.datasets.coco]: \u001b[0mLoaded 121 images in COCO format from ../input/sartoriusannotations/annotations_valid_f4.json\n",
      "\u001b[32m[04/13 00:02:06 d2.evaluation.evaluator]: \u001b[0mStart inference on 121 batches\n",
      "\u001b[32m[04/13 00:02:07 d2.evaluation.evaluator]: \u001b[0mInference done 11/121. Dataloading: 0.0015 s/iter. Inference: 0.0924 s/iter. Eval: 0.0392 s/iter. Total: 0.1332 s/iter. ETA=0:00:14\n",
      "\u001b[32m[04/13 00:02:13 d2.evaluation.evaluator]: \u001b[0mInference done 48/121. Dataloading: 0.0019 s/iter. Inference: 0.0924 s/iter. Eval: 0.0409 s/iter. Total: 0.1353 s/iter. ETA=0:00:09\n",
      "\u001b[32m[04/13 00:02:18 d2.evaluation.evaluator]: \u001b[0mInference done 81/121. Dataloading: 0.0021 s/iter. Inference: 0.0955 s/iter. Eval: 0.0457 s/iter. Total: 0.1434 s/iter. ETA=0:00:05\n",
      "\u001b[32m[04/13 00:02:23 d2.evaluation.evaluator]: \u001b[0mInference done 119/121. Dataloading: 0.0020 s/iter. Inference: 0.0941 s/iter. Eval: 0.0434 s/iter. Total: 0.1396 s/iter. ETA=0:00:00\n",
      "\u001b[32m[04/13 00:02:23 d2.evaluation.evaluator]: \u001b[0mTotal inference time: 0:00:16.265584 (0.140221 s / iter per device, on 1 devices)\n",
      "\u001b[32m[04/13 00:02:23 d2.evaluation.evaluator]: \u001b[0mTotal inference pure compute time: 0:00:10 (0.094132 s / iter per device, on 1 devices)\n",
      "\u001b[32m[04/13 00:02:23 d2.engine.defaults]: \u001b[0mEvaluation results for sartorius_val in csv format:\n",
      "\u001b[32m[04/13 00:02:23 d2.evaluation.testing]: \u001b[0mcopypaste: MaP IoU=0.22085522925763298\n",
      "\u001b[32m[04/13 00:02:23 d2.engine.hooks]: \u001b[0mNot saving as latest eval score for MaP IoU is 0.22086, not better than best score 0.26367 @ iteration 5323.\n",
      "\u001b[32m[04/13 00:02:59 d2.utils.events]: \u001b[0m eta: 0:33:16  iter: 7519  total_loss: 1.564  loss_cls: 0.3729  loss_box_reg: 0.5036  loss_mask: 0.3051  loss_rpn_cls: 0.08456  loss_rpn_loc: 0.2335  time: 1.5388  data_time: 1.3476  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/13 00:03:26 d2.utils.events]: \u001b[0m eta: 0:32:56  iter: 7539  total_loss: 1.335  loss_cls: 0.2883  loss_box_reg: 0.4936  loss_mask: 0.2993  loss_rpn_cls: 0.04474  loss_rpn_loc: 0.1919  time: 1.5383  data_time: 0.7291  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/13 00:03:54 d2.utils.events]: \u001b[0m eta: 0:32:33  iter: 7559  total_loss: 1.328  loss_cls: 0.2935  loss_box_reg: 0.4836  loss_mask: 0.2824  loss_rpn_cls: 0.04526  loss_rpn_loc: 0.1659  time: 1.5379  data_time: 0.7360  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/13 00:04:24 d2.utils.events]: \u001b[0m eta: 0:32:17  iter: 7579  total_loss: 1.318  loss_cls: 0.3  loss_box_reg: 0.4731  loss_mask: 0.2913  loss_rpn_cls: 0.07163  loss_rpn_loc: 0.1849  time: 1.5379  data_time: 0.8588  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/13 00:04:51 d2.utils.events]: \u001b[0m eta: 0:32:07  iter: 7599  total_loss: 1.404  loss_cls: 0.3161  loss_box_reg: 0.4863  loss_mask: 0.2899  loss_rpn_cls: 0.1058  loss_rpn_loc: 0.2002  time: 1.5374  data_time: 0.7124  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/13 00:05:30 d2.utils.events]: \u001b[0m eta: 0:31:57  iter: 7619  total_loss: 1.384  loss_cls: 0.3088  loss_box_reg: 0.4659  loss_mask: 0.294  loss_rpn_cls: 0.086  loss_rpn_loc: 0.2031  time: 1.5385  data_time: 1.2881  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/13 00:06:11 d2.utils.events]: \u001b[0m eta: 0:32:15  iter: 7639  total_loss: 1.468  loss_cls: 0.3395  loss_box_reg: 0.4715  loss_mask: 0.2914  loss_rpn_cls: 0.0914  loss_rpn_loc: 0.2119  time: 1.5398  data_time: 1.3475  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/13 00:06:31 d2.utils.events]: \u001b[0m eta: 0:31:58  iter: 7659  total_loss: 1.323  loss_cls: 0.3057  loss_box_reg: 0.4938  loss_mask: 0.2945  loss_rpn_cls: 0.04512  loss_rpn_loc: 0.1792  time: 1.5384  data_time: 0.4047  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/13 00:06:59 d2.utils.events]: \u001b[0m eta: 0:31:44  iter: 7679  total_loss: 1.354  loss_cls: 0.2962  loss_box_reg: 0.4941  loss_mask: 0.303  loss_rpn_cls: 0.05986  loss_rpn_loc: 0.1931  time: 1.5380  data_time: 0.7237  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/13 00:07:34 d2.utils.events]: \u001b[0m eta: 0:31:29  iter: 7699  total_loss: 1.456  loss_cls: 0.3499  loss_box_reg: 0.507  loss_mask: 0.3018  loss_rpn_cls: 0.1039  loss_rpn_loc: 0.2109  time: 1.5386  data_time: 1.0913  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/13 00:08:16 d2.utils.events]: \u001b[0m eta: 0:31:26  iter: 7719  total_loss: 1.376  loss_cls: 0.3076  loss_box_reg: 0.4707  loss_mask: 0.3071  loss_rpn_cls: 0.0949  loss_rpn_loc: 0.2068  time: 1.5400  data_time: 1.3925  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/13 00:08:51 d2.utils.events]: \u001b[0m eta: 0:32:03  iter: 7739  total_loss: 1.518  loss_cls: 0.3262  loss_box_reg: 0.5332  loss_mask: 0.3049  loss_rpn_cls: 0.08953  loss_rpn_loc: 0.2198  time: 1.5406  data_time: 1.0680  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/13 00:08:57 d2.data.datasets.coco]: \u001b[0mLoaded 121 images in COCO format from ../input/sartoriusannotations/annotations_valid_f4.json\n",
      "\u001b[32m[04/13 00:08:57 d2.data.dataset_mapper]: \u001b[0m[DatasetMapper] Augmentations used in inference: [ResizeShortestEdge(short_edge_length=(800, 800), max_size=1333, sample_style='choice')]\n",
      "\u001b[32m[04/13 00:08:57 d2.data.common]: \u001b[0mSerializing 121 elements to byte tensors and concatenating them all ...\n",
      "\u001b[32m[04/13 00:08:57 d2.data.common]: \u001b[0mSerialized dataset takes 1.66 MiB\n",
      "\u001b[32m[04/13 00:08:58 d2.data.datasets.coco]: \u001b[0mLoaded 121 images in COCO format from ../input/sartoriusannotations/annotations_valid_f4.json\n",
      "\u001b[32m[04/13 00:08:58 d2.evaluation.evaluator]: \u001b[0mStart inference on 121 batches\n",
      "\u001b[32m[04/13 00:09:01 d2.evaluation.evaluator]: \u001b[0mInference done 11/121. Dataloading: 0.0045 s/iter. Inference: 0.1053 s/iter. Eval: 0.0652 s/iter. Total: 0.1751 s/iter. ETA=0:00:19\n",
      "\u001b[32m[04/13 00:09:06 d2.evaluation.evaluator]: \u001b[0mInference done 50/121. Dataloading: 0.0023 s/iter. Inference: 0.0925 s/iter. Eval: 0.0402 s/iter. Total: 0.1350 s/iter. ETA=0:00:09\n",
      "\u001b[32m[04/13 00:09:11 d2.evaluation.evaluator]: \u001b[0mInference done 88/121. Dataloading: 0.0022 s/iter. Inference: 0.0922 s/iter. Eval: 0.0404 s/iter. Total: 0.1350 s/iter. ETA=0:00:04\n",
      "\u001b[32m[04/13 00:09:15 d2.evaluation.evaluator]: \u001b[0mTotal inference time: 0:00:15.548244 (0.134037 s / iter per device, on 1 devices)\n",
      "\u001b[32m[04/13 00:09:15 d2.evaluation.evaluator]: \u001b[0mTotal inference pure compute time: 0:00:10 (0.091777 s / iter per device, on 1 devices)\n",
      "\u001b[32m[04/13 00:09:15 d2.engine.defaults]: \u001b[0mEvaluation results for sartorius_val in csv format:\n",
      "\u001b[32m[04/13 00:09:15 d2.evaluation.testing]: \u001b[0mcopypaste: MaP IoU=0.25171209818743084\n",
      "\u001b[32m[04/13 00:09:15 d2.engine.hooks]: \u001b[0mNot saving as latest eval score for MaP IoU is 0.25171, not better than best score 0.26367 @ iteration 5323.\n",
      "\u001b[32m[04/13 00:09:28 d2.utils.events]: \u001b[0m eta: 0:31:08  iter: 7759  total_loss: 1.306  loss_cls: 0.2741  loss_box_reg: 0.502  loss_mask: 0.2901  loss_rpn_cls: 0.04004  loss_rpn_loc: 0.1768  time: 1.5389  data_time: 0.2746  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/13 00:10:00 d2.utils.events]: \u001b[0m eta: 0:30:55  iter: 7779  total_loss: 1.404  loss_cls: 0.3284  loss_box_reg: 0.4899  loss_mask: 0.2923  loss_rpn_cls: 0.07667  loss_rpn_loc: 0.1941  time: 1.5390  data_time: 0.9375  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/13 00:10:21 d2.utils.events]: \u001b[0m eta: 0:30:29  iter: 7799  total_loss: 1.308  loss_cls: 0.2699  loss_box_reg: 0.4826  loss_mask: 0.2967  loss_rpn_cls: 0.05482  loss_rpn_loc: 0.1622  time: 1.5378  data_time: 0.4310  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/13 00:10:59 d2.utils.events]: \u001b[0m eta: 0:30:18  iter: 7819  total_loss: 1.371  loss_cls: 0.3117  loss_box_reg: 0.4664  loss_mask: 0.2915  loss_rpn_cls: 0.08668  loss_rpn_loc: 0.2161  time: 1.5388  data_time: 1.2376  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/13 00:11:26 d2.utils.events]: \u001b[0m eta: 0:29:47  iter: 7839  total_loss: 1.26  loss_cls: 0.2884  loss_box_reg: 0.4815  loss_mask: 0.295  loss_rpn_cls: 0.05916  loss_rpn_loc: 0.1783  time: 1.5383  data_time: 0.7188  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/13 00:11:59 d2.utils.events]: \u001b[0m eta: 0:29:48  iter: 7859  total_loss: 1.469  loss_cls: 0.3141  loss_box_reg: 0.4888  loss_mask: 0.3114  loss_rpn_cls: 0.07183  loss_rpn_loc: 0.2012  time: 1.5385  data_time: 0.9887  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/13 00:12:38 d2.utils.events]: \u001b[0m eta: 0:29:46  iter: 7879  total_loss: 1.467  loss_cls: 0.336  loss_box_reg: 0.5011  loss_mask: 0.334  loss_rpn_cls: 0.07825  loss_rpn_loc: 0.2213  time: 1.5395  data_time: 1.2518  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/13 00:13:18 d2.utils.events]: \u001b[0m eta: 0:29:27  iter: 7899  total_loss: 1.415  loss_cls: 0.2881  loss_box_reg: 0.5096  loss_mask: 0.3014  loss_rpn_cls: 0.06492  loss_rpn_loc: 0.1934  time: 1.5408  data_time: 1.3261  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/13 00:13:47 d2.utils.events]: \u001b[0m eta: 0:28:55  iter: 7919  total_loss: 1.342  loss_cls: 0.2813  loss_box_reg: 0.4708  loss_mask: 0.2911  loss_rpn_cls: 0.0734  loss_rpn_loc: 0.1807  time: 1.5405  data_time: 0.8401  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/13 00:14:20 d2.utils.events]: \u001b[0m eta: 0:28:32  iter: 7939  total_loss: 1.313  loss_cls: 0.2566  loss_box_reg: 0.4824  loss_mask: 0.2828  loss_rpn_cls: 0.05829  loss_rpn_loc: 0.1745  time: 1.5408  data_time: 0.9956  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/13 00:14:45 d2.utils.events]: \u001b[0m eta: 0:27:55  iter: 7959  total_loss: 1.378  loss_cls: 0.3045  loss_box_reg: 0.4953  loss_mask: 0.2938  loss_rpn_cls: 0.07056  loss_rpn_loc: 0.202  time: 1.5400  data_time: 0.5953  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/13 00:15:18 d2.utils.events]: \u001b[0m eta: 0:27:37  iter: 7979  total_loss: 1.319  loss_cls: 0.3164  loss_box_reg: 0.4941  loss_mask: 0.2805  loss_rpn_cls: 0.07094  loss_rpn_loc: 0.1894  time: 1.5403  data_time: 1.0131  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/13 00:15:31 d2.data.datasets.coco]: \u001b[0mLoaded 121 images in COCO format from ../input/sartoriusannotations/annotations_valid_f4.json\n",
      "\u001b[32m[04/13 00:15:33 d2.data.dataset_mapper]: \u001b[0m[DatasetMapper] Augmentations used in inference: [ResizeShortestEdge(short_edge_length=(800, 800), max_size=1333, sample_style='choice')]\n",
      "\u001b[32m[04/13 00:15:33 d2.data.common]: \u001b[0mSerializing 121 elements to byte tensors and concatenating them all ...\n",
      "\u001b[32m[04/13 00:15:33 d2.data.common]: \u001b[0mSerialized dataset takes 1.66 MiB\n",
      "\u001b[32m[04/13 00:15:33 d2.data.datasets.coco]: \u001b[0mLoaded 121 images in COCO format from ../input/sartoriusannotations/annotations_valid_f4.json\n",
      "\u001b[32m[04/13 00:15:34 d2.evaluation.evaluator]: \u001b[0mStart inference on 121 batches\n",
      "\u001b[32m[04/13 00:15:36 d2.evaluation.evaluator]: \u001b[0mInference done 11/121. Dataloading: 0.0035 s/iter. Inference: 0.0995 s/iter. Eval: 0.0587 s/iter. Total: 0.1618 s/iter. ETA=0:00:17\n",
      "\u001b[32m[04/13 00:15:41 d2.evaluation.evaluator]: \u001b[0mInference done 45/121. Dataloading: 0.0026 s/iter. Inference: 0.0975 s/iter. Eval: 0.0507 s/iter. Total: 0.1510 s/iter. ETA=0:00:11\n",
      "\u001b[32m[04/13 00:15:46 d2.evaluation.evaluator]: \u001b[0mInference done 84/121. Dataloading: 0.0023 s/iter. Inference: 0.0940 s/iter. Eval: 0.0436 s/iter. Total: 0.1401 s/iter. ETA=0:00:05\n",
      "\u001b[32m[04/13 00:15:51 d2.evaluation.evaluator]: \u001b[0mTotal inference time: 0:00:16.014812 (0.138059 s / iter per device, on 1 devices)\n",
      "\u001b[32m[04/13 00:15:51 d2.evaluation.evaluator]: \u001b[0mTotal inference pure compute time: 0:00:10 (0.093160 s / iter per device, on 1 devices)\n",
      "\u001b[32m[04/13 00:15:51 d2.engine.defaults]: \u001b[0mEvaluation results for sartorius_val in csv format:\n",
      "\u001b[32m[04/13 00:15:51 d2.evaluation.testing]: \u001b[0mcopypaste: MaP IoU=0.25782111429095833\n",
      "\u001b[32m[04/13 00:15:51 d2.engine.hooks]: \u001b[0mNot saving as latest eval score for MaP IoU is 0.25782, not better than best score 0.26367 @ iteration 5323.\n",
      "\u001b[32m[04/13 00:16:07 d2.utils.events]: \u001b[0m eta: 0:27:22  iter: 7999  total_loss: 1.415  loss_cls: 0.3053  loss_box_reg: 0.5088  loss_mask: 0.3002  loss_rpn_cls: 0.06367  loss_rpn_loc: 0.2028  time: 1.5401  data_time: 0.8173  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/13 00:16:39 d2.utils.events]: \u001b[0m eta: 0:27:10  iter: 8019  total_loss: 1.319  loss_cls: 0.2921  loss_box_reg: 0.4983  loss_mask: 0.2839  loss_rpn_cls: 0.04707  loss_rpn_loc: 0.1764  time: 1.5402  data_time: 0.9099  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/13 00:17:13 d2.utils.events]: \u001b[0m eta: 0:27:11  iter: 8039  total_loss: 1.424  loss_cls: 0.3214  loss_box_reg: 0.5145  loss_mask: 0.2996  loss_rpn_cls: 0.08509  loss_rpn_loc: 0.1888  time: 1.5406  data_time: 1.0021  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/13 00:17:42 d2.utils.events]: \u001b[0m eta: 0:26:52  iter: 8059  total_loss: 1.382  loss_cls: 0.3197  loss_box_reg: 0.4932  loss_mask: 0.2823  loss_rpn_cls: 0.07162  loss_rpn_loc: 0.1894  time: 1.5404  data_time: 0.8478  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/13 00:18:19 d2.utils.events]: \u001b[0m eta: 0:26:21  iter: 8079  total_loss: 1.317  loss_cls: 0.2581  loss_box_reg: 0.4421  loss_mask: 0.3013  loss_rpn_cls: 0.08548  loss_rpn_loc: 0.1979  time: 1.5412  data_time: 1.2364  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/13 00:18:55 d2.utils.events]: \u001b[0m eta: 0:26:12  iter: 8099  total_loss: 1.397  loss_cls: 0.3192  loss_box_reg: 0.497  loss_mask: 0.2974  loss_rpn_cls: 0.06593  loss_rpn_loc: 0.1946  time: 1.5418  data_time: 1.1254  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/13 00:19:22 d2.utils.events]: \u001b[0m eta: 0:25:48  iter: 8119  total_loss: 1.324  loss_cls: 0.2729  loss_box_reg: 0.4706  loss_mask: 0.3032  loss_rpn_cls: 0.05727  loss_rpn_loc: 0.1842  time: 1.5414  data_time: 0.7068  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/13 00:19:50 d2.utils.events]: \u001b[0m eta: 0:25:46  iter: 8139  total_loss: 1.363  loss_cls: 0.31  loss_box_reg: 0.4873  loss_mask: 0.2948  loss_rpn_cls: 0.07591  loss_rpn_loc: 0.1989  time: 1.5409  data_time: 0.7251  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/13 00:20:20 d2.utils.events]: \u001b[0m eta: 0:25:22  iter: 8159  total_loss: 1.307  loss_cls: 0.2617  loss_box_reg: 0.4704  loss_mask: 0.2876  loss_rpn_cls: 0.07529  loss_rpn_loc: 0.1807  time: 1.5408  data_time: 0.8538  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/13 00:20:48 d2.utils.events]: \u001b[0m eta: 0:25:09  iter: 8179  total_loss: 1.325  loss_cls: 0.2843  loss_box_reg: 0.4819  loss_mask: 0.2948  loss_rpn_cls: 0.06514  loss_rpn_loc: 0.1903  time: 1.5405  data_time: 0.7695  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/13 00:21:24 d2.utils.events]: \u001b[0m eta: 0:25:11  iter: 8199  total_loss: 1.455  loss_cls: 0.3351  loss_box_reg: 0.4901  loss_mask: 0.3041  loss_rpn_cls: 0.09271  loss_rpn_loc: 0.2157  time: 1.5411  data_time: 1.1023  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/13 00:21:59 d2.utils.events]: \u001b[0m eta: 0:24:36  iter: 8219  total_loss: 1.371  loss_cls: 0.2882  loss_box_reg: 0.4679  loss_mask: 0.297  loss_rpn_cls: 0.08239  loss_rpn_loc: 0.1941  time: 1.5417  data_time: 1.1210  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/13 00:22:09 d2.data.datasets.coco]: \u001b[0mLoaded 121 images in COCO format from ../input/sartoriusannotations/annotations_valid_f4.json\n",
      "\u001b[32m[04/13 00:22:10 d2.data.dataset_mapper]: \u001b[0m[DatasetMapper] Augmentations used in inference: [ResizeShortestEdge(short_edge_length=(800, 800), max_size=1333, sample_style='choice')]\n",
      "\u001b[32m[04/13 00:22:10 d2.data.common]: \u001b[0mSerializing 121 elements to byte tensors and concatenating them all ...\n",
      "\u001b[32m[04/13 00:22:10 d2.data.common]: \u001b[0mSerialized dataset takes 1.66 MiB\n",
      "\u001b[32m[04/13 00:22:10 d2.data.datasets.coco]: \u001b[0mLoaded 121 images in COCO format from ../input/sartoriusannotations/annotations_valid_f4.json\n",
      "\u001b[32m[04/13 00:22:11 d2.evaluation.evaluator]: \u001b[0mStart inference on 121 batches\n",
      "\u001b[32m[04/13 00:22:12 d2.evaluation.evaluator]: \u001b[0mInference done 11/121. Dataloading: 0.0042 s/iter. Inference: 0.0991 s/iter. Eval: 0.0470 s/iter. Total: 0.1502 s/iter. ETA=0:00:16\n",
      "\u001b[32m[04/13 00:22:17 d2.evaluation.evaluator]: \u001b[0mInference done 49/121. Dataloading: 0.0024 s/iter. Inference: 0.0929 s/iter. Eval: 0.0400 s/iter. Total: 0.1354 s/iter. ETA=0:00:09\n",
      "\u001b[32m[04/13 00:22:23 d2.evaluation.evaluator]: \u001b[0mInference done 88/121. Dataloading: 0.0024 s/iter. Inference: 0.0919 s/iter. Eval: 0.0387 s/iter. Total: 0.1330 s/iter. ETA=0:00:04\n",
      "\u001b[32m[04/13 00:22:27 d2.evaluation.evaluator]: \u001b[0mTotal inference time: 0:00:15.480963 (0.133457 s / iter per device, on 1 devices)\n",
      "\u001b[32m[04/13 00:22:27 d2.evaluation.evaluator]: \u001b[0mTotal inference pure compute time: 0:00:10 (0.091740 s / iter per device, on 1 devices)\n",
      "\u001b[32m[04/13 00:22:27 d2.engine.defaults]: \u001b[0mEvaluation results for sartorius_val in csv format:\n",
      "\u001b[32m[04/13 00:22:27 d2.evaluation.testing]: \u001b[0mcopypaste: MaP IoU=0.2590266093324461\n",
      "\u001b[32m[04/13 00:22:27 d2.engine.hooks]: \u001b[0mNot saving as latest eval score for MaP IoU is 0.25903, not better than best score 0.26367 @ iteration 5323.\n",
      "\u001b[32m[04/13 00:22:44 d2.utils.events]: \u001b[0m eta: 0:24:14  iter: 8239  total_loss: 1.398  loss_cls: 0.3273  loss_box_reg: 0.5363  loss_mask: 0.307  loss_rpn_cls: 0.05603  loss_rpn_loc: 0.1857  time: 1.5411  data_time: 0.6543  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/13 00:23:14 d2.utils.events]: \u001b[0m eta: 0:23:48  iter: 8259  total_loss: 1.24  loss_cls: 0.2554  loss_box_reg: 0.4535  loss_mask: 0.2755  loss_rpn_cls: 0.04999  loss_rpn_loc: 0.1741  time: 1.5410  data_time: 0.8988  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/13 00:23:46 d2.utils.events]: \u001b[0m eta: 0:23:31  iter: 8279  total_loss: 1.483  loss_cls: 0.3512  loss_box_reg: 0.5038  loss_mask: 0.315  loss_rpn_cls: 0.08121  loss_rpn_loc: 0.1974  time: 1.5412  data_time: 0.9760  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/13 00:24:16 d2.utils.events]: \u001b[0m eta: 0:23:13  iter: 8299  total_loss: 1.317  loss_cls: 0.2646  loss_box_reg: 0.4714  loss_mask: 0.3008  loss_rpn_cls: 0.06502  loss_rpn_loc: 0.1947  time: 1.5411  data_time: 0.8677  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/13 00:24:50 d2.utils.events]: \u001b[0m eta: 0:22:59  iter: 8319  total_loss: 1.321  loss_cls: 0.3015  loss_box_reg: 0.4841  loss_mask: 0.2971  loss_rpn_cls: 0.06078  loss_rpn_loc: 0.1792  time: 1.5414  data_time: 1.0054  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/13 00:25:24 d2.utils.events]: \u001b[0m eta: 0:22:40  iter: 8339  total_loss: 1.318  loss_cls: 0.2931  loss_box_reg: 0.4692  loss_mask: 0.2874  loss_rpn_cls: 0.06756  loss_rpn_loc: 0.1855  time: 1.5418  data_time: 1.0314  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/13 00:25:56 d2.utils.events]: \u001b[0m eta: 0:22:26  iter: 8359  total_loss: 1.318  loss_cls: 0.2596  loss_box_reg: 0.4746  loss_mask: 0.2863  loss_rpn_cls: 0.07312  loss_rpn_loc: 0.1794  time: 1.5420  data_time: 0.9843  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/13 00:26:35 d2.utils.events]: \u001b[0m eta: 0:22:13  iter: 8379  total_loss: 1.289  loss_cls: 0.2877  loss_box_reg: 0.4569  loss_mask: 0.2962  loss_rpn_cls: 0.0785  loss_rpn_loc: 0.1933  time: 1.5429  data_time: 1.2510  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/13 00:27:03 d2.utils.events]: \u001b[0m eta: 0:21:57  iter: 8399  total_loss: 1.383  loss_cls: 0.3268  loss_box_reg: 0.5274  loss_mask: 0.2927  loss_rpn_cls: 0.0701  loss_rpn_loc: 0.1831  time: 1.5425  data_time: 0.7481  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/13 00:27:31 d2.utils.events]: \u001b[0m eta: 0:21:36  iter: 8419  total_loss: 1.388  loss_cls: 0.3312  loss_box_reg: 0.4801  loss_mask: 0.2882  loss_rpn_cls: 0.07603  loss_rpn_loc: 0.1953  time: 1.5422  data_time: 0.7717  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/13 00:28:00 d2.utils.events]: \u001b[0m eta: 0:21:20  iter: 8439  total_loss: 1.394  loss_cls: 0.3259  loss_box_reg: 0.5036  loss_mask: 0.3139  loss_rpn_cls: 0.07909  loss_rpn_loc: 0.1943  time: 1.5420  data_time: 0.8068  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/13 00:28:37 d2.utils.events]: \u001b[0m eta: 0:21:04  iter: 8459  total_loss: 1.389  loss_cls: 0.3177  loss_box_reg: 0.484  loss_mask: 0.3081  loss_rpn_cls: 0.07565  loss_rpn_loc: 0.2133  time: 1.5427  data_time: 1.2336  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/13 00:28:53 d2.data.datasets.coco]: \u001b[0mLoading ../input/sartoriusannotations/annotations_valid_f4.json takes 1.17 seconds.\n",
      "\u001b[32m[04/13 00:28:53 d2.data.datasets.coco]: \u001b[0mLoaded 121 images in COCO format from ../input/sartoriusannotations/annotations_valid_f4.json\n",
      "\u001b[32m[04/13 00:28:54 d2.data.dataset_mapper]: \u001b[0m[DatasetMapper] Augmentations used in inference: [ResizeShortestEdge(short_edge_length=(800, 800), max_size=1333, sample_style='choice')]\n",
      "\u001b[32m[04/13 00:28:54 d2.data.common]: \u001b[0mSerializing 121 elements to byte tensors and concatenating them all ...\n",
      "\u001b[32m[04/13 00:28:54 d2.data.common]: \u001b[0mSerialized dataset takes 1.66 MiB\n",
      "\u001b[32m[04/13 00:28:54 d2.data.datasets.coco]: \u001b[0mLoaded 121 images in COCO format from ../input/sartoriusannotations/annotations_valid_f4.json\n",
      "\u001b[32m[04/13 00:28:55 d2.evaluation.evaluator]: \u001b[0mStart inference on 121 batches\n",
      "\u001b[32m[04/13 00:28:56 d2.evaluation.evaluator]: \u001b[0mInference done 11/121. Dataloading: 0.0015 s/iter. Inference: 0.0882 s/iter. Eval: 0.0317 s/iter. Total: 0.1214 s/iter. ETA=0:00:13\n",
      "\u001b[32m[04/13 00:29:02 d2.evaluation.evaluator]: \u001b[0mInference done 49/121. Dataloading: 0.0026 s/iter. Inference: 0.0917 s/iter. Eval: 0.0377 s/iter. Total: 0.1321 s/iter. ETA=0:00:09\n",
      "\u001b[32m[04/13 00:29:07 d2.evaluation.evaluator]: \u001b[0mInference done 84/121. Dataloading: 0.0076 s/iter. Inference: 0.0916 s/iter. Eval: 0.0377 s/iter. Total: 0.1369 s/iter. ETA=0:00:05\n",
      "\u001b[32m[04/13 00:29:12 d2.evaluation.evaluator]: \u001b[0mInference done 119/121. Dataloading: 0.0063 s/iter. Inference: 0.0932 s/iter. Eval: 0.0396 s/iter. Total: 0.1391 s/iter. ETA=0:00:00\n",
      "\u001b[32m[04/13 00:29:12 d2.evaluation.evaluator]: \u001b[0mTotal inference time: 0:00:16.306186 (0.140571 s / iter per device, on 1 devices)\n",
      "\u001b[32m[04/13 00:29:12 d2.evaluation.evaluator]: \u001b[0mTotal inference pure compute time: 0:00:10 (0.093246 s / iter per device, on 1 devices)\n",
      "\u001b[32m[04/13 00:29:12 d2.engine.defaults]: \u001b[0mEvaluation results for sartorius_val in csv format:\n",
      "\u001b[32m[04/13 00:29:12 d2.evaluation.testing]: \u001b[0mcopypaste: MaP IoU=0.2636155099290141\n",
      "\u001b[32m[04/13 00:29:12 d2.engine.hooks]: \u001b[0mNot saving as latest eval score for MaP IoU is 0.26362, not better than best score 0.26367 @ iteration 5323.\n",
      "\u001b[32m[04/13 00:29:28 d2.utils.events]: \u001b[0m eta: 0:20:47  iter: 8479  total_loss: 1.353  loss_cls: 0.3144  loss_box_reg: 0.4999  loss_mask: 0.2971  loss_rpn_cls: 0.07524  loss_rpn_loc: 0.195  time: 1.5427  data_time: 0.8800  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/13 00:29:52 d2.utils.events]: \u001b[0m eta: 0:20:19  iter: 8499  total_loss: 1.296  loss_cls: 0.239  loss_box_reg: 0.4888  loss_mask: 0.2876  loss_rpn_cls: 0.0706  loss_rpn_loc: 0.1808  time: 1.5419  data_time: 0.5829  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/13 00:30:37 d2.utils.events]: \u001b[0m eta: 0:20:14  iter: 8519  total_loss: 1.464  loss_cls: 0.3567  loss_box_reg: 0.4777  loss_mask: 0.3015  loss_rpn_cls: 0.09653  loss_rpn_loc: 0.2258  time: 1.5436  data_time: 1.5533  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/13 00:31:09 d2.utils.events]: \u001b[0m eta: 0:20:00  iter: 8539  total_loss: 1.272  loss_cls: 0.2889  loss_box_reg: 0.4371  loss_mask: 0.2721  loss_rpn_cls: 0.06712  loss_rpn_loc: 0.1785  time: 1.5437  data_time: 0.9558  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/13 00:31:45 d2.utils.events]: \u001b[0m eta: 0:19:46  iter: 8559  total_loss: 1.31  loss_cls: 0.2769  loss_box_reg: 0.4771  loss_mask: 0.2828  loss_rpn_cls: 0.06705  loss_rpn_loc: 0.1817  time: 1.5443  data_time: 1.1377  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/13 00:32:12 d2.utils.events]: \u001b[0m eta: 0:19:25  iter: 8579  total_loss: 1.353  loss_cls: 0.2984  loss_box_reg: 0.4976  loss_mask: 0.2952  loss_rpn_cls: 0.06621  loss_rpn_loc: 0.1949  time: 1.5438  data_time: 0.7097  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/13 00:32:42 d2.utils.events]: \u001b[0m eta: 0:19:08  iter: 8599  total_loss: 1.356  loss_cls: 0.2775  loss_box_reg: 0.4739  loss_mask: 0.2938  loss_rpn_cls: 0.06497  loss_rpn_loc: 0.2024  time: 1.5437  data_time: 0.8642  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/13 00:33:08 d2.utils.events]: \u001b[0m eta: 0:18:41  iter: 8619  total_loss: 1.354  loss_cls: 0.29  loss_box_reg: 0.5009  loss_mask: 0.2815  loss_rpn_cls: 0.0794  loss_rpn_loc: 0.1827  time: 1.5431  data_time: 0.6651  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/13 00:33:32 d2.utils.events]: \u001b[0m eta: 0:18:13  iter: 8639  total_loss: 1.37  loss_cls: 0.3009  loss_box_reg: 0.5027  loss_mask: 0.3086  loss_rpn_cls: 0.04145  loss_rpn_loc: 0.1704  time: 1.5424  data_time: 0.5934  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/13 00:34:01 d2.utils.events]: \u001b[0m eta: 0:18:00  iter: 8659  total_loss: 1.421  loss_cls: 0.322  loss_box_reg: 0.5442  loss_mask: 0.3068  loss_rpn_cls: 0.04892  loss_rpn_loc: 0.1883  time: 1.5422  data_time: 0.8056  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/13 00:34:33 d2.utils.events]: \u001b[0m eta: 0:17:47  iter: 8679  total_loss: 1.303  loss_cls: 0.2955  loss_box_reg: 0.4923  loss_mask: 0.2983  loss_rpn_cls: 0.0766  loss_rpn_loc: 0.1998  time: 1.5423  data_time: 0.9541  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/13 00:35:04 d2.utils.events]: \u001b[0m eta: 0:17:25  iter: 8699  total_loss: 1.422  loss_cls: 0.3104  loss_box_reg: 0.489  loss_mask: 0.3025  loss_rpn_cls: 0.07899  loss_rpn_loc: 0.1963  time: 1.5424  data_time: 0.9208  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/13 00:35:22 d2.data.datasets.coco]: \u001b[0mLoaded 121 images in COCO format from ../input/sartoriusannotations/annotations_valid_f4.json\n",
      "\u001b[32m[04/13 00:35:23 d2.data.dataset_mapper]: \u001b[0m[DatasetMapper] Augmentations used in inference: [ResizeShortestEdge(short_edge_length=(800, 800), max_size=1333, sample_style='choice')]\n",
      "\u001b[32m[04/13 00:35:23 d2.data.common]: \u001b[0mSerializing 121 elements to byte tensors and concatenating them all ...\n",
      "\u001b[32m[04/13 00:35:23 d2.data.common]: \u001b[0mSerialized dataset takes 1.66 MiB\n",
      "\u001b[32m[04/13 00:35:23 d2.data.datasets.coco]: \u001b[0mLoaded 121 images in COCO format from ../input/sartoriusannotations/annotations_valid_f4.json\n",
      "\u001b[32m[04/13 00:35:24 d2.evaluation.evaluator]: \u001b[0mStart inference on 121 batches\n",
      "\u001b[32m[04/13 00:35:26 d2.evaluation.evaluator]: \u001b[0mInference done 11/121. Dataloading: 0.0018 s/iter. Inference: 0.0890 s/iter. Eval: 0.0315 s/iter. Total: 0.1223 s/iter. ETA=0:00:13\n",
      "\u001b[32m[04/13 00:35:31 d2.evaluation.evaluator]: \u001b[0mInference done 50/121. Dataloading: 0.0019 s/iter. Inference: 0.0906 s/iter. Eval: 0.0365 s/iter. Total: 0.1291 s/iter. ETA=0:00:09\n",
      "\u001b[32m[04/13 00:35:36 d2.evaluation.evaluator]: \u001b[0mInference done 87/121. Dataloading: 0.0022 s/iter. Inference: 0.0911 s/iter. Eval: 0.0385 s/iter. Total: 0.1319 s/iter. ETA=0:00:04\n",
      "\u001b[32m[04/13 00:35:41 d2.evaluation.evaluator]: \u001b[0mTotal inference time: 0:00:15.292927 (0.131836 s / iter per device, on 1 devices)\n",
      "\u001b[32m[04/13 00:35:41 d2.evaluation.evaluator]: \u001b[0mTotal inference pure compute time: 0:00:10 (0.091152 s / iter per device, on 1 devices)\n",
      "\u001b[32m[04/13 00:35:41 d2.engine.defaults]: \u001b[0mEvaluation results for sartorius_val in csv format:\n",
      "\u001b[32m[04/13 00:35:41 d2.evaluation.testing]: \u001b[0mcopypaste: MaP IoU=0.262396004141696\n",
      "\u001b[32m[04/13 00:35:41 d2.engine.hooks]: \u001b[0mNot saving as latest eval score for MaP IoU is 0.26240, not better than best score 0.26367 @ iteration 5323.\n",
      "\u001b[32m[04/13 00:35:58 d2.utils.events]: \u001b[0m eta: 0:17:11  iter: 8719  total_loss: 1.392  loss_cls: 0.2974  loss_box_reg: 0.4804  loss_mask: 0.3099  loss_rpn_cls: 0.08117  loss_rpn_loc: 0.1985  time: 1.5428  data_time: 1.0477  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/13 00:36:39 d2.utils.events]: \u001b[0m eta: 0:16:52  iter: 8739  total_loss: 1.314  loss_cls: 0.2863  loss_box_reg: 0.4611  loss_mask: 0.2911  loss_rpn_cls: 0.08095  loss_rpn_loc: 0.1917  time: 1.5440  data_time: 1.3891  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/13 00:37:12 d2.utils.events]: \u001b[0m eta: 0:16:38  iter: 8759  total_loss: 1.27  loss_cls: 0.2734  loss_box_reg: 0.4539  loss_mask: 0.2743  loss_rpn_cls: 0.04726  loss_rpn_loc: 0.1694  time: 1.5442  data_time: 1.0109  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/13 00:37:49 d2.utils.events]: \u001b[0m eta: 0:16:23  iter: 8779  total_loss: 1.429  loss_cls: 0.3245  loss_box_reg: 0.5018  loss_mask: 0.2873  loss_rpn_cls: 0.07237  loss_rpn_loc: 0.194  time: 1.5448  data_time: 1.1501  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/13 00:38:22 d2.utils.events]: \u001b[0m eta: 0:16:15  iter: 8799  total_loss: 1.365  loss_cls: 0.3198  loss_box_reg: 0.4876  loss_mask: 0.2918  loss_rpn_cls: 0.07036  loss_rpn_loc: 0.2019  time: 1.5451  data_time: 1.0119  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/13 00:38:56 d2.utils.events]: \u001b[0m eta: 0:16:02  iter: 8819  total_loss: 1.335  loss_cls: 0.2926  loss_box_reg: 0.4899  loss_mask: 0.2971  loss_rpn_cls: 0.0712  loss_rpn_loc: 0.1948  time: 1.5455  data_time: 1.0456  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/13 00:39:32 d2.utils.events]: \u001b[0m eta: 0:15:50  iter: 8839  total_loss: 1.333  loss_cls: 0.2865  loss_box_reg: 0.472  loss_mask: 0.2939  loss_rpn_cls: 0.06194  loss_rpn_loc: 0.2102  time: 1.5460  data_time: 1.1606  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/13 00:39:58 d2.utils.events]: \u001b[0m eta: 0:15:30  iter: 8859  total_loss: 1.342  loss_cls: 0.3253  loss_box_reg: 0.5153  loss_mask: 0.2974  loss_rpn_cls: 0.06417  loss_rpn_loc: 0.1756  time: 1.5455  data_time: 0.6547  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/13 00:40:27 d2.utils.events]: \u001b[0m eta: 0:15:10  iter: 8879  total_loss: 1.365  loss_cls: 0.2897  loss_box_reg: 0.4828  loss_mask: 0.2939  loss_rpn_cls: 0.0551  loss_rpn_loc: 0.1815  time: 1.5452  data_time: 0.8037  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/13 00:40:58 d2.utils.events]: \u001b[0m eta: 0:14:53  iter: 8899  total_loss: 1.358  loss_cls: 0.3186  loss_box_reg: 0.4967  loss_mask: 0.2939  loss_rpn_cls: 0.06872  loss_rpn_loc: 0.2051  time: 1.5453  data_time: 0.9215  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/13 00:41:26 d2.utils.events]: \u001b[0m eta: 0:14:36  iter: 8919  total_loss: 1.385  loss_cls: 0.2863  loss_box_reg: 0.4941  loss_mask: 0.3021  loss_rpn_cls: 0.06004  loss_rpn_loc: 0.1836  time: 1.5449  data_time: 0.7731  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/13 00:41:59 d2.utils.events]: \u001b[0m eta: 0:14:19  iter: 8939  total_loss: 1.334  loss_cls: 0.275  loss_box_reg: 0.4754  loss_mask: 0.3013  loss_rpn_cls: 0.05749  loss_rpn_loc: 0.2035  time: 1.5452  data_time: 1.0150  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/13 00:42:27 d2.data.datasets.coco]: \u001b[0mLoaded 121 images in COCO format from ../input/sartoriusannotations/annotations_valid_f4.json\n",
      "\u001b[32m[04/13 00:42:27 d2.data.dataset_mapper]: \u001b[0m[DatasetMapper] Augmentations used in inference: [ResizeShortestEdge(short_edge_length=(800, 800), max_size=1333, sample_style='choice')]\n",
      "\u001b[32m[04/13 00:42:27 d2.data.common]: \u001b[0mSerializing 121 elements to byte tensors and concatenating them all ...\n",
      "\u001b[32m[04/13 00:42:27 d2.data.common]: \u001b[0mSerialized dataset takes 1.66 MiB\n",
      "\u001b[32m[04/13 00:42:28 d2.data.datasets.coco]: \u001b[0mLoaded 121 images in COCO format from ../input/sartoriusannotations/annotations_valid_f4.json\n",
      "\u001b[32m[04/13 00:42:28 d2.evaluation.evaluator]: \u001b[0mStart inference on 121 batches\n",
      "\u001b[32m[04/13 00:42:30 d2.evaluation.evaluator]: \u001b[0mInference done 11/121. Dataloading: 0.0016 s/iter. Inference: 0.0888 s/iter. Eval: 0.0333 s/iter. Total: 0.1237 s/iter. ETA=0:00:13\n",
      "\u001b[32m[04/13 00:42:35 d2.evaluation.evaluator]: \u001b[0mInference done 48/121. Dataloading: 0.0021 s/iter. Inference: 0.0929 s/iter. Eval: 0.0397 s/iter. Total: 0.1348 s/iter. ETA=0:00:09\n",
      "\u001b[32m[04/13 00:42:40 d2.evaluation.evaluator]: \u001b[0mInference done 85/121. Dataloading: 0.0021 s/iter. Inference: 0.0928 s/iter. Eval: 0.0401 s/iter. Total: 0.1351 s/iter. ETA=0:00:04\n",
      "\u001b[32m[04/13 00:42:45 d2.evaluation.evaluator]: \u001b[0mTotal inference time: 0:00:15.632990 (0.134767 s / iter per device, on 1 devices)\n",
      "\u001b[32m[04/13 00:42:45 d2.evaluation.evaluator]: \u001b[0mTotal inference pure compute time: 0:00:10 (0.092195 s / iter per device, on 1 devices)\n",
      "\u001b[32m[04/13 00:42:45 d2.engine.defaults]: \u001b[0mEvaluation results for sartorius_val in csv format:\n",
      "\u001b[32m[04/13 00:42:45 d2.evaluation.testing]: \u001b[0mcopypaste: MaP IoU=0.26123534373252455\n",
      "\u001b[32m[04/13 00:42:45 d2.engine.hooks]: \u001b[0mNot saving as latest eval score for MaP IoU is 0.26124, not better than best score 0.26367 @ iteration 5323.\n",
      "\u001b[32m[04/13 00:42:51 d2.utils.events]: \u001b[0m eta: 0:14:02  iter: 8959  total_loss: 1.416  loss_cls: 0.3262  loss_box_reg: 0.4959  loss_mask: 0.3031  loss_rpn_cls: 0.06844  loss_rpn_loc: 0.2011  time: 1.5455  data_time: 1.0178  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/13 00:43:15 d2.utils.events]: \u001b[0m eta: 0:13:42  iter: 8979  total_loss: 1.244  loss_cls: 0.2463  loss_box_reg: 0.4787  loss_mask: 0.2871  loss_rpn_cls: 0.04769  loss_rpn_loc: 0.1793  time: 1.5447  data_time: 0.5848  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/13 00:43:50 d2.utils.events]: \u001b[0m eta: 0:13:26  iter: 8999  total_loss: 1.314  loss_cls: 0.2832  loss_box_reg: 0.461  loss_mask: 0.2921  loss_rpn_cls: 0.06016  loss_rpn_loc: 0.1764  time: 1.5452  data_time: 1.1325  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/13 00:44:25 d2.utils.events]: \u001b[0m eta: 0:13:13  iter: 9019  total_loss: 1.352  loss_cls: 0.3232  loss_box_reg: 0.4561  loss_mask: 0.3082  loss_rpn_cls: 0.0822  loss_rpn_loc: 0.1831  time: 1.5457  data_time: 1.0961  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/13 00:44:51 d2.utils.events]: \u001b[0m eta: 0:12:53  iter: 9039  total_loss: 1.337  loss_cls: 0.2805  loss_box_reg: 0.4756  loss_mask: 0.3007  loss_rpn_cls: 0.06236  loss_rpn_loc: 0.1808  time: 1.5451  data_time: 0.6400  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/13 00:45:13 d2.utils.events]: \u001b[0m eta: 0:12:35  iter: 9059  total_loss: 1.313  loss_cls: 0.2749  loss_box_reg: 0.495  loss_mask: 0.2908  loss_rpn_cls: 0.04579  loss_rpn_loc: 0.1696  time: 1.5441  data_time: 0.4914  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/13 00:45:59 d2.utils.events]: \u001b[0m eta: 0:12:21  iter: 9079  total_loss: 1.504  loss_cls: 0.3565  loss_box_reg: 0.5077  loss_mask: 0.3168  loss_rpn_cls: 0.09556  loss_rpn_loc: 0.2072  time: 1.5458  data_time: 1.5989  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/13 00:46:25 d2.utils.events]: \u001b[0m eta: 0:12:02  iter: 9099  total_loss: 1.213  loss_cls: 0.2394  loss_box_reg: 0.4488  loss_mask: 0.2953  loss_rpn_cls: 0.04032  loss_rpn_loc: 0.1716  time: 1.5452  data_time: 0.6504  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/13 00:46:55 d2.utils.events]: \u001b[0m eta: 0:11:44  iter: 9119  total_loss: 1.369  loss_cls: 0.2934  loss_box_reg: 0.5077  loss_mask: 0.3009  loss_rpn_cls: 0.05688  loss_rpn_loc: 0.1841  time: 1.5451  data_time: 0.8480  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/13 00:47:24 d2.utils.events]: \u001b[0m eta: 0:11:28  iter: 9139  total_loss: 1.258  loss_cls: 0.2725  loss_box_reg: 0.47  loss_mask: 0.2939  loss_rpn_cls: 0.05082  loss_rpn_loc: 0.1599  time: 1.5449  data_time: 0.8030  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/13 00:47:54 d2.utils.events]: \u001b[0m eta: 0:11:14  iter: 9159  total_loss: 1.339  loss_cls: 0.2976  loss_box_reg: 0.4679  loss_mask: 0.2835  loss_rpn_cls: 0.07414  loss_rpn_loc: 0.2041  time: 1.5448  data_time: 0.8732  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/13 00:48:34 d2.utils.events]: \u001b[0m eta: 0:11:00  iter: 9179  total_loss: 1.368  loss_cls: 0.322  loss_box_reg: 0.4936  loss_mask: 0.2878  loss_rpn_cls: 0.08695  loss_rpn_loc: 0.199  time: 1.5458  data_time: 1.2760  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/13 00:49:10 d2.data.datasets.coco]: \u001b[0mLoaded 121 images in COCO format from ../input/sartoriusannotations/annotations_valid_f4.json\n",
      "\u001b[32m[04/13 00:49:11 d2.data.dataset_mapper]: \u001b[0m[DatasetMapper] Augmentations used in inference: [ResizeShortestEdge(short_edge_length=(800, 800), max_size=1333, sample_style='choice')]\n",
      "\u001b[32m[04/13 00:49:11 d2.data.common]: \u001b[0mSerializing 121 elements to byte tensors and concatenating them all ...\n",
      "\u001b[32m[04/13 00:49:11 d2.data.common]: \u001b[0mSerialized dataset takes 1.66 MiB\n",
      "\u001b[32m[04/13 00:49:11 d2.data.datasets.coco]: \u001b[0mLoaded 121 images in COCO format from ../input/sartoriusannotations/annotations_valid_f4.json\n",
      "\u001b[32m[04/13 00:49:12 d2.evaluation.evaluator]: \u001b[0mStart inference on 121 batches\n",
      "\u001b[32m[04/13 00:49:14 d2.evaluation.evaluator]: \u001b[0mInference done 11/121. Dataloading: 0.0037 s/iter. Inference: 0.1061 s/iter. Eval: 0.0636 s/iter. Total: 0.1734 s/iter. ETA=0:00:19\n",
      "\u001b[32m[04/13 00:49:19 d2.evaluation.evaluator]: \u001b[0mInference done 48/121. Dataloading: 0.0025 s/iter. Inference: 0.0948 s/iter. Eval: 0.0448 s/iter. Total: 0.1422 s/iter. ETA=0:00:10\n",
      "\u001b[32m[04/13 00:49:24 d2.evaluation.evaluator]: \u001b[0mInference done 85/121. Dataloading: 0.0025 s/iter. Inference: 0.0942 s/iter. Eval: 0.0427 s/iter. Total: 0.1395 s/iter. ETA=0:00:05\n",
      "\u001b[32m[04/13 00:49:29 d2.evaluation.evaluator]: \u001b[0mTotal inference time: 0:00:15.929797 (0.137326 s / iter per device, on 1 devices)\n",
      "\u001b[32m[04/13 00:49:29 d2.evaluation.evaluator]: \u001b[0mTotal inference pure compute time: 0:00:10 (0.093351 s / iter per device, on 1 devices)\n",
      "\u001b[32m[04/13 00:49:29 d2.engine.defaults]: \u001b[0mEvaluation results for sartorius_val in csv format:\n",
      "\u001b[32m[04/13 00:49:29 d2.evaluation.testing]: \u001b[0mcopypaste: MaP IoU=0.25195623348253493\n",
      "\u001b[32m[04/13 00:49:29 d2.engine.hooks]: \u001b[0mNot saving as latest eval score for MaP IoU is 0.25196, not better than best score 0.26367 @ iteration 5323.\n",
      "\u001b[32m[04/13 00:49:32 d2.utils.events]: \u001b[0m eta: 0:10:44  iter: 9199  total_loss: 1.425  loss_cls: 0.3262  loss_box_reg: 0.4903  loss_mask: 0.3074  loss_rpn_cls: 0.08623  loss_rpn_loc: 0.1886  time: 1.5466  data_time: 1.2579  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/13 00:50:10 d2.utils.events]: \u001b[0m eta: 0:10:28  iter: 9219  total_loss: 1.332  loss_cls: 0.2858  loss_box_reg: 0.4666  loss_mask: 0.2902  loss_rpn_cls: 0.07248  loss_rpn_loc: 0.2143  time: 1.5473  data_time: 1.1989  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/13 00:50:52 d2.utils.events]: \u001b[0m eta: 0:10:15  iter: 9239  total_loss: 1.28  loss_cls: 0.2769  loss_box_reg: 0.4637  loss_mask: 0.2925  loss_rpn_cls: 0.08282  loss_rpn_loc: 0.1848  time: 1.5485  data_time: 1.4649  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/13 00:51:24 d2.utils.events]: \u001b[0m eta: 0:10:01  iter: 9259  total_loss: 1.361  loss_cls: 0.3243  loss_box_reg: 0.4943  loss_mask: 0.3026  loss_rpn_cls: 0.08695  loss_rpn_loc: 0.1965  time: 1.5486  data_time: 0.9144  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/13 00:51:55 d2.utils.events]: \u001b[0m eta: 0:09:48  iter: 9279  total_loss: 1.403  loss_cls: 0.3015  loss_box_reg: 0.473  loss_mask: 0.3017  loss_rpn_cls: 0.0809  loss_rpn_loc: 0.1961  time: 1.5486  data_time: 0.8995  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/13 00:52:27 d2.utils.events]: \u001b[0m eta: 0:09:34  iter: 9299  total_loss: 1.329  loss_cls: 0.2966  loss_box_reg: 0.4782  loss_mask: 0.2861  loss_rpn_cls: 0.05812  loss_rpn_loc: 0.177  time: 1.5488  data_time: 0.9433  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/13 00:53:00 d2.utils.events]: \u001b[0m eta: 0:09:17  iter: 9319  total_loss: 1.444  loss_cls: 0.3231  loss_box_reg: 0.4858  loss_mask: 0.3058  loss_rpn_cls: 0.08209  loss_rpn_loc: 0.2003  time: 1.5490  data_time: 1.0022  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/13 00:53:29 d2.utils.events]: \u001b[0m eta: 0:09:01  iter: 9339  total_loss: 1.303  loss_cls: 0.3069  loss_box_reg: 0.4723  loss_mask: 0.291  loss_rpn_cls: 0.06178  loss_rpn_loc: 0.1888  time: 1.5487  data_time: 0.8055  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/13 00:53:47 d2.utils.events]: \u001b[0m eta: 0:08:40  iter: 9359  total_loss: 1.214  loss_cls: 0.2542  loss_box_reg: 0.4784  loss_mask: 0.2846  loss_rpn_cls: 0.03889  loss_rpn_loc: 0.1583  time: 1.5474  data_time: 0.3092  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/13 00:54:21 d2.utils.events]: \u001b[0m eta: 0:08:23  iter: 9379  total_loss: 1.343  loss_cls: 0.2912  loss_box_reg: 0.4641  loss_mask: 0.2894  loss_rpn_cls: 0.06713  loss_rpn_loc: 0.191  time: 1.5477  data_time: 1.0637  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/13 00:54:55 d2.utils.events]: \u001b[0m eta: 0:08:03  iter: 9399  total_loss: 1.42  loss_cls: 0.3033  loss_box_reg: 0.4916  loss_mask: 0.299  loss_rpn_cls: 0.05411  loss_rpn_loc: 0.1895  time: 1.5480  data_time: 1.0305  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/13 00:55:26 d2.utils.events]: \u001b[0m eta: 0:07:47  iter: 9419  total_loss: 1.367  loss_cls: 0.314  loss_box_reg: 0.4896  loss_mask: 0.2998  loss_rpn_cls: 0.07159  loss_rpn_loc: 0.1915  time: 1.5480  data_time: 0.9266  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/13 00:56:00 d2.data.datasets.coco]: \u001b[0mLoaded 121 images in COCO format from ../input/sartoriusannotations/annotations_valid_f4.json\n",
      "\u001b[32m[04/13 00:56:01 d2.data.dataset_mapper]: \u001b[0m[DatasetMapper] Augmentations used in inference: [ResizeShortestEdge(short_edge_length=(800, 800), max_size=1333, sample_style='choice')]\n",
      "\u001b[32m[04/13 00:56:01 d2.data.common]: \u001b[0mSerializing 121 elements to byte tensors and concatenating them all ...\n",
      "\u001b[32m[04/13 00:56:01 d2.data.common]: \u001b[0mSerialized dataset takes 1.66 MiB\n",
      "\u001b[32m[04/13 00:56:01 d2.data.datasets.coco]: \u001b[0mLoaded 121 images in COCO format from ../input/sartoriusannotations/annotations_valid_f4.json\n",
      "\u001b[32m[04/13 00:56:02 d2.evaluation.evaluator]: \u001b[0mStart inference on 121 batches\n",
      "\u001b[32m[04/13 00:56:04 d2.evaluation.evaluator]: \u001b[0mInference done 11/121. Dataloading: 0.0034 s/iter. Inference: 0.1000 s/iter. Eval: 0.0514 s/iter. Total: 0.1548 s/iter. ETA=0:00:17\n",
      "\u001b[32m[04/13 00:56:09 d2.evaluation.evaluator]: \u001b[0mInference done 47/121. Dataloading: 0.0030 s/iter. Inference: 0.0948 s/iter. Eval: 0.0454 s/iter. Total: 0.1434 s/iter. ETA=0:00:10\n",
      "\u001b[32m[04/13 00:56:14 d2.evaluation.evaluator]: \u001b[0mInference done 83/121. Dataloading: 0.0027 s/iter. Inference: 0.0947 s/iter. Eval: 0.0445 s/iter. Total: 0.1421 s/iter. ETA=0:00:05\n",
      "\u001b[32m[04/13 00:56:19 d2.evaluation.evaluator]: \u001b[0mInference done 120/121. Dataloading: 0.0027 s/iter. Inference: 0.0943 s/iter. Eval: 0.0433 s/iter. Total: 0.1404 s/iter. ETA=0:00:00\n",
      "\u001b[32m[04/13 00:56:19 d2.evaluation.evaluator]: \u001b[0mTotal inference time: 0:00:16.323725 (0.140722 s / iter per device, on 1 devices)\n",
      "\u001b[32m[04/13 00:56:19 d2.evaluation.evaluator]: \u001b[0mTotal inference pure compute time: 0:00:10 (0.094194 s / iter per device, on 1 devices)\n",
      "\u001b[32m[04/13 00:56:19 d2.engine.defaults]: \u001b[0mEvaluation results for sartorius_val in csv format:\n",
      "\u001b[32m[04/13 00:56:19 d2.evaluation.testing]: \u001b[0mcopypaste: MaP IoU=0.25603268506645943\n",
      "\u001b[32m[04/13 00:56:19 d2.engine.hooks]: \u001b[0mNot saving as latest eval score for MaP IoU is 0.25603, not better than best score 0.26367 @ iteration 5323.\n",
      "\u001b[32m[04/13 00:56:21 d2.utils.events]: \u001b[0m eta: 0:07:33  iter: 9439  total_loss: 1.302  loss_cls: 0.2845  loss_box_reg: 0.4729  loss_mask: 0.2876  loss_rpn_cls: 0.05886  loss_rpn_loc: 0.1943  time: 1.5484  data_time: 1.0465  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/13 00:56:43 d2.utils.events]: \u001b[0m eta: 0:07:14  iter: 9459  total_loss: 1.284  loss_cls: 0.2645  loss_box_reg: 0.4772  loss_mask: 0.2979  loss_rpn_cls: 0.05748  loss_rpn_loc: 0.1777  time: 1.5475  data_time: 0.4758  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/13 00:57:25 d2.utils.events]: \u001b[0m eta: 0:07:02  iter: 9479  total_loss: 1.474  loss_cls: 0.3239  loss_box_reg: 0.4678  loss_mask: 0.3127  loss_rpn_cls: 0.07528  loss_rpn_loc: 0.1831  time: 1.5487  data_time: 1.4107  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/13 00:57:54 d2.utils.events]: \u001b[0m eta: 0:06:48  iter: 9499  total_loss: 1.352  loss_cls: 0.3104  loss_box_reg: 0.4734  loss_mask: 0.2844  loss_rpn_cls: 0.08186  loss_rpn_loc: 0.2054  time: 1.5485  data_time: 0.8062  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/13 00:58:36 d2.utils.events]: \u001b[0m eta: 0:06:30  iter: 9519  total_loss: 1.45  loss_cls: 0.3288  loss_box_reg: 0.5075  loss_mask: 0.2993  loss_rpn_cls: 0.06956  loss_rpn_loc: 0.2139  time: 1.5496  data_time: 1.4051  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/13 00:59:09 d2.utils.events]: \u001b[0m eta: 0:06:16  iter: 9539  total_loss: 1.295  loss_cls: 0.2438  loss_box_reg: 0.4724  loss_mask: 0.2861  loss_rpn_cls: 0.06553  loss_rpn_loc: 0.1876  time: 1.5499  data_time: 1.0268  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/13 00:59:44 d2.utils.events]: \u001b[0m eta: 0:05:59  iter: 9559  total_loss: 1.354  loss_cls: 0.3188  loss_box_reg: 0.4834  loss_mask: 0.3009  loss_rpn_cls: 0.06407  loss_rpn_loc: 0.197  time: 1.5502  data_time: 1.0742  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/13 01:00:08 d2.utils.events]: \u001b[0m eta: 0:05:46  iter: 9579  total_loss: 1.286  loss_cls: 0.2849  loss_box_reg: 0.4929  loss_mask: 0.3056  loss_rpn_cls: 0.03284  loss_rpn_loc: 0.1746  time: 1.5495  data_time: 0.5860  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/13 01:00:28 d2.utils.events]: \u001b[0m eta: 0:05:25  iter: 9599  total_loss: 1.268  loss_cls: 0.2692  loss_box_reg: 0.5037  loss_mask: 0.2843  loss_rpn_cls: 0.03592  loss_rpn_loc: 0.172  time: 1.5484  data_time: 0.4243  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/13 01:00:59 d2.utils.events]: \u001b[0m eta: 0:05:12  iter: 9619  total_loss: 1.343  loss_cls: 0.2843  loss_box_reg: 0.4762  loss_mask: 0.3067  loss_rpn_cls: 0.0643  loss_rpn_loc: 0.1915  time: 1.5483  data_time: 0.8788  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/13 01:01:31 d2.utils.events]: \u001b[0m eta: 0:04:57  iter: 9639  total_loss: 1.392  loss_cls: 0.3125  loss_box_reg: 0.4965  loss_mask: 0.3143  loss_rpn_cls: 0.06844  loss_rpn_loc: 0.1894  time: 1.5484  data_time: 0.9528  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/13 01:02:04 d2.utils.events]: \u001b[0m eta: 0:04:41  iter: 9659  total_loss: 1.253  loss_cls: 0.2799  loss_box_reg: 0.4785  loss_mask: 0.2899  loss_rpn_cls: 0.06578  loss_rpn_loc: 0.1621  time: 1.5487  data_time: 1.0254  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/13 01:02:36 d2.data.datasets.coco]: \u001b[0mLoaded 121 images in COCO format from ../input/sartoriusannotations/annotations_valid_f4.json\n",
      "\u001b[32m[04/13 01:02:37 d2.data.dataset_mapper]: \u001b[0m[DatasetMapper] Augmentations used in inference: [ResizeShortestEdge(short_edge_length=(800, 800), max_size=1333, sample_style='choice')]\n",
      "\u001b[32m[04/13 01:02:37 d2.data.common]: \u001b[0mSerializing 121 elements to byte tensors and concatenating them all ...\n",
      "\u001b[32m[04/13 01:02:37 d2.data.common]: \u001b[0mSerialized dataset takes 1.66 MiB\n",
      "\u001b[32m[04/13 01:02:37 d2.data.datasets.coco]: \u001b[0mLoaded 121 images in COCO format from ../input/sartoriusannotations/annotations_valid_f4.json\n",
      "\u001b[32m[04/13 01:02:38 d2.evaluation.evaluator]: \u001b[0mStart inference on 121 batches\n",
      "\u001b[32m[04/13 01:02:39 d2.evaluation.evaluator]: \u001b[0mInference done 11/121. Dataloading: 0.0015 s/iter. Inference: 0.0901 s/iter. Eval: 0.0359 s/iter. Total: 0.1275 s/iter. ETA=0:00:14\n",
      "\u001b[32m[04/13 01:02:44 d2.evaluation.evaluator]: \u001b[0mInference done 47/121. Dataloading: 0.0024 s/iter. Inference: 0.0933 s/iter. Eval: 0.0422 s/iter. Total: 0.1380 s/iter. ETA=0:00:10\n",
      "\u001b[32m[04/13 01:02:49 d2.evaluation.evaluator]: \u001b[0mInference done 82/121. Dataloading: 0.0028 s/iter. Inference: 0.0949 s/iter. Eval: 0.0432 s/iter. Total: 0.1410 s/iter. ETA=0:00:05\n",
      "\u001b[32m[04/13 01:02:54 d2.evaluation.evaluator]: \u001b[0mInference done 120/121. Dataloading: 0.0026 s/iter. Inference: 0.0936 s/iter. Eval: 0.0423 s/iter. Total: 0.1386 s/iter. ETA=0:00:00\n",
      "\u001b[32m[04/13 01:02:55 d2.evaluation.evaluator]: \u001b[0mTotal inference time: 0:00:16.124377 (0.139003 s / iter per device, on 1 devices)\n",
      "\u001b[32m[04/13 01:02:55 d2.evaluation.evaluator]: \u001b[0mTotal inference pure compute time: 0:00:10 (0.093613 s / iter per device, on 1 devices)\n",
      "\u001b[32m[04/13 01:02:55 d2.engine.defaults]: \u001b[0mEvaluation results for sartorius_val in csv format:\n",
      "\u001b[32m[04/13 01:02:55 d2.evaluation.testing]: \u001b[0mcopypaste: MaP IoU=0.26177446431089874\n",
      "\u001b[32m[04/13 01:02:55 d2.engine.hooks]: \u001b[0mNot saving as latest eval score for MaP IoU is 0.26177, not better than best score 0.26367 @ iteration 5323.\n",
      "\u001b[32m[04/13 01:02:55 d2.utils.events]: \u001b[0m eta: 0:04:24  iter: 9679  total_loss: 1.327  loss_cls: 0.2722  loss_box_reg: 0.4775  loss_mask: 0.3037  loss_rpn_cls: 0.06881  loss_rpn_loc: 0.1929  time: 1.5487  data_time: 0.9298  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/13 01:03:24 d2.utils.events]: \u001b[0m eta: 0:04:08  iter: 9699  total_loss: 1.246  loss_cls: 0.267  loss_box_reg: 0.4642  loss_mask: 0.2803  loss_rpn_cls: 0.0531  loss_rpn_loc: 0.172  time: 1.5485  data_time: 0.7994  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/13 01:03:48 d2.utils.events]: \u001b[0m eta: 0:03:49  iter: 9719  total_loss: 1.27  loss_cls: 0.259  loss_box_reg: 0.4817  loss_mask: 0.3065  loss_rpn_cls: 0.04755  loss_rpn_loc: 0.1746  time: 1.5478  data_time: 0.6009  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/13 01:04:16 d2.utils.events]: \u001b[0m eta: 0:03:29  iter: 9739  total_loss: 1.368  loss_cls: 0.2737  loss_box_reg: 0.5182  loss_mask: 0.3102  loss_rpn_cls: 0.05746  loss_rpn_loc: 0.1856  time: 1.5476  data_time: 0.7911  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/13 01:04:44 d2.utils.events]: \u001b[0m eta: 0:03:14  iter: 9759  total_loss: 1.427  loss_cls: 0.3291  loss_box_reg: 0.4913  loss_mask: 0.2893  loss_rpn_cls: 0.07496  loss_rpn_loc: 0.1896  time: 1.5472  data_time: 0.7360  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/13 01:05:25 d2.utils.events]: \u001b[0m eta: 0:02:58  iter: 9779  total_loss: 1.334  loss_cls: 0.2996  loss_box_reg: 0.4585  loss_mask: 0.2929  loss_rpn_cls: 0.07476  loss_rpn_loc: 0.1998  time: 1.5482  data_time: 1.3850  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/13 01:05:58 d2.utils.events]: \u001b[0m eta: 0:02:41  iter: 9799  total_loss: 1.323  loss_cls: 0.2976  loss_box_reg: 0.4771  loss_mask: 0.2973  loss_rpn_cls: 0.05607  loss_rpn_loc: 0.1913  time: 1.5485  data_time: 1.0051  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/13 01:06:35 d2.utils.events]: \u001b[0m eta: 0:02:24  iter: 9819  total_loss: 1.39  loss_cls: 0.3071  loss_box_reg: 0.4706  loss_mask: 0.3075  loss_rpn_cls: 0.0752  loss_rpn_loc: 0.2023  time: 1.5491  data_time: 1.1797  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/13 01:07:05 d2.utils.events]: \u001b[0m eta: 0:02:09  iter: 9839  total_loss: 1.324  loss_cls: 0.2701  loss_box_reg: 0.4739  loss_mask: 0.295  loss_rpn_cls: 0.06269  loss_rpn_loc: 0.1868  time: 1.5490  data_time: 0.8502  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/13 01:07:36 d2.utils.events]: \u001b[0m eta: 0:01:53  iter: 9859  total_loss: 1.263  loss_cls: 0.2631  loss_box_reg: 0.4489  loss_mask: 0.2919  loss_rpn_cls: 0.06722  loss_rpn_loc: 0.1878  time: 1.5490  data_time: 0.8525  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/13 01:08:11 d2.utils.events]: \u001b[0m eta: 0:01:37  iter: 9879  total_loss: 1.283  loss_cls: 0.2688  loss_box_reg: 0.4687  loss_mask: 0.2933  loss_rpn_cls: 0.05583  loss_rpn_loc: 0.179  time: 1.5494  data_time: 1.1140  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/13 01:08:40 d2.utils.events]: \u001b[0m eta: 0:01:21  iter: 9899  total_loss: 1.273  loss_cls: 0.2642  loss_box_reg: 0.4691  loss_mask: 0.282  loss_rpn_cls: 0.05855  loss_rpn_loc: 0.1871  time: 1.5491  data_time: 0.7697  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/13 01:09:10 d2.utils.events]: \u001b[0m eta: 0:01:05  iter: 9919  total_loss: 1.255  loss_cls: 0.2545  loss_box_reg: 0.4725  loss_mask: 0.3008  loss_rpn_cls: 0.04466  loss_rpn_loc: 0.1893  time: 1.5490  data_time: 0.8449  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/13 01:09:13 d2.data.datasets.coco]: \u001b[0mLoaded 121 images in COCO format from ../input/sartoriusannotations/annotations_valid_f4.json\n",
      "\u001b[32m[04/13 01:09:13 d2.data.dataset_mapper]: \u001b[0m[DatasetMapper] Augmentations used in inference: [ResizeShortestEdge(short_edge_length=(800, 800), max_size=1333, sample_style='choice')]\n",
      "\u001b[32m[04/13 01:09:13 d2.data.common]: \u001b[0mSerializing 121 elements to byte tensors and concatenating them all ...\n",
      "\u001b[32m[04/13 01:09:14 d2.data.common]: \u001b[0mSerialized dataset takes 1.66 MiB\n",
      "\u001b[32m[04/13 01:09:14 d2.data.datasets.coco]: \u001b[0mLoaded 121 images in COCO format from ../input/sartoriusannotations/annotations_valid_f4.json\n",
      "\u001b[32m[04/13 01:09:15 d2.evaluation.evaluator]: \u001b[0mStart inference on 121 batches\n",
      "\u001b[32m[04/13 01:09:17 d2.evaluation.evaluator]: \u001b[0mInference done 11/121. Dataloading: 0.0015 s/iter. Inference: 0.0890 s/iter. Eval: 0.0330 s/iter. Total: 0.1235 s/iter. ETA=0:00:13\n",
      "\u001b[32m[04/13 01:09:22 d2.evaluation.evaluator]: \u001b[0mInference done 45/121. Dataloading: 0.0026 s/iter. Inference: 0.0959 s/iter. Eval: 0.0467 s/iter. Total: 0.1454 s/iter. ETA=0:00:11\n",
      "\u001b[32m[04/13 01:09:27 d2.evaluation.evaluator]: \u001b[0mInference done 83/121. Dataloading: 0.0025 s/iter. Inference: 0.0943 s/iter. Eval: 0.0427 s/iter. Total: 0.1396 s/iter. ETA=0:00:05\n",
      "\u001b[32m[04/13 01:09:32 d2.evaluation.evaluator]: \u001b[0mInference done 120/121. Dataloading: 0.0026 s/iter. Inference: 0.0944 s/iter. Eval: 0.0425 s/iter. Total: 0.1396 s/iter. ETA=0:00:00\n",
      "\u001b[32m[04/13 01:09:32 d2.evaluation.evaluator]: \u001b[0mTotal inference time: 0:00:16.240031 (0.140000 s / iter per device, on 1 devices)\n",
      "\u001b[32m[04/13 01:09:32 d2.evaluation.evaluator]: \u001b[0mTotal inference pure compute time: 0:00:10 (0.094372 s / iter per device, on 1 devices)\n",
      "\u001b[32m[04/13 01:09:32 d2.engine.defaults]: \u001b[0mEvaluation results for sartorius_val in csv format:\n",
      "\u001b[32m[04/13 01:09:32 d2.evaluation.testing]: \u001b[0mcopypaste: MaP IoU=0.2633467757659755\n",
      "\u001b[32m[04/13 01:09:32 d2.engine.hooks]: \u001b[0mNot saving as latest eval score for MaP IoU is 0.26335, not better than best score 0.26367 @ iteration 5323.\n",
      "\u001b[32m[04/13 01:09:57 d2.utils.events]: \u001b[0m eta: 0:00:48  iter: 9939  total_loss: 1.296  loss_cls: 0.2643  loss_box_reg: 0.4759  loss_mask: 0.2904  loss_rpn_cls: 0.06508  loss_rpn_loc: 0.187  time: 1.5488  data_time: 0.7688  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/13 01:10:23 d2.utils.events]: \u001b[0m eta: 0:00:32  iter: 9959  total_loss: 1.306  loss_cls: 0.2882  loss_box_reg: 0.4815  loss_mask: 0.2807  loss_rpn_cls: 0.04925  loss_rpn_loc: 0.1618  time: 1.5482  data_time: 0.6809  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/13 01:10:56 d2.utils.events]: \u001b[0m eta: 0:00:16  iter: 9979  total_loss: 1.237  loss_cls: 0.2707  loss_box_reg: 0.452  loss_mask: 0.2825  loss_rpn_cls: 0.04221  loss_rpn_loc: 0.1641  time: 1.5485  data_time: 1.0085  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/13 01:11:34 d2.utils.events]: \u001b[0m eta: 0:00:00  iter: 9999  total_loss: 1.289  loss_cls: 0.2946  loss_box_reg: 0.4556  loss_mask: 0.2909  loss_rpn_cls: 0.05704  loss_rpn_loc: 0.1699  time: 1.5488  data_time: 1.1019  lr: 0.001  max_mem: 6707M\n",
      "\u001b[32m[04/13 01:11:34 d2.engine.hooks]: \u001b[0mOverall training speed: 9998 iterations in 4:18:05 (1.5488 s / it)\n",
      "\u001b[32m[04/13 01:11:34 d2.engine.hooks]: \u001b[0mTotal training time: 4:31:21 (0:13:16 on hooks)\n",
      "\u001b[32m[04/13 01:11:35 d2.data.datasets.coco]: \u001b[0mLoaded 121 images in COCO format from ../input/sartoriusannotations/annotations_valid_f4.json\n",
      "\u001b[32m[04/13 01:11:35 d2.data.dataset_mapper]: \u001b[0m[DatasetMapper] Augmentations used in inference: [ResizeShortestEdge(short_edge_length=(800, 800), max_size=1333, sample_style='choice')]\n",
      "\u001b[32m[04/13 01:11:35 d2.data.common]: \u001b[0mSerializing 121 elements to byte tensors and concatenating them all ...\n",
      "\u001b[32m[04/13 01:11:35 d2.data.common]: \u001b[0mSerialized dataset takes 1.66 MiB\n",
      "\u001b[32m[04/13 01:11:36 d2.data.datasets.coco]: \u001b[0mLoaded 121 images in COCO format from ../input/sartoriusannotations/annotations_valid_f4.json\n",
      "\u001b[32m[04/13 01:11:36 d2.evaluation.evaluator]: \u001b[0mStart inference on 121 batches\n",
      "\u001b[32m[04/13 01:11:37 d2.evaluation.evaluator]: \u001b[0mInference done 11/121. Dataloading: 0.0015 s/iter. Inference: 0.0908 s/iter. Eval: 0.0367 s/iter. Total: 0.1290 s/iter. ETA=0:00:14\n",
      "\u001b[32m[04/13 01:11:43 d2.evaluation.evaluator]: \u001b[0mInference done 47/121. Dataloading: 0.0023 s/iter. Inference: 0.0933 s/iter. Eval: 0.0422 s/iter. Total: 0.1379 s/iter. ETA=0:00:10\n",
      "\u001b[32m[04/13 01:11:48 d2.evaluation.evaluator]: \u001b[0mInference done 84/121. Dataloading: 0.0023 s/iter. Inference: 0.0928 s/iter. Eval: 0.0417 s/iter. Total: 0.1369 s/iter. ETA=0:00:05\n",
      "\u001b[32m[04/13 01:11:53 d2.evaluation.evaluator]: \u001b[0mInference done 121/121. Dataloading: 0.0023 s/iter. Inference: 0.0929 s/iter. Eval: 0.0412 s/iter. Total: 0.1364 s/iter. ETA=0:00:00\n",
      "\u001b[32m[04/13 01:11:53 d2.evaluation.evaluator]: \u001b[0mTotal inference time: 0:00:15.888813 (0.136973 s / iter per device, on 1 devices)\n",
      "\u001b[32m[04/13 01:11:53 d2.evaluation.evaluator]: \u001b[0mTotal inference pure compute time: 0:00:10 (0.092875 s / iter per device, on 1 devices)\n",
      "\u001b[32m[04/13 01:11:53 d2.engine.defaults]: \u001b[0mEvaluation results for sartorius_val in csv format:\n",
      "\u001b[32m[04/13 01:11:53 d2.evaluation.testing]: \u001b[0mcopypaste: MaP IoU=0.235168872154732\n",
      "\u001b[32m[04/13 01:11:53 d2.engine.hooks]: \u001b[0mNot saving as latest eval score for MaP IoU is 0.23517, not better than best score 0.26367 @ iteration 5323.\n"
     ]
    }
   ],
   "source": [
    "# Ensure the output directory from the detectron2 config exists (checkpoints + logs go here).\n",
    "os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)\n",
    "# Build the Trainer (custom subclass defined earlier in this notebook) from the config.\n",
    "trainer = Trainer(cfg) \n",
    "# resume=False: do not resume from a checkpoint in cfg.OUTPUT_DIR; per detectron2's\n",
    "# DefaultTrainer API this initializes the model from cfg.MODEL.WEIGHTS instead.\n",
    "trainer.resume_or_load(resume=False)\n",
    "# Run the full training loop; the periodic sartorius_val evaluations are logged in the output above.\n",
    "trainer.train()"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.12"
  },
  "papermill": {
   "default_parameters": {},
   "duration": 16561.939255,
   "end_time": "2022-04-13T01:11:58.554105",
   "environment_variables": {},
   "exception": null,
   "input_path": "__notebook__.ipynb",
   "output_path": "__notebook__.ipynb",
   "parameters": {},
   "start_time": "2022-04-12T20:35:56.614850",
   "version": "2.3.3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
