{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Using TensorFlow backend.\n"
     ]
    }
   ],
   "source": [
    "\"\"\"\n",
    "Mask R-CNN\n",
    "Train on the nuclei segmentation dataset from the\n",
    "Kaggle 2018 Data Science Bowl\n",
    "https://www.kaggle.com/c/data-science-bowl-2018/\n",
    "\n",
    "Licensed under the MIT License (see LICENSE for details)\n",
    "Written by Waleed Abdulla\n",
    "\n",
    "------------------------------------------------------------\n",
    "\n",
    "Usage: import the module (see Jupyter notebooks for examples), or run from\n",
    "       the command line as such:\n",
    "\n",
    "    # Train a new model starting from ImageNet weights\n",
    "    python3 nucleus.py train --dataset=/path/to/dataset --subset=train --weights=imagenet\n",
    "\n",
    "    # Train a new model starting from specific weights file\n",
    "    python3 nucleus.py train --dataset=/path/to/dataset --subset=train --weights=/path/to/weights.h5\n",
    "\n",
    "    # Resume training a model that you had trained earlier\n",
    "    python3 nucleus.py train --dataset=/path/to/dataset --subset=train --weights=last\n",
    "\n",
    "    # Generate submission file\n",
    "    python3 nucleus.py detect --dataset=/path/to/dataset --subset=train --weights=<last or /path/to/weights.h5>\n",
    "\"\"\"\n",
    "\n",
    "# Set matplotlib backend\n",
    "# This has to be done before other importa that might\n",
    "# set it, but only if we're running in script mode\n",
    "# rather than being imported.\n",
    "if __name__ == '__main__':\n",
    "    import matplotlib\n",
    "    # Agg backend runs without a display\n",
    "    matplotlib.use('Agg')\n",
    "    import matplotlib.pyplot as plt\n",
    "\n",
    "import os\n",
    "import sys\n",
    "import json\n",
    "import datetime\n",
    "import numpy as np\n",
    "import skimage.io\n",
    "from imgaug import augmenters as iaa\n",
    "\n",
    "# Root directory of the project\n",
    "ROOT_DIR = os.path.abspath(\".\")\n",
    "\n",
    "# Import Mask RCNN\n",
    "sys.path.append(ROOT_DIR)  # To find local version of the library\n",
    "from mrcnn.config import Config\n",
    "from mrcnn import utils\n",
    "from mrcnn import model as modellib\n",
    "from mrcnn import visualize\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Path to trained weights file\n",
    "COCO_WEIGHTS_PATH = \"mask_rcnn_coco.h5\"\n",
    "\n",
    "# Directory to save logs and model checkpoints, if not provided\n",
    "# through the command line argument --logs\n",
    "DEFAULT_LOGS_DIR = \"mask_rcnn_logs\"\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "############################################################\n",
    "#  Configurations\n",
    "############################################################\n",
    "\n",
    "class LesionConfig(Config):\n",
    "    \"\"\"Configuration for training on the nucleus segmentation dataset.\"\"\"\n",
    "    # Give the configuration a recognizable name\n",
    "    NAME = \"lesion\"\n",
    "\n",
    "    # Adjust depending on your GPU memory\n",
    "    IMAGES_PER_GPU = 4\n",
    "\n",
    "    # Number of classes (including background)\n",
    "    NUM_CLASSES = 1 + 1  # Background + nucleus\n",
    "\n",
    "    # Number of training and validation steps per epoch\n",
    "    STEPS_PER_EPOCH = 1815 // IMAGES_PER_GPU\n",
    "    VALIDATION_STEPS = 779 // IMAGES_PER_GPU\n",
    "\n",
    "    # Don't exclude based on confidence. Since we have two classes\n",
    "    # then 0.5 is the minimum anyway as it picks between nucleus and BG\n",
    "    DETECTION_MIN_CONFIDENCE = 0\n",
    "\n",
    "    # Backbone network architecture\n",
    "    # Supported values are: resnet50, resnet101\n",
    "    BACKBONE = \"resnet50\"\n",
    "\n",
    "    # Input image resizing\n",
    "    # Random crops of size 512x512\n",
    "    IMAGE_RESIZE_MODE = \"crop\"\n",
    "    IMAGE_MIN_DIM = 512\n",
    "    IMAGE_MAX_DIM = 512\n",
    "    IMAGE_MIN_SCALE = 2.0\n",
    "\n",
    "    # Length of square anchor side in pixels\n",
    "    RPN_ANCHOR_SCALES = (8, 16, 32, 64, 128)\n",
    "\n",
    "    # ROIs kept after non-maximum supression (training and inference)\n",
    "    POST_NMS_ROIS_TRAINING = 1000\n",
    "    POST_NMS_ROIS_INFERENCE = 2000\n",
    "\n",
    "    # Non-max suppression threshold to filter RPN proposals.\n",
    "    # You can increase this during training to generate more propsals.\n",
    "    RPN_NMS_THRESHOLD = 0.9\n",
    "\n",
    "    # How many anchors per image to use for RPN training\n",
    "    RPN_TRAIN_ANCHORS_PER_IMAGE = 64\n",
    "\n",
    "    # Image mean (RGB)\n",
    "    MEAN_PIXEL = np.array([43.53, 39.56, 48.22])\n",
    "\n",
    "    # If enabled, resizes instance masks to a smaller size to reduce\n",
    "    # memory load. Recommended when using high-resolution images.\n",
    "    USE_MINI_MASK = True\n",
    "    MINI_MASK_SHAPE = (56, 56)  # (height, width) of the mini-mask\n",
    "\n",
    "    # Number of ROIs per image to feed to classifier/mask heads\n",
    "    # The Mask RCNN paper uses 512 but often the RPN doesn't generate\n",
    "    # enough positive proposals to fill this and keep a positive:negative\n",
    "    # ratio of 1:3. You can increase the number of proposals by adjusting\n",
    "    # the RPN NMS threshold.\n",
    "    TRAIN_ROIS_PER_IMAGE = 128\n",
    "\n",
    "    # Maximum number of ground truth instances to use in one image\n",
    "    MAX_GT_INSTANCES = 200\n",
    "\n",
    "    # Max number of final detections per image\n",
    "    DETECTION_MAX_INSTANCES = 2\n",
    "\n",
    "\n",
    "class LesionInferenceConfig(LesionConfig):\n",
    "    # Set batch size to 1 to run one image at a time\n",
    "    GPU_COUNT = 1\n",
    "    IMAGES_PER_GPU = 1\n",
    "    # Don't resize image for inferencing\n",
    "    IMAGE_RESIZE_MODE = \"pad64\"\n",
    "    # Non-max suppression threshold to filter RPN proposals.\n",
    "    # You can increase this during training to generate more propsals.\n",
    "    RPN_NMS_THRESHOLD = 0.7\n",
    "\n",
    "\n",
    "############################################################\n",
    "#  Dataset\n",
    "############################################################\n",
    "\n",
    "class LesionDataset(utils.Dataset):\n",
    "\n",
    "    def load_lesion(self, dataset_dir, subset):\n",
    "        \"\"\"Load a subset of the nuclei dataset.\n",
    "\n",
    "        dataset_dir: Root directory of the dataset\n",
    "        subset: Subset to load. Either the name of the sub-directory,\n",
    "                such as stage1_train, stage1_test, ...etc. or, one of:\n",
    "                * train: stage1_train excluding validation images\n",
    "                * val: validation images from VAL_IMAGE_IDS\n",
    "        \"\"\"\n",
    "        # Add classes. We have one class.\n",
    "        # Naming the dataset nucleus, and the class nucleus\n",
    "        self.add_class(\"lesion\", 1, \"lesion\")\n",
    "\n",
    "        # Which subset?\n",
    "        # \"val\": use hard-coded list above\n",
    "        # \"train\": use data from stage1_train minus the hard-coded list above\n",
    "        # else: use the data from the specified sub-directory\n",
    "        assert subset in [\"train\", \"val\", \"test\"]\n",
    "        subset_dir = \"test\" if subset in [\"test\", \"val\"] else subset\n",
    "        dataset_dir = os.path.join(dataset_dir, subset_dir, 'images')\n",
    "        \n",
    "        #List images\n",
    "        image_ids = next(os.walk(dataset_dir))[2]\n",
    "        \n",
    "        # Add images\n",
    "        for image_id in image_ids:\n",
    "            img_id = image_id.split('.')[0]\n",
    "            #print(\"Adding image ----{}\".format(img_id))\n",
    "            self.add_image(\n",
    "                \"lesion\",\n",
    "                image_id=img_id,\n",
    "                path=os.path.join(dataset_dir, image_id))\n",
    "\n",
    "    def load_mask(self, image_id):\n",
    "        \"\"\"Generate instance masks for an image.\n",
    "       Returns:\n",
    "        masks: A bool array of shape [height, width, instance count] with\n",
    "            one mask per instance.\n",
    "        class_ids: a 1D array of class IDs of the instance masks.\n",
    "        \"\"\"\n",
    "        info = self.image_info[image_id]\n",
    "        # Get mask directory from image path\n",
    "        mask_dir = os.path.join(os.path.dirname(os.path.dirname(info['path'])), \"masks\")\n",
    "\n",
    "        # Read mask files from .png image\n",
    "        mask = []\n",
    "        #print (\"Load mask....{}\".format(image_id))\n",
    "\n",
    "        f = \"{}_segmentation.png\".format(info['id'])\n",
    "        m = skimage.io.imread(os.path.join(mask_dir, f)).astype(np.bool)\n",
    "        mask.append(m)\n",
    "        mask = np.stack(mask, axis=-1)\n",
    "        # Return mask, and array of class IDs of each instance. Since we have\n",
    "        # one class ID, we return an array of ones\n",
    "        return mask, np.ones([mask.shape[-1]], dtype=np.int32)\n",
    "\n",
    "    def image_reference(self, image_id):\n",
    "        \"\"\"Return the path of the image.\"\"\"\n",
    "        info = self.image_info[image_id]\n",
    "        if info[\"source\"] == \"lesion\":\n",
    "            return info[\"id\"]\n",
    "        else:\n",
    "            super(self.__class__, self).image_reference(image_id)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "\n",
    "############################################################\n",
    "#  RLE Encoding\n",
    "############################################################\n",
    "\n",
    "def rle_encode(mask):\n",
    "    \"\"\"Encodes a mask in Run Length Encoding (RLE).\n",
    "    Returns a string of space-separated values.\n",
    "    \"\"\"\n",
    "    assert mask.ndim == 2, \"Mask must be of shape [Height, Width]\"\n",
    "    # Flatten it column wise\n",
    "    m = mask.T.flatten()\n",
    "    # Compute gradient. Equals 1 or -1 at transition points\n",
    "    g = np.diff(np.concatenate([[0], m, [0]]), n=1)\n",
    "    # 1-based indicies of transition points (where gradient != 0)\n",
    "    rle = np.where(g != 0)[0].reshape([-1, 2]) + 1\n",
    "    # Convert second index in each pair to lenth\n",
    "    rle[:, 1] = rle[:, 1] - rle[:, 0]\n",
    "    return \" \".join(map(str, rle.flatten()))\n",
    "\n",
    "\n",
    "def rle_decode(rle, shape):\n",
    "    \"\"\"Decodes an RLE encoded list of space separated\n",
    "    numbers and returns a binary mask.\"\"\"\n",
    "    rle = list(map(int, rle.split()))\n",
    "    rle = np.array(rle, dtype=np.int32).reshape([-1, 2])\n",
    "    rle[:, 1] += rle[:, 0]\n",
    "    rle -= 1\n",
    "    mask = np.zeros([shape[0] * shape[1]], np.bool)\n",
    "    for s, e in rle:\n",
    "        assert 0 <= s < mask.shape[0]\n",
    "        assert 1 <= e <= mask.shape[0], \"shape: {}  s {}  e {}\".format(shape, s, e)\n",
    "        mask[s:e] = 1\n",
    "    # Reshape and transpose\n",
    "    mask = mask.reshape([shape[1], shape[0]]).T\n",
    "    return mask\n",
    "\n",
    "\n",
    "def mask_to_rle(image_id, mask, scores):\n",
    "    \"Encodes instance masks to submission format.\"\n",
    "    assert mask.ndim == 3, \"Mask must be [H, W, count]\"\n",
    "    # If mask is empty, return line with image ID only\n",
    "    if mask.shape[-1] == 0:\n",
    "        return \"{},\".format(image_id)\n",
    "    # Remove mask overlaps\n",
    "    # Multiply each instance mask by its score order\n",
    "    # then take the maximum across the last dimension\n",
    "    order = np.argsort(scores)[::-1] + 1  # 1-based descending\n",
    "    mask = np.max(mask * np.reshape(order, [1, 1, -1]), -1)\n",
    "    # Loop over instance masks\n",
    "    lines = []\n",
    "    for o in order:\n",
    "        m = np.where(mask == o, 1, 0)\n",
    "        # Skip if empty\n",
    "        if m.sum() == 0.0:\n",
    "            continue\n",
    "        rle = rle_encode(m)\n",
    "        lines.append(\"{}, {}\".format(image_id, rle))\n",
    "    return \"\\n\".join(lines)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "############################################################\n",
    "#  Detection\n",
    "############################################################\n",
    "\n",
    "def detect(model, dataset_dir, subset):\n",
    "    \"\"\"Run detection on images in the given directory.\"\"\"\n",
    "    print(\"Running on {}\".format(dataset_dir))\n",
    "\n",
    "    # Create directory\n",
    "    if not os.path.exists(RESULTS_DIR):\n",
    "        os.makedirs(RESULTS_DIR)\n",
    "    submit_dir = \"submit_{:%Y%m%dT%H%M%S}\".format(datetime.datetime.now())\n",
    "    submit_dir = os.path.join(RESULTS_DIR, submit_dir)\n",
    "    os.makedirs(submit_dir)\n",
    "\n",
    "    # Read dataset\n",
    "    dataset = LesionDataset()\n",
    "    dataset.load_lesion(dataset_dir, subset)\n",
    "    dataset.prepare()\n",
    "    # Load over images\n",
    "    submission = []\n",
    "    for image_id in dataset.image_ids:\n",
    "        # Load image and run detection\n",
    "        image = dataset.load_image(image_id)\n",
    "        # Detect objects\n",
    "        r = model.detect([image], verbose=0)[0]\n",
    "        # Encode image to RLE. Returns a string of multiple lines\n",
    "        source_id = dataset.image_info[image_id][\"id\"]\n",
    "        rle = mask_to_rle(source_id, r[\"masks\"], r[\"scores\"])\n",
    "        submission.append(rle)\n",
    "        # Save image with masks\n",
    "        visualize.display_instances(\n",
    "            image, r['rois'], r['masks'], r['class_ids'],\n",
    "            dataset.class_names, r['scores'],\n",
    "            show_bbox=False, show_mask=False,\n",
    "            title=\"Predictions\")\n",
    "        plt.savefig(\"{}/{}.png\".format(submit_dir, dataset.image_info[image_id][\"id\"]))\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "############################################################\n",
    "#  Training\n",
    "############################################################\n",
    "\n",
    "def train(model, dataset_dir):\n",
    "    \"\"\"Train the model.\"\"\"\n",
    "    # Training dataset.\n",
    "    dataset_train = LesionDataset()\n",
    "    dataset_train.load_lesion(dataset_dir, \"train\")\n",
    "    dataset_train.prepare()\n",
    "\n",
    "    # Validation dataset\n",
    "    dataset_val = LesionDataset()\n",
    "    dataset_val.load_lesion(dataset_dir, \"test\")\n",
    "    dataset_val.prepare()\n",
    "\n",
    "    # Image augmentation\n",
    "    # http://imgaug.readthedocs.io/en/latest/source/augmenters.html\n",
    "    augmentation = iaa.SomeOf((0, 2), [\n",
    "        iaa.Fliplr(0.5),\n",
    "        iaa.Flipud(0.5),\n",
    "        iaa.OneOf([iaa.Affine(rotate=90),\n",
    "                   iaa.Affine(rotate=180),\n",
    "                   iaa.Affine(rotate=270)]),\n",
    "        iaa.Multiply((0.8, 1.5)),\n",
    "        iaa.GaussianBlur(sigma=(0.0, 5.0))\n",
    "    ])\n",
    "\n",
    "    # *** This training schedule is an example. Update to your needs ***\n",
    "\n",
    "    # If starting from imagenet, train heads only for a bit\n",
    "    # since they have random weights\n",
    "    print(\"Train network heads\")\n",
    "    model.train(dataset_train, dataset_val,\n",
    "                learning_rate=config.LEARNING_RATE,\n",
    "                epochs=20,\n",
    "                augmentation=augmentation,\n",
    "                layers='heads')\n",
    "\n",
    "    print(\"Train all layers\")\n",
    "    model.train(dataset_train, dataset_val,\n",
    "                learning_rate=config.LEARNING_RATE,\n",
    "                epochs=40,\n",
    "                augmentation=augmentation,\n",
    "                layers='all')\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "Configurations:\n",
      "BACKBONE                       resnet50\n",
      "BACKBONE_STRIDES               [4, 8, 16, 32, 64]\n",
      "BATCH_SIZE                     4\n",
      "BBOX_STD_DEV                   [0.1 0.1 0.2 0.2]\n",
      "COMPUTE_BACKBONE_SHAPE         None\n",
      "DETECTION_MAX_INSTANCES        2\n",
      "DETECTION_MIN_CONFIDENCE       0\n",
      "DETECTION_NMS_THRESHOLD        0.3\n",
      "FPN_CLASSIF_FC_LAYERS_SIZE     1024\n",
      "GPU_COUNT                      1\n",
      "GRADIENT_CLIP_NORM             5.0\n",
      "IMAGES_PER_GPU                 4\n",
      "IMAGE_CHANNEL_COUNT            3\n",
      "IMAGE_MAX_DIM                  512\n",
      "IMAGE_META_SIZE                14\n",
      "IMAGE_MIN_DIM                  512\n",
      "IMAGE_MIN_SCALE                2.0\n",
      "IMAGE_RESIZE_MODE              crop\n",
      "IMAGE_SHAPE                    [512 512   3]\n",
      "LEARNING_MOMENTUM              0.9\n",
      "LEARNING_RATE                  0.001\n",
      "LOSS_WEIGHTS                   {'rpn_class_loss': 1.0, 'rpn_bbox_loss': 1.0, 'mrcnn_class_loss': 1.0, 'mrcnn_bbox_loss': 1.0, 'mrcnn_mask_loss': 1.0}\n",
      "MASK_POOL_SIZE                 14\n",
      "MASK_SHAPE                     [28, 28]\n",
      "MAX_GT_INSTANCES               200\n",
      "MEAN_PIXEL                     [43.53 39.56 48.22]\n",
      "MINI_MASK_SHAPE                (56, 56)\n",
      "NAME                           lesion\n",
      "NUM_CLASSES                    2\n",
      "POOL_SIZE                      7\n",
      "POST_NMS_ROIS_INFERENCE        2000\n",
      "POST_NMS_ROIS_TRAINING         1000\n",
      "PRE_NMS_LIMIT                  6000\n",
      "ROI_POSITIVE_RATIO             0.33\n",
      "RPN_ANCHOR_RATIOS              [0.5, 1, 2]\n",
      "RPN_ANCHOR_SCALES              (8, 16, 32, 64, 128)\n",
      "RPN_ANCHOR_STRIDE              1\n",
      "RPN_BBOX_STD_DEV               [0.1 0.1 0.2 0.2]\n",
      "RPN_NMS_THRESHOLD              0.9\n",
      "RPN_TRAIN_ANCHORS_PER_IMAGE    64\n",
      "STEPS_PER_EPOCH                453\n",
      "TOP_DOWN_PYRAMID_SIZE          256\n",
      "TRAIN_BN                       False\n",
      "TRAIN_ROIS_PER_IMAGE           128\n",
      "USE_MINI_MASK                  True\n",
      "USE_RPN_ROIS                   True\n",
      "VALIDATION_STEPS               194\n",
      "WEIGHT_DECAY                   0.0001\n",
      "\n",
      "\n",
      "Loading weights  mask_rcnn_coco.h5\n",
      "Train network heads\n",
      "\n",
      "Starting at epoch 0. LR=0.001\n",
      "\n",
      "Checkpoint Path: mask_rcnn_logs/lesion20190424T0423/mask_rcnn_lesion_{epoch:04d}.h5\n",
      "Selecting layers to train\n",
      "fpn_c5p5               (Conv2D)\n",
      "fpn_c4p4               (Conv2D)\n",
      "fpn_c3p3               (Conv2D)\n",
      "fpn_c2p2               (Conv2D)\n",
      "fpn_p5                 (Conv2D)\n",
      "fpn_p2                 (Conv2D)\n",
      "fpn_p3                 (Conv2D)\n",
      "fpn_p4                 (Conv2D)\n",
      "In model:  rpn_model\n",
      "    rpn_conv_shared        (Conv2D)\n",
      "    rpn_class_raw          (Conv2D)\n",
      "    rpn_bbox_pred          (Conv2D)\n",
      "mrcnn_mask_conv1       (TimeDistributed)\n",
      "mrcnn_mask_bn1         (TimeDistributed)\n",
      "mrcnn_mask_conv2       (TimeDistributed)\n",
      "mrcnn_mask_bn2         (TimeDistributed)\n",
      "mrcnn_class_conv1      (TimeDistributed)\n",
      "mrcnn_class_bn1        (TimeDistributed)\n",
      "mrcnn_mask_conv3       (TimeDistributed)\n",
      "mrcnn_mask_bn3         (TimeDistributed)\n",
      "mrcnn_class_conv2      (TimeDistributed)\n",
      "mrcnn_class_bn2        (TimeDistributed)\n",
      "mrcnn_mask_conv4       (TimeDistributed)\n",
      "mrcnn_mask_bn4         (TimeDistributed)\n",
      "mrcnn_bbox_fc          (TimeDistributed)\n",
      "mrcnn_mask_deconv      (TimeDistributed)\n",
      "mrcnn_class_logits     (TimeDistributed)\n",
      "mrcnn_mask             (TimeDistributed)\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/anaconda/envs/py36/lib/python3.6/site-packages/tensorflow/python/ops/gradients_impl.py:112: UserWarning: Converting sparse IndexedSlices to a dense Tensor of unknown shape. This may consume a large amount of memory.\n",
      "  \"Converting sparse IndexedSlices to a dense Tensor of unknown shape. \"\n",
      "/anaconda/envs/py36/lib/python3.6/site-packages/keras/engine/training.py:2087: UserWarning: Using a generator with `use_multiprocessing=True` and multiple workers may duplicate your data. Please consider using the`keras.utils.Sequence class.\n",
      "  UserWarning('Using a generator with `use_multiprocessing=True`'\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 1/20\n",
      "452/453 [============================>.] - ETA: 6s - loss: 3.3304 - rpn_class_loss: 0.0704 - rpn_bbox_loss: 2.1768 - mrcnn_class_loss: 0.2411 - mrcnn_bbox_loss: 0.4357 - mrcnn_mask_loss: 0.4064 "
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/anaconda/envs/py36/lib/python3.6/site-packages/keras/engine/training.py:2330: UserWarning: Using a generator with `use_multiprocessing=True` and multiple workers may duplicate your data. Please consider using the`keras.utils.Sequence class.\n",
      "  UserWarning('Using a generator with `use_multiprocessing=True`'\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "453/453 [==============================] - 4637s 10s/step - loss: 3.3288 - rpn_class_loss: 0.0703 - rpn_bbox_loss: 2.1753 - mrcnn_class_loss: 0.2415 - mrcnn_bbox_loss: 0.4355 - mrcnn_mask_loss: 0.4062 - val_loss: 2.3134 - val_rpn_class_loss: 0.0324 - val_rpn_bbox_loss: 1.3400 - val_mrcnn_class_loss: 0.2917 - val_mrcnn_bbox_loss: 0.3078 - val_mrcnn_mask_loss: 0.3414\n",
      "Epoch 2/20\n",
      "453/453 [==============================] - 3981s 9s/step - loss: 2.0230 - rpn_class_loss: 0.0291 - rpn_bbox_loss: 1.1931 - mrcnn_class_loss: 0.2000 - mrcnn_bbox_loss: 0.2759 - mrcnn_mask_loss: 0.3248 - val_loss: 1.8315 - val_rpn_class_loss: 0.0270 - val_rpn_bbox_loss: 1.0678 - val_mrcnn_class_loss: 0.1728 - val_mrcnn_bbox_loss: 0.2505 - val_mrcnn_mask_loss: 0.3134\n",
      "Epoch 3/20\n",
      "453/453 [==============================] - 3887s 9s/step - loss: 1.9333 - rpn_class_loss: 0.0261 - rpn_bbox_loss: 1.1715 - mrcnn_class_loss: 0.1803 - mrcnn_bbox_loss: 0.2490 - mrcnn_mask_loss: 0.3063 - val_loss: 1.7927 - val_rpn_class_loss: 0.0280 - val_rpn_bbox_loss: 1.0507 - val_mrcnn_class_loss: 0.1736 - val_mrcnn_bbox_loss: 0.2293 - val_mrcnn_mask_loss: 0.3112\n",
      "Epoch 4/20\n",
      "452/453 [============================>.] - ETA: 4s - loss: 1.7203 - rpn_class_loss: 0.0236 - rpn_bbox_loss: 1.0237 - mrcnn_class_loss: 0.1616 - mrcnn_bbox_loss: 0.2162 - mrcnn_mask_loss: 0.2953"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "ERROR:root:Error processing image {'id': 'ISIC_0015109', 'source': 'lesion', 'path': 'data/test/images/ISIC_0015109.jpg'}\n",
      "Traceback (most recent call last):\n",
      "  File \"/data/home/louisli/notebooks/ISIC/mrcnn/model.py\", line 1709, in data_generator\n",
      "    use_mini_mask=config.USE_MINI_MASK)\n",
      "  File \"/data/home/louisli/notebooks/ISIC/mrcnn/model.py\", line 1219, in load_image_gt\n",
      "    mode=config.IMAGE_RESIZE_MODE)\n",
      "  File \"/data/home/louisli/notebooks/ISIC/mrcnn/utils.py\", line 448, in resize_image\n",
      "    preserve_range=True)\n",
      "  File \"/data/home/louisli/notebooks/ISIC/mrcnn/utils.py\", line 903, in resize\n",
      "    anti_aliasing_sigma=anti_aliasing_sigma)\n",
      "  File \"/anaconda/envs/py36/lib/python3.6/site-packages/skimage/transform/_warps.py\", line 179, in resize\n",
      "    preserve_range=preserve_range)\n",
      "  File \"/anaconda/envs/py36/lib/python3.6/site-packages/skimage/transform/_warps.py\", line 871, in warp\n",
      "    order=order, mode=mode, cval=cval))\n",
      "  File \"skimage/transform/_warps_cy.pyx\", line 138, in skimage.transform._warps_cy._warp_fast\n",
      "MemoryErrorERROR:root:Error processing image {'id': 'ISIC_0015142', 'source': 'lesion', 'path': 'data/train/images/ISIC_0015142.jpg'}\n",
      "Traceback (most recent call last):\n",
      "  File \"/data/home/louisli/notebooks/ISIC/mrcnn/model.py\", line 1709, in data_generator\n",
      "    use_mini_mask=config.USE_MINI_MASK)\n",
      "  File \"/data/home/louisli/notebooks/ISIC/mrcnn/model.py\", line 1219, in load_image_gt\n",
      "    mode=config.IMAGE_RESIZE_MODE)\n",
      "  File \"/data/home/louisli/notebooks/ISIC/mrcnn/utils.py\", line 448, in resize_image\n",
      "    preserve_range=True)\n",
      "  File \"/data/home/louisli/notebooks/ISIC/mrcnn/utils.py\", line 903, in resize\n",
      "    anti_aliasing_sigma=anti_aliasing_sigma)\n",
      "  File \"/anaconda/envs/py36/lib/python3.6/site-packages/skimage/transform/_warps.py\", line 179, in resize\n",
      "    preserve_range=preserve_range)\n",
      "  File \"/anaconda/envs/py36/lib/python3.6/site-packages/skimage/transform/_warps.py\", line 872, in warp\n",
      "    warped = np.dstack(dims)\n",
      "  File \"/anaconda/envs/py36/lib/python3.6/site-packages/numpy/lib/shape_base.py\", line 699, in dstack\n",
      "    return _nx.concatenate([atleast_3d(_m) for _m in tup], 2)\n",
      "MemoryErrorERROR:root:Error processing image {'id': 'ISIC_0013360', 'source': 'lesion', 'path': 'data/test/images/ISIC_0013360.jpg'}\n",
      "Traceback (most recent call last):\n",
      "  File \"/data/home/louisli/notebooks/ISIC/mrcnn/model.py\", line 1709, in data_generator\n",
      "    use_mini_mask=config.USE_MINI_MASK)\n",
      "  File \"/data/home/louisli/notebooks/ISIC/mrcnn/model.py\", line 1219, in load_image_gt\n",
      "    mode=config.IMAGE_RESIZE_MODE)\n",
      "  File \"/data/home/louisli/notebooks/ISIC/mrcnn/utils.py\", line 448, in resize_image\n",
      "    preserve_range=True)\n",
      "  File \"/data/home/louisli/notebooks/ISIC/mrcnn/utils.py\", line 903, in resize\n",
      "    anti_aliasing_sigma=anti_aliasing_sigma)\n",
      "  File \"/anaconda/envs/py36/lib/python3.6/site-packages/skimage/transform/_warps.py\", line 179, in resize\n",
      "    preserve_range=preserve_range)\n",
      "  File \"/anaconda/envs/py36/lib/python3.6/site-packages/skimage/transform/_warps.py\", line 872, in warp\n",
      "    warped = np.dstack(dims)\n",
      "  File \"/anaconda/envs/py36/lib/python3.6/site-packages/numpy/lib/shape_base.py\", line 699, in dstack\n",
      "    return _nx.concatenate([atleast_3d(_m) for _m in tup], 2)\n",
      "MemoryError\n",
      "\n",
      "\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "453/453 [==============================] - 4101s 9s/step - loss: 1.7183 - rpn_class_loss: 0.0236 - rpn_bbox_loss: 1.0226 - mrcnn_class_loss: 0.1614 - mrcnn_bbox_loss: 0.2158 - mrcnn_mask_loss: 0.2948 - val_loss: 1.7569 - val_rpn_class_loss: 0.0210 - val_rpn_bbox_loss: 1.0770 - val_mrcnn_class_loss: 0.1541 - val_mrcnn_bbox_loss: 0.2038 - val_mrcnn_mask_loss: 0.3011\n",
      "Epoch 5/20\n",
      "453/453 [==============================] - 4056s 9s/step - loss: 1.7344 - rpn_class_loss: 0.0219 - rpn_bbox_loss: 1.0463 - mrcnn_class_loss: 0.1540 - mrcnn_bbox_loss: 0.2141 - mrcnn_mask_loss: 0.2980 - val_loss: 1.5423 - val_rpn_class_loss: 0.0204 - val_rpn_bbox_loss: 0.8989 - val_mrcnn_class_loss: 0.1473 - val_mrcnn_bbox_loss: 0.1869 - val_mrcnn_mask_loss: 0.2888\n",
      "Epoch 6/20\n",
      "453/453 [==============================] - 4096s 9s/step - loss: 1.6309 - rpn_class_loss: 0.0226 - rpn_bbox_loss: 0.9805 - mrcnn_class_loss: 0.1477 - mrcnn_bbox_loss: 0.1926 - mrcnn_mask_loss: 0.2875 - val_loss: 1.5830 - val_rpn_class_loss: 0.0191 - val_rpn_bbox_loss: 0.9546 - val_mrcnn_class_loss: 0.1338 - val_mrcnn_bbox_loss: 0.2018 - val_mrcnn_mask_loss: 0.2737\n",
      "Epoch 7/20\n",
      "453/453 [==============================] - 4020s 9s/step - loss: 1.6171 - rpn_class_loss: 0.0201 - rpn_bbox_loss: 0.9731 - mrcnn_class_loss: 0.1509 - mrcnn_bbox_loss: 0.1870 - mrcnn_mask_loss: 0.2860 - val_loss: 1.5433 - val_rpn_class_loss: 0.0214 - val_rpn_bbox_loss: 0.9253 - val_mrcnn_class_loss: 0.1559 - val_mrcnn_bbox_loss: 0.1790 - val_mrcnn_mask_loss: 0.2617\n",
      "Epoch 8/20\n",
      "453/453 [==============================] - 3978s 9s/step - loss: 1.6014 - rpn_class_loss: 0.0204 - rpn_bbox_loss: 0.9683 - mrcnn_class_loss: 0.1508 - mrcnn_bbox_loss: 0.1772 - mrcnn_mask_loss: 0.2846 - val_loss: 1.5732 - val_rpn_class_loss: 0.0189 - val_rpn_bbox_loss: 0.9124 - val_mrcnn_class_loss: 0.1522 - val_mrcnn_bbox_loss: 0.2034 - val_mrcnn_mask_loss: 0.2862\n",
      "Epoch 9/20\n",
      "453/453 [==============================] - 3995s 9s/step - loss: 1.6897 - rpn_class_loss: 0.0210 - rpn_bbox_loss: 1.0435 - mrcnn_class_loss: 0.1519 - mrcnn_bbox_loss: 0.1906 - mrcnn_mask_loss: 0.2828 - val_loss: 1.8404 - val_rpn_class_loss: 0.0191 - val_rpn_bbox_loss: 1.1490 - val_mrcnn_class_loss: 0.2059 - val_mrcnn_bbox_loss: 0.1920 - val_mrcnn_mask_loss: 0.2743\n",
      "Epoch 10/20\n",
      "453/453 [==============================] - 4107s 9s/step - loss: 1.5090 - rpn_class_loss: 0.0199 - rpn_bbox_loss: 0.9243 - mrcnn_class_loss: 0.1384 - mrcnn_bbox_loss: 0.1633 - mrcnn_mask_loss: 0.2631 - val_loss: 1.5205 - val_rpn_class_loss: 0.0165 - val_rpn_bbox_loss: 0.8916 - val_mrcnn_class_loss: 0.1559 - val_mrcnn_bbox_loss: 0.1836 - val_mrcnn_mask_loss: 0.2729\n",
      "Epoch 11/20\n",
      "453/453 [==============================] - 4047s 9s/step - loss: 1.4830 - rpn_class_loss: 0.0162 - rpn_bbox_loss: 0.8989 - mrcnn_class_loss: 0.1400 - mrcnn_bbox_loss: 0.1692 - mrcnn_mask_loss: 0.2587 - val_loss: 1.5687 - val_rpn_class_loss: 0.0222 - val_rpn_bbox_loss: 0.9946 - val_mrcnn_class_loss: 0.1342 - val_mrcnn_bbox_loss: 0.1631 - val_mrcnn_mask_loss: 0.2545\n",
      "Epoch 12/20\n",
      "453/453 [==============================] - 3998s 9s/step - loss: 1.5291 - rpn_class_loss: 0.0190 - rpn_bbox_loss: 0.9328 - mrcnn_class_loss: 0.1449 - mrcnn_bbox_loss: 0.1678 - mrcnn_mask_loss: 0.2645 - val_loss: 1.7164 - val_rpn_class_loss: 0.0186 - val_rpn_bbox_loss: 1.0686 - val_mrcnn_class_loss: 0.1567 - val_mrcnn_bbox_loss: 0.1880 - val_mrcnn_mask_loss: 0.2844\n",
      "Epoch 13/20\n",
      "453/453 [==============================] - 4082s 9s/step - loss: 1.4791 - rpn_class_loss: 0.0184 - rpn_bbox_loss: 0.8967 - mrcnn_class_loss: 0.1432 - mrcnn_bbox_loss: 0.1662 - mrcnn_mask_loss: 0.2544 - val_loss: 1.4595 - val_rpn_class_loss: 0.0192 - val_rpn_bbox_loss: 0.8662 - val_mrcnn_class_loss: 0.1434 - val_mrcnn_bbox_loss: 0.1625 - val_mrcnn_mask_loss: 0.2682\n",
      "Epoch 14/20\n",
      "453/453 [==============================] - 4023s 9s/step - loss: 1.5006 - rpn_class_loss: 0.0166 - rpn_bbox_loss: 0.8993 - mrcnn_class_loss: 0.1468 - mrcnn_bbox_loss: 0.1646 - mrcnn_mask_loss: 0.2732 - val_loss: 1.3970 - val_rpn_class_loss: 0.0173 - val_rpn_bbox_loss: 0.7600 - val_mrcnn_class_loss: 0.1642 - val_mrcnn_bbox_loss: 0.1772 - val_mrcnn_mask_loss: 0.2783\n",
      "Epoch 15/20\n",
      "453/453 [==============================] - 4074s 9s/step - loss: 1.4438 - rpn_class_loss: 0.0166 - rpn_bbox_loss: 0.8666 - mrcnn_class_loss: 0.1451 - mrcnn_bbox_loss: 0.1617 - mrcnn_mask_loss: 0.2538 - val_loss: 1.4439 - val_rpn_class_loss: 0.0160 - val_rpn_bbox_loss: 0.8438 - val_mrcnn_class_loss: 0.1453 - val_mrcnn_bbox_loss: 0.1668 - val_mrcnn_mask_loss: 0.2720\n",
      "Epoch 16/20\n",
      "453/453 [==============================] - 4119s 9s/step - loss: 1.4879 - rpn_class_loss: 0.0155 - rpn_bbox_loss: 0.8958 - mrcnn_class_loss: 0.1467 - mrcnn_bbox_loss: 0.1700 - mrcnn_mask_loss: 0.2599 - val_loss: 1.4443 - val_rpn_class_loss: 0.0150 - val_rpn_bbox_loss: 0.8508 - val_mrcnn_class_loss: 0.1642 - val_mrcnn_bbox_loss: 0.1641 - val_mrcnn_mask_loss: 0.2503\n",
      "Epoch 17/20\n",
      "453/453 [==============================] - 3867s 9s/step - loss: 1.4261 - rpn_class_loss: 0.0156 - rpn_bbox_loss: 0.8411 - mrcnn_class_loss: 0.1480 - mrcnn_bbox_loss: 0.1597 - mrcnn_mask_loss: 0.2618 - val_loss: 1.3676 - val_rpn_class_loss: 0.0134 - val_rpn_bbox_loss: 0.7983 - val_mrcnn_class_loss: 0.1451 - val_mrcnn_bbox_loss: 0.1472 - val_mrcnn_mask_loss: 0.2635\n",
      "Epoch 18/20\n",
      "453/453 [==============================] - 4087s 9s/step - loss: 1.4447 - rpn_class_loss: 0.0166 - rpn_bbox_loss: 0.8828 - mrcnn_class_loss: 0.1425 - mrcnn_bbox_loss: 0.1509 - mrcnn_mask_loss: 0.2520 - val_loss: 1.4566 - val_rpn_class_loss: 0.0155 - val_rpn_bbox_loss: 0.8518 - val_mrcnn_class_loss: 0.1643 - val_mrcnn_bbox_loss: 0.1631 - val_mrcnn_mask_loss: 0.2618\n",
      "Epoch 19/20\n",
      "453/453 [==============================] - 3908s 9s/step - loss: 1.3848 - rpn_class_loss: 0.0155 - rpn_bbox_loss: 0.8105 - mrcnn_class_loss: 0.1466 - mrcnn_bbox_loss: 0.1539 - mrcnn_mask_loss: 0.2583 - val_loss: 1.3136 - val_rpn_class_loss: 0.0139 - val_rpn_bbox_loss: 0.7737 - val_mrcnn_class_loss: 0.1352 - val_mrcnn_bbox_loss: 0.1485 - val_mrcnn_mask_loss: 0.2424\n",
      "Epoch 20/20\n",
      "453/453 [==============================] - 3924s 9s/step - loss: 1.3642 - rpn_class_loss: 0.0137 - rpn_bbox_loss: 0.7993 - mrcnn_class_loss: 0.1469 - mrcnn_bbox_loss: 0.1524 - mrcnn_mask_loss: 0.2520 - val_loss: 1.4399 - val_rpn_class_loss: 0.0166 - val_rpn_bbox_loss: 0.8332 - val_mrcnn_class_loss: 0.1498 - val_mrcnn_bbox_loss: 0.1733 - val_mrcnn_mask_loss: 0.2671\n",
      "Train all layers\n",
      "\n",
      "Starting at epoch 20. LR=0.001\n",
      "\n",
      "Checkpoint Path: mask_rcnn_logs/lesion20190424T0423/mask_rcnn_lesion_{epoch:04d}.h5\n",
      "Selecting layers to train\n",
      "conv1                  (Conv2D)\n",
      "bn_conv1               (BatchNorm)\n",
      "res2a_branch2a         (Conv2D)\n",
      "bn2a_branch2a          (BatchNorm)\n",
      "res2a_branch2b         (Conv2D)\n",
      "bn2a_branch2b          (BatchNorm)\n",
      "res2a_branch2c         (Conv2D)\n",
      "res2a_branch1          (Conv2D)\n",
      "bn2a_branch2c          (BatchNorm)\n",
      "bn2a_branch1           (BatchNorm)\n",
      "res2b_branch2a         (Conv2D)\n",
      "bn2b_branch2a          (BatchNorm)\n",
      "res2b_branch2b         (Conv2D)\n",
      "bn2b_branch2b          (BatchNorm)\n",
      "res2b_branch2c         (Conv2D)\n",
      "bn2b_branch2c          (BatchNorm)\n",
      "res2c_branch2a         (Conv2D)\n",
      "bn2c_branch2a          (BatchNorm)\n",
      "res2c_branch2b         (Conv2D)\n",
      "bn2c_branch2b          (BatchNorm)\n",
      "res2c_branch2c         (Conv2D)\n",
      "bn2c_branch2c          (BatchNorm)\n",
      "res3a_branch2a         (Conv2D)\n",
      "bn3a_branch2a          (BatchNorm)\n",
      "res3a_branch2b         (Conv2D)\n",
      "bn3a_branch2b          (BatchNorm)\n",
      "res3a_branch2c         (Conv2D)\n",
      "res3a_branch1          (Conv2D)\n",
      "bn3a_branch2c          (BatchNorm)\n",
      "bn3a_branch1           (BatchNorm)\n",
      "res3b_branch2a         (Conv2D)\n",
      "bn3b_branch2a          (BatchNorm)\n",
      "res3b_branch2b         (Conv2D)\n",
      "bn3b_branch2b          (BatchNorm)\n",
      "res3b_branch2c         (Conv2D)\n",
      "bn3b_branch2c          (BatchNorm)\n",
      "res3c_branch2a         (Conv2D)\n",
      "bn3c_branch2a          (BatchNorm)\n",
      "res3c_branch2b         (Conv2D)\n",
      "bn3c_branch2b          (BatchNorm)\n",
      "res3c_branch2c         (Conv2D)\n",
      "bn3c_branch2c          (BatchNorm)\n",
      "res3d_branch2a         (Conv2D)\n",
      "bn3d_branch2a          (BatchNorm)\n",
      "res3d_branch2b         (Conv2D)\n",
      "bn3d_branch2b          (BatchNorm)\n",
      "res3d_branch2c         (Conv2D)\n",
      "bn3d_branch2c          (BatchNorm)\n",
      "res4a_branch2a         (Conv2D)\n",
      "bn4a_branch2a          (BatchNorm)\n",
      "res4a_branch2b         (Conv2D)\n",
      "bn4a_branch2b          (BatchNorm)\n",
      "res4a_branch2c         (Conv2D)\n",
      "res4a_branch1          (Conv2D)\n",
      "bn4a_branch2c          (BatchNorm)\n",
      "bn4a_branch1           (BatchNorm)\n",
      "res4b_branch2a         (Conv2D)\n",
      "bn4b_branch2a          (BatchNorm)\n",
      "res4b_branch2b         (Conv2D)\n",
      "bn4b_branch2b          (BatchNorm)\n",
      "res4b_branch2c         (Conv2D)\n",
      "bn4b_branch2c          (BatchNorm)\n",
      "res4c_branch2a         (Conv2D)\n",
      "bn4c_branch2a          (BatchNorm)\n",
      "res4c_branch2b         (Conv2D)\n",
      "bn4c_branch2b          (BatchNorm)\n",
      "res4c_branch2c         (Conv2D)\n",
      "bn4c_branch2c          (BatchNorm)\n",
      "res4d_branch2a         (Conv2D)\n",
      "bn4d_branch2a          (BatchNorm)\n",
      "res4d_branch2b         (Conv2D)\n",
      "bn4d_branch2b          (BatchNorm)\n",
      "res4d_branch2c         (Conv2D)\n",
      "bn4d_branch2c          (BatchNorm)\n",
      "res4e_branch2a         (Conv2D)\n",
      "bn4e_branch2a          (BatchNorm)\n",
      "res4e_branch2b         (Conv2D)\n",
      "bn4e_branch2b          (BatchNorm)\n",
      "res4e_branch2c         (Conv2D)\n",
      "bn4e_branch2c          (BatchNorm)\n",
      "res4f_branch2a         (Conv2D)\n",
      "bn4f_branch2a          (BatchNorm)\n",
      "res4f_branch2b         (Conv2D)\n",
      "bn4f_branch2b          (BatchNorm)\n",
      "res4f_branch2c         (Conv2D)\n",
      "bn4f_branch2c          (BatchNorm)\n",
      "res5a_branch2a         (Conv2D)\n",
      "bn5a_branch2a          (BatchNorm)\n",
      "res5a_branch2b         (Conv2D)\n",
      "bn5a_branch2b          (BatchNorm)\n",
      "res5a_branch2c         (Conv2D)\n",
      "res5a_branch1          (Conv2D)\n",
      "bn5a_branch2c          (BatchNorm)\n",
      "bn5a_branch1           (BatchNorm)\n",
      "res5b_branch2a         (Conv2D)\n",
      "bn5b_branch2a          (BatchNorm)\n",
      "res5b_branch2b         (Conv2D)\n",
      "bn5b_branch2b          (BatchNorm)\n",
      "res5b_branch2c         (Conv2D)\n",
      "bn5b_branch2c          (BatchNorm)\n",
      "res5c_branch2a         (Conv2D)\n",
      "bn5c_branch2a          (BatchNorm)\n",
      "res5c_branch2b         (Conv2D)\n",
      "bn5c_branch2b          (BatchNorm)\n",
      "res5c_branch2c         (Conv2D)\n",
      "bn5c_branch2c          (BatchNorm)\n",
      "fpn_c5p5               (Conv2D)\n",
      "fpn_c4p4               (Conv2D)\n",
      "fpn_c3p3               (Conv2D)\n",
      "fpn_c2p2               (Conv2D)\n",
      "fpn_p5                 (Conv2D)\n",
      "fpn_p2                 (Conv2D)\n",
      "fpn_p3                 (Conv2D)\n",
      "fpn_p4                 (Conv2D)\n",
      "In model:  rpn_model\n",
      "    rpn_conv_shared        (Conv2D)\n",
      "    rpn_class_raw          (Conv2D)\n",
      "    rpn_bbox_pred          (Conv2D)\n",
      "mrcnn_mask_conv1       (TimeDistributed)\n",
      "mrcnn_mask_bn1         (TimeDistributed)\n",
      "mrcnn_mask_conv2       (TimeDistributed)\n",
      "mrcnn_mask_bn2         (TimeDistributed)\n",
      "mrcnn_class_conv1      (TimeDistributed)\n",
      "mrcnn_class_bn1        (TimeDistributed)\n",
      "mrcnn_mask_conv3       (TimeDistributed)\n",
      "mrcnn_mask_bn3         (TimeDistributed)\n",
      "mrcnn_class_conv2      (TimeDistributed)\n",
      "mrcnn_class_bn2        (TimeDistributed)\n",
      "mrcnn_mask_conv4       (TimeDistributed)\n",
      "mrcnn_mask_bn4         (TimeDistributed)\n",
      "mrcnn_bbox_fc          (TimeDistributed)\n",
      "mrcnn_mask_deconv      (TimeDistributed)\n",
      "mrcnn_class_logits     (TimeDistributed)\n",
      "mrcnn_mask             (TimeDistributed)\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 21/40\n",
      "453/453 [==============================] - 4677s 10s/step - loss: 1.3777 - rpn_class_loss: 0.0134 - rpn_bbox_loss: 0.8174 - mrcnn_class_loss: 0.1383 - mrcnn_bbox_loss: 0.1443 - mrcnn_mask_loss: 0.2643 - val_loss: 1.3521 - val_rpn_class_loss: 0.0117 - val_rpn_bbox_loss: 0.8268 - val_mrcnn_class_loss: 0.1303 - val_mrcnn_bbox_loss: 0.1360 - val_mrcnn_mask_loss: 0.2474\n",
      "Epoch 22/40\n",
      "453/453 [==============================] - 3976s 9s/step - loss: 1.2581 - rpn_class_loss: 0.0131 - rpn_bbox_loss: 0.7155 - mrcnn_class_loss: 0.1357 - mrcnn_bbox_loss: 0.1428 - mrcnn_mask_loss: 0.2509 - val_loss: 1.2161 - val_rpn_class_loss: 0.0131 - val_rpn_bbox_loss: 0.6928 - val_mrcnn_class_loss: 0.1346 - val_mrcnn_bbox_loss: 0.1246 - val_mrcnn_mask_loss: 0.2511\n",
      "Epoch 23/40\n",
      "453/453 [==============================] - 4125s 9s/step - loss: 1.2870 - rpn_class_loss: 0.0121 - rpn_bbox_loss: 0.7415 - mrcnn_class_loss: 0.1387 - mrcnn_bbox_loss: 0.1416 - mrcnn_mask_loss: 0.2531 - val_loss: 1.3576 - val_rpn_class_loss: 0.0113 - val_rpn_bbox_loss: 0.8235 - val_mrcnn_class_loss: 0.1295 - val_mrcnn_bbox_loss: 0.1394 - val_mrcnn_mask_loss: 0.2540\n",
      "Epoch 24/40\n",
      "453/453 [==============================] - 4119s 9s/step - loss: 1.1944 - rpn_class_loss: 0.0114 - rpn_bbox_loss: 0.6654 - mrcnn_class_loss: 0.1436 - mrcnn_bbox_loss: 0.1328 - mrcnn_mask_loss: 0.2413 - val_loss: 1.1632 - val_rpn_class_loss: 0.0098 - val_rpn_bbox_loss: 0.6118 - val_mrcnn_class_loss: 0.1511 - val_mrcnn_bbox_loss: 0.1403 - val_mrcnn_mask_loss: 0.2501\n",
      "Epoch 25/40\n",
      "453/453 [==============================] - 3998s 9s/step - loss: 1.1532 - rpn_class_loss: 0.0111 - rpn_bbox_loss: 0.6383 - mrcnn_class_loss: 0.1319 - mrcnn_bbox_loss: 0.1310 - mrcnn_mask_loss: 0.2408 - val_loss: 1.1331 - val_rpn_class_loss: 0.0088 - val_rpn_bbox_loss: 0.6501 - val_mrcnn_class_loss: 0.1011 - val_mrcnn_bbox_loss: 0.1217 - val_mrcnn_mask_loss: 0.2514\n",
      "Epoch 26/40\n",
      "453/453 [==============================] - 4021s 9s/step - loss: 1.1788 - rpn_class_loss: 0.0105 - rpn_bbox_loss: 0.6701 - mrcnn_class_loss: 0.1210 - mrcnn_bbox_loss: 0.1334 - mrcnn_mask_loss: 0.2438 - val_loss: 1.1778 - val_rpn_class_loss: 0.0122 - val_rpn_bbox_loss: 0.6527 - val_mrcnn_class_loss: 0.1156 - val_mrcnn_bbox_loss: 0.1387 - val_mrcnn_mask_loss: 0.2587\n",
      "Epoch 27/40\n",
      "453/453 [==============================] - 3991s 9s/step - loss: 1.1534 - rpn_class_loss: 0.0097 - rpn_bbox_loss: 0.6382 - mrcnn_class_loss: 0.1186 - mrcnn_bbox_loss: 0.1373 - mrcnn_mask_loss: 0.2495 - val_loss: 1.1109 - val_rpn_class_loss: 0.0086 - val_rpn_bbox_loss: 0.5898 - val_mrcnn_class_loss: 0.1057 - val_mrcnn_bbox_loss: 0.1463 - val_mrcnn_mask_loss: 0.2606\n",
      "Epoch 28/40\n",
      "453/453 [==============================] - 3968s 9s/step - loss: 1.1074 - rpn_class_loss: 0.0096 - rpn_bbox_loss: 0.6142 - mrcnn_class_loss: 0.1048 - mrcnn_bbox_loss: 0.1317 - mrcnn_mask_loss: 0.2471 - val_loss: 1.0764 - val_rpn_class_loss: 0.0096 - val_rpn_bbox_loss: 0.6027 - val_mrcnn_class_loss: 0.1013 - val_mrcnn_bbox_loss: 0.1216 - val_mrcnn_mask_loss: 0.2412\n",
      "Epoch 30/40\n",
      "453/453 [==============================] - 3981s 9s/step - loss: 1.1390 - rpn_class_loss: 0.0115 - rpn_bbox_loss: 0.6272 - mrcnn_class_loss: 0.1057 - mrcnn_bbox_loss: 0.1366 - mrcnn_mask_loss: 0.2581 - val_loss: 1.1883 - val_rpn_class_loss: 0.0119 - val_rpn_bbox_loss: 0.6205 - val_mrcnn_class_loss: 0.1371 - val_mrcnn_bbox_loss: 0.1474 - val_mrcnn_mask_loss: 0.2714\n",
      "Epoch 31/40\n",
      "453/453 [==============================] - 4031s 9s/step - loss: 1.0889 - rpn_class_loss: 0.0113 - rpn_bbox_loss: 0.6039 - mrcnn_class_loss: 0.1045 - mrcnn_bbox_loss: 0.1305 - mrcnn_mask_loss: 0.2385 - val_loss: 1.0694 - val_rpn_class_loss: 0.0078 - val_rpn_bbox_loss: 0.5682 - val_mrcnn_class_loss: 0.1114 - val_mrcnn_bbox_loss: 0.1337 - val_mrcnn_mask_loss: 0.2483\n",
      "Epoch 32/40\n",
      "453/453 [==============================] - 4092s 9s/step - loss: 1.1019 - rpn_class_loss: 0.0093 - rpn_bbox_loss: 0.5992 - mrcnn_class_loss: 0.1022 - mrcnn_bbox_loss: 0.1385 - mrcnn_mask_loss: 0.2528 - val_loss: 1.1269 - val_rpn_class_loss: 0.0072 - val_rpn_bbox_loss: 0.6708 - val_mrcnn_class_loss: 0.0668 - val_mrcnn_bbox_loss: 0.1293 - val_mrcnn_mask_loss: 0.2528\n",
      "Epoch 33/40\n",
      "453/453 [==============================] - 4019s 9s/step - loss: 1.0462 - rpn_class_loss: 0.0080 - rpn_bbox_loss: 0.5655 - mrcnn_class_loss: 0.0943 - mrcnn_bbox_loss: 0.1298 - mrcnn_mask_loss: 0.2486 - val_loss: 1.1757 - val_rpn_class_loss: 0.0068 - val_rpn_bbox_loss: 0.6548 - val_mrcnn_class_loss: 0.1020 - val_mrcnn_bbox_loss: 0.1446 - val_mrcnn_mask_loss: 0.2675\n",
      "Epoch 34/40\n",
      "453/453 [==============================] - 3987s 9s/step - loss: 1.0943 - rpn_class_loss: 0.0107 - rpn_bbox_loss: 0.6056 - mrcnn_class_loss: 0.0970 - mrcnn_bbox_loss: 0.1316 - mrcnn_mask_loss: 0.2495 - val_loss: 1.0386 - val_rpn_class_loss: 0.0074 - val_rpn_bbox_loss: 0.5500 - val_mrcnn_class_loss: 0.0937 - val_mrcnn_bbox_loss: 0.1323 - val_mrcnn_mask_loss: 0.2552\n",
      "Epoch 35/40\n",
      "453/453 [==============================] - 4019s 9s/step - loss: 1.0653 - rpn_class_loss: 0.0087 - rpn_bbox_loss: 0.5797 - mrcnn_class_loss: 0.0928 - mrcnn_bbox_loss: 0.1345 - mrcnn_mask_loss: 0.2496 - val_loss: 1.0426 - val_rpn_class_loss: 0.0073 - val_rpn_bbox_loss: 0.5709 - val_mrcnn_class_loss: 0.0976 - val_mrcnn_bbox_loss: 0.1256 - val_mrcnn_mask_loss: 0.2411\n",
      "Epoch 36/40\n",
      "362/453 [======================>.......] - ETA: 7:04 - loss: 1.0115 - rpn_class_loss: 0.0083 - rpn_bbox_loss: 0.5555 - mrcnn_class_loss: 0.0877 - mrcnn_bbox_loss: 0.1235 - mrcnn_mask_loss: 0.2365"
     ]
    }
   ],
   "source": [
    "# Run-time options for this notebook run.\n",
    "command = 'train'  # alternative: detect\n",
    "dataset = 'data'\n",
    "weights = 'coco'\n",
    "logs = 'mask_rcnn_logs'\n",
    "\n",
    "training = (command == \"train\")\n",
    "\n",
    "# Pick the configuration that matches the requested mode and show it.\n",
    "config = LesionConfig() if training else LesionInferenceConfig()\n",
    "config.display()\n",
    "\n",
    "# Build the Mask R-CNN graph in the matching mode.\n",
    "mode = \"training\" if training else \"inference\"\n",
    "model = modellib.MaskRCNN(mode=mode, config=config, model_dir=logs)\n",
    "\n",
    "# Resolve which weights file to load.\n",
    "choice = weights.lower()\n",
    "if choice == \"coco\":\n",
    "    weights_path = COCO_WEIGHTS_PATH\n",
    "    # Fetch the pre-trained COCO weights on first use.\n",
    "    if not os.path.exists(weights_path):\n",
    "        utils.download_trained_weights(weights_path)\n",
    "elif choice == \"last\":\n",
    "    # Resume from the most recent checkpoint in the log directory.\n",
    "    weights_path = model.find_last()\n",
    "elif choice == \"imagenet\":\n",
    "    # Start from ImageNet-pretrained backbone weights.\n",
    "    weights_path = model.get_imagenet_weights()\n",
    "else:\n",
    "    # Treat the value as an explicit path to a weights file.\n",
    "    weights_path = weights\n",
    "\n",
    "# Load the weights into the model.\n",
    "print(\"Loading weights \", weights_path)\n",
    "if choice == \"coco\":\n",
    "    # The COCO head layers are sized for a different class count, so\n",
    "    # exclude them and let the class-specific layers re-initialize.\n",
    "    model.load_weights(weights_path, by_name=True, exclude=[\n",
    "        \"mrcnn_class_logits\", \"mrcnn_bbox_fc\",\n",
    "        \"mrcnn_bbox\", \"mrcnn_mask\"])\n",
    "else:\n",
    "    model.load_weights(weights_path, by_name=True)\n",
    "\n",
    "# Train or evaluate\n",
    "if training:\n",
    "    train(model, dataset)\n",
    "    #detect(model, dataset, args.subset)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
