{
  "nbformat": 4,
  "nbformat_minor": 0,
  "metadata": {
    "accelerator": "GPU",
    "colab": {
      "name": "tutorial.ipynb",
      "provenance": [],
      "collapsed_sections": []
    },
    "kernelspec": {
      "name": "python3",
      "display_name": "Python 3"
    }
  },
  "cells": [
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "V8-yl-s-WKMG"
      },
      "source": [
        "# EfficientDet Tutorial: inference, eval, and training \n",
        "\n",
        "\n",
        "\n",
        "<table align=\"left\"><td>\n",
        "  <a target=\"_blank\"  href=\"https://github.com/google/automl/blob/master/efficientdet/tf2/tutorial.ipynb\">\n",
        "    <img src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" />View source on github\n",
        "  </a>\n",
        "</td><td>\n",
        "  <a target=\"_blank\"  href=\"https://colab.sandbox.google.com/github/google/automl/blob/master/efficientdet/tf2/tutorial.ipynb\">\n",
        "    <img width=32px src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" />Run in Google Colab</a>\n",
        "</td></table>"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "muwOCNHaq85j"
      },
      "source": [
        "# 0. Install and view graph."
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "dggLVarNxxvC"
      },
      "source": [
        "## 0.1 Install package and download source code/image.\n",
        "\n"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "hGL97-GXjSUw"
      },
      "source": [
        "%%capture\n",
        "#@title\n",
        "import os\n",
        "import sys\n",
        "import tensorflow as tf\n",
        "\n",
        "# Download source code.\n",
        "if \"efficientdet\" not in os.getcwd():\n",
        "  !git clone --depth 1 https://github.com/google/automl\n",
        "  os.chdir('automl/efficientdet')\n",
        "  sys.path.append('.')\n",
        "  !pip install -r requirements.txt\n",
        "  !pip install -U 'git+https://github.com/cocodataset/cocoapi.git#subdirectory=PythonAPI'\n",
        "else:\n",
        "  !git pull"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "Tow-ic7H3d7i"
      },
      "source": [
        "MODEL = 'efficientdet-lite0'  #@param\n",
        "\n",
        "def download(m):\n",
        "  if m not in os.listdir():\n",
        "    if m.find('lite'):\n",
        "      !wget https://storage.googleapis.com/cloud-tpu-checkpoints/efficientdet/coco/{m}.tgz\n",
        "      !tar zxf {m}.tgz\n",
        "    else:\n",
        "      !wget https://storage.googleapis.com/cloud-tpu-checkpoints/efficientdet/coco/{m}.tar.gz\n",
        "      !tar zxf {m}.tar.gz\n",
        "  ckpt_path = os.path.join(os.getcwd(), m)\n",
        "  return ckpt_path\n",
        "\n",
        "# Download checkpoint.\n",
        "ckpt_path = download(MODEL)\n",
        "print('Use model in {}'.format(ckpt_path))\n",
        "\n",
        "# Prepare image and visualization settings.\n",
        "image_url =  'https://user-images.githubusercontent.com/11736571/77320690-099af300-6d37-11ea-9d86-24f14dc2d540.png'#@param\n",
        "image_name = 'img.png' #@param\n",
        "!wget {image_url} -O img.png\n",
        "import os\n",
        "img_path = os.path.join(os.getcwd(), 'img.png')\n",
        "\n",
        "min_score_thresh = 0.35  #@param\n",
        "max_boxes_to_draw = 200  #@param\n",
        "line_thickness =   2#@param\n",
        "\n",
        "import PIL\n",
        "# Get the largest of height/width and round to 128.\n",
        "image_size = max(PIL.Image.open(img_path).size)"
      ],
      "execution_count": null,
      "outputs": []
    },
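    {
      "cell_type": "code",
      "metadata": {},
      "source": [
        "# Optional environment check: confirm the TensorFlow version, that the Colab GPU\n",
        "# runtime is visible, and echo the paths set up above. Nothing later depends on\n",
        "# the exact values printed here.\n",
        "print('TensorFlow:', tf.__version__)\n",
        "print('GPUs:', tf.config.list_physical_devices('GPU'))\n",
        "print('checkpoint:', ckpt_path)\n",
        "print('image:', img_path, ' image_size:', image_size)"
      ],
      "execution_count": null,
      "outputs": []
    },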
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "GvdjcYpUVuQ5"
      },
      "source": [
        "## 0.2 View graph in TensorBoard"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "U2oz3r1LUDzr"
      },
      "source": [
        "!python -m model_inspect --model_name={MODEL} --logdir=logs &> /dev/null\n",
        "%load_ext tensorboard\n",
        "%tensorboard --logdir logs"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "vZk2dwOxrGhY"
      },
      "source": [
        "# 1. inference"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "_VaF_j7jdVCK"
      },
      "source": [
        "## 1.1 Benchmark network latency\n",
        "There are two types of latency:\n",
        "network latency and end-to-end latency.\n",
        "\n",
        "*   network latency: from the first conv op to the network class and box prediction.\n",
        "*   end-to-end latency: from image preprocessing, network, to the final postprocessing to generate a annotated new image.\n"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "R_3gL01UbDLH"
      },
      "source": [
        "# benchmark network latency\n",
        "!python -m tf2.inspector --mode=benchmark --model_name={MODEL} --hparams=\"mixed_precision=true\" --only_network\n",
        "\n",
        "# With colab + Tesla T4 GPU, here are the batch size 1 latency summary:\n",
        "# D0 (AP=33.5):  14.9ms,  FPS = 67.2   (batch size 8 FPS=)\n",
        "# D1 (AP=39.6):  22.7ms,  FPS = 44.1   (batch size 8 FPS=)\n",
        "# D2 (AP=43.0):  27.9ms,  FPS = 35.8   (batch size 8 FPS=)\n",
        "# D3 (AP=45.8):  48.1ms,  FPS = 20.8   (batch size 8 FPS=)\n",
        "# D4 (AP=49.4):  81.9ms,  FPS = 12.2   (batch size 8 FPS=)"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "VW95IodKovEu"
      },
      "source": [
        "## 1.2 Benchmark end-to-end latency"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "NSf6SrZcdavN"
      },
      "source": [
        "# Benchmark end-to-end latency (: preprocess + network + posprocess).\n",
        "#\n",
        "# With colab + Tesla T4 GPU, here are the batch size 1 latency summary:\n",
        "# D0 (AP=33.5): 22.7ms,  FPS = 43.1   (batch size 4, FPS=)\n",
        "# D1 (AP=39.6): 34.3ms,  FPS = 29.2   (batch size 4, FPS=)\n",
        "# D2 (AP=43.0): 42.5ms,  FPS = 23.5   (batch size 4, FPS=)\n",
        "# D3 (AP=45.8): 64.8ms,  FPS = 15.4   (batch size 4, FPS=)\n",
        "# D4 (AP=49.4): 93.7ms,  FPS = 10.7   (batch size 4, FPS=)\n",
        "\n",
        "batch_size = 1  # @param\n",
        "\n",
        "saved_model_dir = 'savedmodel'\n",
        "!rm -rf {saved_model_dir}\n",
        "\n",
        "!python -m tf2.inspector --mode=export --model_name={MODEL} \\\n",
        "  --model_dir={ckpt_path} --saved_model_dir={saved_model_dir} \\\n",
        "  --batch_size={batch_size}  --hparams=\"mixed_precision=true\"\n",
        "!python -m tf2.inspector --mode=benchmark --model_name={MODEL} \\\n",
        "  --saved_model_dir={saved_model_dir} \\\n",
        "  --batch_size=1  --hparams=\"mixed_precision=true\" --input_image=testdata/img1.jpg\n"
      ],
      "execution_count": null,
      "outputs": []
    },
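    {
      "cell_type": "code",
      "metadata": {},
      "source": [
        "# A rough manual-timing sketch (not the official benchmark above): load the\n",
        "# SavedModel exported in this section and time repeated calls on a dummy batch.\n",
        "# Assumptions: the export above wrote to `saved_model_dir` and the SavedModel\n",
        "# exposes the standard 'serving_default' signature; the dummy input is built\n",
        "# from the signature spec, with any unknown dimension filled with 640.\n",
        "import time\n",
        "\n",
        "model = tf.saved_model.load(saved_model_dir)\n",
        "infer = model.signatures['serving_default']\n",
        "name, spec = next(iter(infer.structured_input_signature[1].items()))\n",
        "shape = [d if d is not None else 640 for d in spec.shape.as_list()]\n",
        "dummy = tf.zeros(shape, dtype=spec.dtype)\n",
        "\n",
        "for _ in range(3):  # Warm up (tracing, memory allocation).\n",
        "  infer(**{name: dummy})\n",
        "\n",
        "runs = 20\n",
        "start = time.perf_counter()\n",
        "for _ in range(runs):\n",
        "  infer(**{name: dummy})\n",
        "elapsed = (time.perf_counter() - start) / runs\n",
        "print('~{:.1f} ms per batch of {}'.format(elapsed * 1000, shape[0]))"
      ],
      "execution_count": null,
      "outputs": []
    },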
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "jGKs3w2_ZXnu"
      },
      "source": [
        "## 1.3 Inference images.\n",
        "\n",
        "---\n",
        "\n"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "tlh_S6M9ahe5"
      },
      "source": [
        "# first export a saved model.\n",
        "saved_model_dir = 'savedmodel'\n",
        "!rm -rf {saved_model_dir}\n",
        "!python -m tf2.inspector --mode=export --model_name={MODEL} \\\n",
        "  --model_dir={ckpt_path} --saved_model_dir={saved_model_dir}\n",
        "\n",
        "# Then run saved_model_infer to do inference.\n",
        "# Notably: batch_size, image_size must be the same as when it is exported.\n",
        "serve_image_out = 'serve_image_out'\n",
        "!mkdir {serve_image_out}\n",
        "\n",
        "!python -m tf2.inspector --mode=infer \\\n",
        "  --saved_model_dir={saved_model_dir} \\\n",
        "  --model_name={MODEL}  --input_image=testdata/img1.jpg  \\\n",
        "  --output_image_dir={serve_image_out}"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "1q2x8s8GpUJz"
      },
      "source": [
        "from IPython import display\n",
        "display.display(display.Image(os.path.join(serve_image_out, '0.jpg')))"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "fHU46tfckaZo"
      },
      "source": [
        "# In case you need to specify different image size or batch size or #boxes, then\n",
        "# you need to export a new saved model and run the inferernce.\n",
        "\n",
        "serve_image_out = 'serve_image_out'\n",
        "!mkdir {serve_image_out}\n",
        "saved_model_dir = 'savedmodel'\n",
        "!rm -rf {saved_model_dir}\n",
        "\n",
        "# Step 1: export model\n",
        "!python -m tf2.inspector --mode=export \\\n",
        "  --model_name={MODEL} --model_dir={MODEL} \\\n",
        "  --hparams=\"image_size=1920x1280\" --saved_model_dir={saved_model_dir}\n",
        "\n",
        "# Step 2: do inference with saved model.\n",
        "!python -m tf2.inspector --mode=infer \\\n",
        "  --model_name={MODEL} --saved_model_dir={saved_model_dir} \\\n",
        "  --input_image=img.png --output_image_dir={serve_image_out} \\\n",
        "\n",
        "from IPython import display\n",
        "display.display(display.Image(os.path.join(serve_image_out, '0.jpg')))"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "Vxm-kvfuAZne"
      },
      "source": [
        "## 1.4 Inference video"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "3Pdnd1kQAgKY"
      },
      "source": [
        "# step 0: download video\n",
        "video_url = 'https://storage.googleapis.com/cloud-tpu-checkpoints/efficientdet/data/video480p.mov'  # @param\n",
        "!wget {video_url} -O input.mov\n",
        "\n",
        "# Step 1: export model\n",
        "saved_model_dir = 'savedmodel'\n",
        "!rm -rf {saved_model_dir}\n",
        "\n",
        "!python -m tf2.inspector --mode=export \\\n",
        "  --model_name={MODEL} --model_dir={MODEL} \\\n",
        "  --saved_model_dir={saved_model_dir} --hparams=\"mixed_precision=true\"\n",
        "\n",
        "# Step 2: do inference with saved model using saved_model_video\n",
        "!python -m tf2.inspector --mode=video \\\n",
        "  --model_name={MODEL} \\\n",
        "  --saved_model_dir={saved_model_dir} --hparams=\"mixed_precision=true\" \\\n",
        "  --input_video=input.mov --output_video=output.mov\n",
        "# Then you can view the output.mov"
      ],
      "execution_count": null,
      "outputs": []
    },
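    {
      "cell_type": "code",
      "metadata": {},
      "source": [
        "# Optional: preview output.mov inline. This is just a convenience sketch that\n",
        "# assumes ffmpeg is available (it is preinstalled on Colab); it re-encodes the\n",
        "# video to MP4 so the browser can play it in an HTML5 video tag.\n",
        "import base64\n",
        "from IPython.display import HTML\n",
        "\n",
        "!ffmpeg -y -loglevel error -i output.mov -vcodec libx264 output.mp4\n",
        "mp4 = open('output.mp4', 'rb').read()\n",
        "data_url = 'data:video/mp4;base64,' + base64.b64encode(mp4).decode()\n",
        "HTML('<video width=640 controls src=\"{}\"></video>'.format(data_url))"
      ],
      "execution_count": null,
      "outputs": []
    },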
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "M_r8Ja9aEqBP"
      },
      "source": [
        "# 2. TFlite"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "-eDhDTLQL6gx"
      },
      "source": [
        "## 2.1 COCO evaluation on validation set."
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "qZK6rk8OMBRZ"
      },
      "source": [
        "if 'val2017' not in os.listdir():\n",
        "  !wget http://images.cocodataset.org/zips/val2017.zip\n",
        "  !wget http://images.cocodataset.org/annotations/annotations_trainval2017.zip\n",
        "  !unzip -q val2017.zip\n",
        "  !unzip annotations_trainval2017.zip\n",
        "\n",
        "  !mkdir tfrecord\n",
        "  !python -m dataset.create_coco_tfrecord \\\n",
        "      --image_dir=val2017 \\\n",
        "      --object_annotations_file=annotations/instances_val2017.json \\\n",
        "      --output_file_prefix=tfrecord/val \\\n",
        "      --num_shards=32"
      ],
      "execution_count": null,
      "outputs": []
    },
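    {
      "cell_type": "code",
      "metadata": {},
      "source": [
        "# Optional sanity check: count the records in the tfrecord shards created above.\n",
        "# This only relies on the tfrecord/val* files written by the previous cell.\n",
        "files = tf.io.gfile.glob('tfrecord/val*')\n",
        "print(len(files), 'shards')\n",
        "num_records = sum(1 for _ in tf.data.TFRecordDataset(files))\n",
        "print('total records:', num_records)  # Should equal the 5000 val2017 images."
      ],
      "execution_count": null,
      "outputs": []
    },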
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "Cy6doZAcMZnX"
      },
      "source": [
        "## 2.2 TFlite export INT8 model"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "VgpOjjokE6_o"
      },
      "source": [
        "# In case you need to specify different image size or batch size or #boxes, then\n",
        "# you need to export a new saved model and run the inferernce.\n",
        "\n",
        "serve_image_out = 'serve_image_out'\n",
        "!mkdir {serve_image_out}\n",
        "saved_model_dir = 'savedmodel'\n",
        "!rm -rf {saved_model_dir}\n",
        "\n",
        "# # Step 1: export model\n",
        "!python -m tf2.inspector --mode=export --file_pattern=tfrecord/*.tfrecord \\\n",
        "  --model_name={MODEL} --model_dir={MODEL} --num_calibration_steps=100 \\\n",
        "  --saved_model_dir={saved_model_dir} --use_xla --tflite=INT8\n",
        "\n",
        "# Step 2: do inference with saved model.\n",
        "!python -m tf2.inspector --mode=infer  --use_xla \\\n",
        "  --model_name={MODEL} --saved_model_dir={saved_model_dir}/int8.tflite \\\n",
        "  --input_image=testdata/img1.jpg --output_image_dir={serve_image_out}\n",
        "\n",
        "from IPython import display\n",
        "display.display(display.Image(os.path.join(serve_image_out, '0.jpg')))"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "j7Xz1FQ2PB0Z"
      },
      "source": [
        "# Evalute on validation set (takes about 10 mins for efficientdet-d0)\n",
        "!python -m tf2.eval_tflite  \\\n",
        "    --model_name={MODEL}  --tflite_path={saved_model_dir}/int8.tflite   \\\n",
        "    --val_file_pattern=tfrecord/val* \\\n",
        "    --val_json_file=annotations/instances_val2017.json --eval_samples=100"
      ],
      "execution_count": null,
      "outputs": []
    },
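    {
      "cell_type": "code",
      "metadata": {},
      "source": [
        "# A minimal sketch of running the INT8 TFLite model directly with the TFLite\n",
        "# Interpreter, as an alternative to the eval/infer tools above. It assumes\n",
        "# {saved_model_dir}/int8.tflite was created by the export cell; input/output\n",
        "# details are read from the model rather than hard-coded.\n",
        "import numpy as np\n",
        "\n",
        "interpreter = tf.lite.Interpreter(\n",
        "    model_path=os.path.join(saved_model_dir, 'int8.tflite'))\n",
        "interpreter.allocate_tensors()\n",
        "\n",
        "inp = interpreter.get_input_details()[0]\n",
        "print('input:', inp['shape'], inp['dtype'])\n",
        "\n",
        "# Feed a dummy image of the expected shape/dtype; replace it with a real,\n",
        "# resized image for meaningful detections.\n",
        "interpreter.set_tensor(inp['index'], np.zeros(inp['shape'], dtype=inp['dtype']))\n",
        "interpreter.invoke()\n",
        "for out in interpreter.get_output_details():\n",
        "  print(out['name'], interpreter.get_tensor(out['index']).shape)"
      ],
      "execution_count": null,
      "outputs": []
    },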
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "xY14TFOWNNFl"
      },
      "source": [
        "## 2.3 Compile EdgeTPU model (Optional)"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "jGm0q7cSNSbs"
      },
      "source": [
        " # install edgetpu compiler\n",
        " !curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -\n",
        " !echo \"deb https://packages.cloud.google.com/apt coral-edgetpu-stable main\" | sudo tee /etc/apt/sources.list.d/coral-edgetpu.list\n",
        " !sudo apt-get update\n",
        " !sudo apt-get install edgetpu-compiler"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "z8FGuSJuOfer"
      },
      "source": [
        "The EdgeTPU has 8MB of SRAM for caching model paramaters ([more info](https://coral.ai/docs/edgetpu/compiler/#parameter-data-caching)). This means that for models that are larger than 8MB, inference time will be increased in order to transfer over model paramaters. One way to avoid this is [Model Pipelining](https://coral.ai/docs/edgetpu/pipeline/) - splitting the model into segments that can have a dedicated EdgeTPU. This can significantly improve latency.\n",
        "\n",
        "The below table can be used as a reference for the number of Edge TPUs to use - the larger models will not compile for a single TPU as the intermediate tensors can't fit in on-chip memory.\n",
        "\n",
        "| Model architecture | Minimum TPUs | Recommended TPUs\n",
        "|--------------------|-------|-------|\n",
        "| EfficientDet-Lite0 | 1     | 1     |\n",
        "| EfficientDet-Lite1 | 1     | 1     |\n",
        "| EfficientDet-Lite2 | 1     | 2     |\n",
        "| EfficientDet-Lite3 | 2     | 2     |\n",
        "| EfficientDet-Lite4 | 2     | 3     |"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "WpSryQzZODse"
      },
      "source": [
        "NUMBER_OF_TPUS = 1\n",
        "!edgetpu_compiler {saved_model_dir}/int8.tflite --num_segments=$NUMBER_OF_TPUS"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "RW26DwfirQQN"
      },
      "source": [
        "# 3. COCO evaluation"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "cfn_tRFOWKMO"
      },
      "source": [
        "## 3.1 COCO evaluation on validation set."
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "24l4uI15MJN6"
      },
      "source": [
        "if 'val2017' not in os.listdir():\n",
        "  !wget http://images.cocodataset.org/zips/val2017.zip\n",
        "  !wget http://images.cocodataset.org/annotations/annotations_trainval2017.zip\n",
        "  !unzip -q val2017.zip\n",
        "  !unzip annotations_trainval2017.zip\n",
        "\n",
        "  !mkdir tfrecord\n",
        "  !python -m dataset.create_coco_tfrecord \\\n",
        "      --image_dir=val2017 \\\n",
        "      --object_annotations_file=annotations/instances_val2017.json \\\n",
        "      --output_file_prefix=tfrecord/val \\\n",
        "      --num_shards=32"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "eLHZUY3jQpZr"
      },
      "source": [
        "# Evalute on validation set (takes about 10 mins for efficientdet-d0)\n",
        "!python -m tf2.eval  \\\n",
        "    --model_name={MODEL}  --model_dir={ckpt_path}  \\\n",
        "    --val_file_pattern=tfrecord/val*  \\\n",
        "    --val_json_file=annotations/instances_val2017.json"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "RW90fiMiyg4n"
      },
      "source": [
        "# 4. Training EfficientDets on PASCAL."
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "C98Ye0MEyuKD"
      },
      "source": [
        "## 4.1 Prepare data"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "6PC6QrMlylOF"
      },
      "source": [
        "# Get pascal voc 2012 trainval data\n",
        "import os\n",
        "if 'VOCdevkit' not in os.listdir():\n",
        "  !wget http://host.robots.ox.ac.uk/pascal/VOC/voc2007/VOCtrainval_06-Nov-2007.tar\n",
        "  !tar xf VOCtrainval_06-Nov-2007.tar\n",
        "\n",
        "  !mkdir tfrecord\n",
        "  !python -m dataset.create_pascal_tfrecord  \\\n",
        "    --data_dir=VOCdevkit --year=VOC2007  --output_path=tfrecord/pascal\n",
        "\n",
        "# Pascal has 5717 train images with 100 shards epoch, here we use a single shard\n",
        "# for demo, but users should use all shards pascal-*-of-00100.tfrecord.\n",
        "file_pattern = 'pascal-00000-of-00100.tfrecord'  # @param\n",
        "images_per_epoch = 57 * len(tf.io.gfile.glob('tfrecord/' + file_pattern))\n",
        "images_per_epoch = images_per_epoch // 8 * 8  # round to 64.\n",
        "print('images_per_epoch = {}'.format(images_per_epoch))"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "ZcxDDCCW0ndv"
      },
      "source": [
        "## 4.2 Train Pascal VOC 2007 from ImageNet checkpoint for Backbone."
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "SHPgm9Q13X-l"
      },
      "source": [
        "# Train efficientdet from scratch with backbone checkpoint.\n",
        "backbone_name = {\n",
        "    'efficientdet-d0': 'efficientnet-b0',\n",
        "    'efficientdet-d1': 'efficientnet-b1',\n",
        "    'efficientdet-d2': 'efficientnet-b2',\n",
        "    'efficientdet-d3': 'efficientnet-b3',\n",
        "    'efficientdet-d4': 'efficientnet-b4',\n",
        "    'efficientdet-d5': 'efficientnet-b5',\n",
        "    'efficientdet-d6': 'efficientnet-b6',\n",
        "    'efficientdet-d7': 'efficientnet-b6',\n",
        "    'efficientdet-lite0': 'efficientnet-lite0',\n",
        "    'efficientdet-lite1': 'efficientnet-lite1',\n",
        "    'efficientdet-lite2': 'efficientnet-lite2',\n",
        "    'efficientdet-lite3': 'efficientnet-lite3',\n",
        "    'efficientdet-lite3x': 'efficientnet-lite3',\n",
        "    'efficientdet-lite4': 'efficientnet-lite4',\n",
        "}[MODEL]\n",
        "\n",
        "\n",
        "# generating train tfrecord is large, so we skip the execution here.\n",
        "import os\n",
        "if backbone_name not in os.listdir():\n",
        "  if backbone_name.find('lite'):\n",
        "    !wget https://storage.googleapis.com/cloud-tpu-checkpoints/efficientnet/lite/{backbone_name}.tar.gz\n",
        "  else:\n",
        "    !wget https://storage.googleapis.com/cloud-tpu-checkpoints/efficientnet/ckptsaug/{backbone_name}.tar.gz\n",
        "  !tar xf {backbone_name}.tar.gz\n",
        "\n",
        "!mkdir /tmp/model_dir\n",
        "# key option: use --backbone_ckpt rather than --ckpt.\n",
        "# Don't use ema since we only train a few steps.\n",
        "!python -m tf2.train --mode=traineval \\\n",
        "    --train_file_pattern=tfrecord/{file_pattern} \\\n",
        "    --val_file_pattern=tfrecord/{file_pattern} \\\n",
        "    --model_name={MODEL} \\\n",
        "    --model_dir=/tmp/model_dir/{MODEL}-scratch  \\\n",
        "    --pretrained_ckpt={backbone_name} \\\n",
        "    --batch_size=16 \\\n",
        "    --eval_samples={images_per_epoch}  \\\n",
        "    --num_examples_per_epoch={images_per_epoch}  --num_epochs=1  \\\n",
        "    --hparams=\"num_classes=20,moving_average_decay=0,mixed_precision=true\""
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "SKHu-3lBwTiM"
      },
      "source": [
        "## 4.3 Train Pascal VOC 2007 from COCO checkpoint for the whole net."
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "SD59rsZJc1WW"
      },
      "source": [
        "# generating train tfrecord is large, so we skip the execution here.\n",
        "import os\n",
        "if MODEL not in os.listdir():\n",
        "  download(MODEL)\n",
        "\n",
        "!mkdir /tmp/model_dir/\n",
        "# key option: use --ckpt rather than --backbone_ckpt.\n",
        "!python -m tf2.train --mode=traineval \\\n",
        "    --train_file_pattern=tfrecord/{file_pattern} \\\n",
        "    --val_file_pattern=tfrecord/{file_pattern} \\\n",
        "    --model_name={MODEL} \\\n",
        "    --model_dir=/tmp/model_dir/{MODEL}-finetune \\\n",
        "    --pretrained_ckpt={MODEL} \\\n",
        "    --batch_size=16 \\\n",
        "    --eval_samples={images_per_epoch}  \\\n",
        "    --num_examples_per_epoch={images_per_epoch}  --num_epochs=1  \\\n",
        "    --hparams=\"num_classes=20,moving_average_decay=0,mixed_precision=true\""
      ],
      "execution_count": null,
      "outputs": []
    },
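    {
      "cell_type": "code",
      "metadata": {},
      "source": [
        "# Optional: export the just-fine-tuned checkpoint and run inference with it,\n",
        "# reusing the same tf2.inspector flags shown earlier. This sketch assumes the\n",
        "# training run above wrote a checkpoint to /tmp/model_dir/{MODEL}-finetune;\n",
        "# keep num_classes=20 so the exported head matches the Pascal training config.\n",
        "# Note: the visualization uses the default label map, so class names may not\n",
        "# match Pascal classes; this is only a sanity check of the exported model.\n",
        "finetuned_saved_model_dir = 'savedmodel_finetuned'\n",
        "!rm -rf {finetuned_saved_model_dir}\n",
        "\n",
        "!python -m tf2.inspector --mode=export --model_name={MODEL} \\\n",
        "  --model_dir=/tmp/model_dir/{MODEL}-finetune \\\n",
        "  --saved_model_dir={finetuned_saved_model_dir} \\\n",
        "  --hparams=\"num_classes=20,moving_average_decay=0\"\n",
        "\n",
        "!python -m tf2.inspector --mode=infer --model_name={MODEL} \\\n",
        "  --saved_model_dir={finetuned_saved_model_dir} \\\n",
        "  --hparams=\"num_classes=20,moving_average_decay=0\" \\\n",
        "  --input_image=img.png --output_image_dir=serve_image_out\n",
        "\n",
        "from IPython import display\n",
        "display.display(display.Image(os.path.join('serve_image_out', '0.jpg')))"
      ],
      "execution_count": null,
      "outputs": []
    },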
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "QcBGPMCXRC8q"
      },
      "source": [
        "## 4.4 View tensorboard for loss and accuracy.\n"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "Vrkty06SRD0k"
      },
      "source": [
        "%load_ext tensorboard\n",
        "%tensorboard --logdir /tmp/model_dir/\n",
        "# Notably, this is just a demo with almost zero accuracy due to very limited\n",
        "# training steps, but we can see finetuning has smaller loss than training\n",
        "# from scratch at the begining."
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "7y4kKRr5ChwT"
      },
      "source": [
        "## 5. Export to onnx\n"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "eh6pRLjOCsQo"
      },
      "source": [
        "!pip install tf2onnx"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "01GwBpcJDXVC"
      },
      "source": [
        "!python -m tf2.inspector --mode=export --model_name={MODEL} --model_dir={MODEL} --saved_model_dir={saved_model_dir} --hparams=\"nms_configs.method='hard', nms_configs.iou_thresh=0.5, nms_configs.sigma=0.0\""
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "jhIxTWHLCvgO"
      },
      "source": [
        "!python -m tf2onnx.convert --saved-model={saved_model_dir} --output={saved_model_dir}/model.onnx  --opset=11"
      ],
      "execution_count": null,
      "outputs": []
    }
  ]
}