{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Deploy Single Shot Multibox Detector (SSD) model"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "**Author**: *Yao Wang*, *Leyuan Wang*\n",
    "\n",
    "This article is an introductory tutorial to deploy SSD models with TVM.\n",
    "We will use a GluonCV pre-trained SSD model and convert it to Relay IR."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Install build/tuning dependencies. %pip (rather than !pip) guarantees the\n",
    "# packages are installed into the environment of the *running kernel*.\n",
    "%pip install -i https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple/ --user psutil xgboost==1.0.2 tornado\n",
    "%pip install -i https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple/ --user antlr4-python3-runtime\n",
    "%pip install -i https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple/ --user gluoncv"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Prefer the local TVM checkout (tvm_upstream) over any installed copy.\n",
    "import sys\n",
    "sys.path.insert(0, \"tvm_upstream/python\")\n",
    "sys.path.insert(0, \"tvm_upstream/topi/python\")\n",
    "\n",
    "import tvm\n",
    "from tvm import te\n",
    "\n",
    "from matplotlib import pyplot as plt\n",
    "from tvm.relay.testing.config import ctx_list  # enumerates (target, ctx) pairs enabled in this TVM build\n",
    "from tvm import relay\n",
    "from tvm.contrib import graph_runtime\n",
    "from tvm.contrib.download import download_testdata\n",
    "from gluoncv import model_zoo, data, utils"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Tell TVM's CUDA code generator which GPU architecture to target.\n",
    "# sm_37 corresponds to the NVIDIA Tesla K80 (Kepler); change for your GPU.\n",
    "from tvm.autotvm.measure.measure_methods import set_cuda_target_arch\n",
    "set_cuda_target_arch('sm_37')"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Preliminary and Set parameters\n",
    "\n",
    "We support compiling SSD on both CPUs and GPUs now.\n",
    "\n",
    "To get the best inference performance on CPU, change the\n",
    "target argument according to your device and follow the\n",
    "`tune_relay_x86` tutorial to tune x86 CPUs and\n",
    "`tune_relay_arm` for ARM CPUs.\n",
    "\n",
    "To get best inference performance on Intel graphics,\n",
    "change target argument to :code:`opencl -device=intel_graphics`.\n",
    "But when using Intel graphics on Mac, target needs to\n",
    "be set to `opencl` only for the reason that Intel subgroup\n",
    "extension is not supported on Mac.\n",
    "\n",
    "To get best inference performance on CUDA-based GPUs,\n",
    "change the target argument to :code:`cuda`; and for\n",
    "OPENCL-based GPUs, change target argument to\n",
    "`opencl` followed by device argument according\n",
    "to your device."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Models known to convert cleanly through the MXNet -> Relay frontend.\n",
    "supported_model = [\n",
    "    'ssd_512_resnet50_v1_voc',\n",
    "    'ssd_512_resnet50_v1_coco',\n",
    "    'ssd_512_resnet101_v2_voc',\n",
    "    'ssd_512_mobilenet1.0_voc',\n",
    "    'ssd_512_mobilenet1.0_coco',\n",
    "    'ssd_300_vgg16_atrous_voc',  # comma was missing here: implicit string concatenation silently merged this entry with the next one\n",
    "    'ssd_512_vgg16_atrous_coco',\n",
    "]\n",
    "\n",
    "model_name = supported_model[0]\n",
    "dshape = (1, 3, 512, 512)  # NCHW batch shape fed to the 512x512 SSD models\n",
    "target_list = ctx_list()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Download and pre-process demo image"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Pre-seed TVM's test-data cache with a local copy so download_testdata\n",
    "# can resolve the file without network access.\n",
    "!mkdir -p ~/.tvm_test_data/data/\n",
    "!cp street_small.jpg ~/.tvm_test_data/data/\n",
    "im_fname = download_testdata('https://github.com/dmlc/web-data/blob/master/' +\n",
    "                             'gluoncv/detection/street_small.jpg?raw=true',\n",
    "                             'street_small.jpg', module='data')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "x, img = data.transforms.presets.ssd.load_test(im_fname, short=512)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Convert and compile model for CPU."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Pre-seed the MXNet model cache with the pretrained weights so\n",
    "# model_zoo.get_model(..., pretrained=True) does not download them.\n",
    "# NOTE(review): this bucket is account/region specific — confirm access.\n",
    "!aws s3 cp s3://sagemaker-cn-north-1-338598867091/ssd_512_resnet50_v1_voc-9c8b225a.zip ~/.mxnet/models/\n",
    "!cd ~/.mxnet/models/ && unzip ssd_512_resnet50_v1_voc-9c8b225a.zip"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "block = model_zoo.get_model(model_name, pretrained=True)\n",
    "\n",
    "def build(target):\n",
    "    \"\"\"Convert the GluonCV block to Relay and compile it for `target`.\n",
    "\n",
    "    Returns the (graph, lib, params) triple consumed by graph_runtime.\n",
    "    \"\"\"\n",
    "    mod, params = relay.frontend.from_mxnet(block, {\"data\": dshape})\n",
    "    with relay.build_config(opt_level=3):\n",
    "        graph, lib, params = relay.build(mod, target, params=params)\n",
    "    return graph, lib, params"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Create TVM runtime and do inference"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import time\n",
    "\n",
    "def run(graph, lib, params, ctx):\n",
    "    \"\"\"Run SSD inference on the preprocessed image `x` and time it.\n",
    "\n",
    "    Returns (class_IDs, scores, bounding_boxs) for batch element 0.\n",
    "    The module is executed twice: the first run is a warm-up (driver/JIT\n",
    "    initialization) and is discarded; the second, steady-state run is the\n",
    "    one timed and printed. Previously this was an uncommented copy-paste\n",
    "    of the same measurement block.\n",
    "    \"\"\"\n",
    "    m = graph_runtime.create(graph, lib, ctx)\n",
    "    m.set_input('data', tvm.nd.array(x.asnumpy(), ctx=ctx))\n",
    "    m.set_input(**params)\n",
    "\n",
    "    def _timed_run():\n",
    "        # Execute and fetch the three SSD outputs for batch element 0;\n",
    "        # the output copy is included in the timing, as before.\n",
    "        tic = time.time()\n",
    "        m.run()\n",
    "        outputs = tuple(m.get_output(i).asnumpy()[0] for i in range(3))\n",
    "        toc = time.time()\n",
    "        return outputs, (toc - tic) * 1000.0\n",
    "\n",
    "    _timed_run()  # warm-up, not reported\n",
    "    (class_IDs, scores, bounding_boxs), elapsed_ms = _timed_run()\n",
    "    print(\"done! elapsed: {:.2f} ms.\".format(elapsed_ms))\n",
    "\n",
    "    return class_IDs, scores, bounding_boxs\n",
    "\n",
    "for target, ctx in target_list:\n",
    "    graph, lib, params = build(target)\n",
    "    print(\"running on {}\".format(target))\n",
    "    class_IDs, scores, bounding_boxs = run(graph, lib, params, ctx)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Display result"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Draw detection boxes on the original image; class names come from the\n",
    "# pretrained model's label set.\n",
    "ax = utils.viz.plot_bbox(img, bounding_boxs, scores,\n",
    "                         class_IDs, class_names=block.classes)\n",
    "plt.show()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Begin Tuning"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "from tvm import autotvm\n",
    "from tvm.autotvm.tuner import XGBTuner, GATuner, RandomTuner, GridSearchTuner\n",
    "\n",
    "#### DEVICE CONFIG ####\n",
    "target = tvm.target.cuda(model=\"k80\")\n",
    "from tvm.autotvm.measure.measure_methods import set_cuda_target_arch\n",
    "set_cuda_target_arch('sm_37')\n",
    "\n",
    "# You can skip the implementation of this function for this tutorial.\n",
    "def tune_tasks(tasks,\n",
    "               measure_option,\n",
    "               tuner='xgb',\n",
    "               n_trial=1000,\n",
    "               early_stopping=None,\n",
    "               log_filename='tuning.log',\n",
    "               use_transfer_learning=True):\n",
    "    \"\"\"Tune every task, appending records to a temporary log, then keep\n",
    "    only the best record per workload in `log_filename`.\n",
    "    \"\"\"\n",
    "    # Start from a clean temporary log file.\n",
    "    tmp_log_file = log_filename + \".tmp\"\n",
    "    if os.path.exists(tmp_log_file):\n",
    "        os.remove(tmp_log_file)\n",
    "\n",
    "    num_tasks = len(tasks)\n",
    "    for i, tsk in enumerate(reversed(tasks)):\n",
    "        prefix = \"[Task %2d/%2d] \" % (i + 1, num_tasks)\n",
    "\n",
    "        # Pick the tuner implementation requested by name.\n",
    "        if tuner in ('xgb', 'xgb-rank'):\n",
    "            tuner_obj = XGBTuner(tsk, loss_type='rank')\n",
    "        elif tuner == 'ga':\n",
    "            tuner_obj = GATuner(tsk, pop_size=100)\n",
    "        elif tuner == 'random':\n",
    "            tuner_obj = RandomTuner(tsk)\n",
    "        elif tuner == 'gridsearch':\n",
    "            tuner_obj = GridSearchTuner(tsk)\n",
    "        else:\n",
    "            raise ValueError(\"Invalid tuner: \" + tuner)\n",
    "\n",
    "        # Warm-start from records gathered for earlier tasks.\n",
    "        if use_transfer_learning and os.path.isfile(tmp_log_file):\n",
    "            tuner_obj.load_history(autotvm.record.load_from_file(tmp_log_file))\n",
    "\n",
    "        # Never request more trials than the task's config space holds.\n",
    "        tsk_trial = min(n_trial, len(tsk.config_space))\n",
    "        tuner_obj.tune(n_trial=tsk_trial,\n",
    "                       early_stopping=early_stopping,\n",
    "                       measure_option=measure_option,\n",
    "                       callbacks=[\n",
    "                           autotvm.callback.progress_bar(tsk_trial, prefix=prefix),\n",
    "                           autotvm.callback.log_to_file(tmp_log_file)\n",
    "                       ])\n",
    "\n",
    "    # Distill the temporary log into the final best-records file.\n",
    "    autotvm.record.pick_best(tmp_log_file, log_filename)\n",
    "    os.remove(tmp_log_file)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Finally, we launch tuning jobs and evaluate the end-to-end performance.\n",
    "# Fix: np, runtime and tempdir were used below but never imported anywhere\n",
    "# in this notebook, which raised NameError at runtime.\n",
    "import numpy as np\n",
    "from tvm.contrib import graph_runtime as runtime\n",
    "from tvm.contrib.util import tempdir\n",
    "\n",
    "def tune_and_evaluate(tuning_opt):\n",
    "    \"\"\"Extract conv2d tasks from the model, tune them, then benchmark the\n",
    "    best-tuned build.\n",
    "\n",
    "    Reads module-level `block`, `dshape`, `target`, plus `log_file` and\n",
    "    `dtype` — the last two are defined in the next cell, so run that cell\n",
    "    before calling this function.\n",
    "    \"\"\"\n",
    "    # extract workloads from relay program\n",
    "    print(\"Extract tasks...\")\n",
    "    mod, params = relay.frontend.from_mxnet(block, {\"data\": dshape})\n",
    "    input_shape = dshape\n",
    "    tasks = autotvm.task.extract_from_program(mod[\"main\"], target=target,\n",
    "                                              params=params,\n",
    "                                              ops=(relay.op.get(\"nn.conv2d\"),))\n",
    "\n",
    "    # run tuning tasks\n",
    "    print(\"Tuning...\")\n",
    "    tune_tasks(tasks, **tuning_opt)\n",
    "\n",
    "    # compile kernels with history best records\n",
    "    with autotvm.apply_history_best(log_file):\n",
    "        print(\"Compile...\")\n",
    "        with relay.build_config(opt_level=3):\n",
    "            graph, lib, params = relay.build_module.build(\n",
    "                mod, target=target, params=params)\n",
    "\n",
    "        # export library\n",
    "        tmp = tempdir()\n",
    "        filename = \"net.tar\"\n",
    "        lib.export_library(tmp.relpath(filename))\n",
    "\n",
    "        # load parameters\n",
    "        ctx = tvm.context(str(target), 0)\n",
    "        module = runtime.create(graph, lib, ctx)\n",
    "        data_tvm = tvm.nd.array((np.random.uniform(size=input_shape)).astype(dtype))\n",
    "        module.set_input('data', data_tvm)\n",
    "        module.set_input(**params)\n",
    "\n",
    "        # evaluate\n",
    "        print(\"Evaluate inference time cost...\")\n",
    "        ftimer = module.module.time_evaluator(\"run\", ctx, number=1, repeat=600)\n",
    "        prof_res = np.array(ftimer().results) * 1000  # convert to millisecond\n",
    "        print(\"Mean inference time (std dev): %.2f ms (%.2f ms)\" %\n",
    "              (np.mean(prof_res), np.std(prof_res)))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "#### TUNING OPTION ####\n",
    "network = model_name\n",
    "log_file = \"%s.log\" % network  # best tuning records end up here\n",
    "dtype = 'float32'\n",
    "\n",
    "tuning_option = {\n",
    "    'log_filename': log_file,\n",
    "\n",
    "    'tuner': 'xgb',\n",
    "    'n_trial': 200,\n",
    "    'early_stopping': 60,\n",
    "\n",
    "    'measure_option': autotvm.measure_option(\n",
    "        builder=autotvm.LocalBuilder(timeout=10),\n",
    "        #runner=autotvm.LocalRunner(number=20, repeat=3, timeout=4, min_repeat_ms=150),\n",
    "        # NOTE(review): RPCRunner assumes an RPC tracker is listening on port 9190\n",
    "        # with a device registered under key '1080ti' — start one, or switch to\n",
    "        # the commented-out LocalRunner above.\n",
    "        runner=autotvm.RPCRunner(\n",
    "            '1080ti',  # change the device key to your key\n",
    "            '0.0.0.0', 9190,\n",
    "            number=20, repeat=3, timeout=4, min_repeat_ms=150)\n",
    "    ),\n",
    "}\n",
    "\n",
    "tune_and_evaluate(tuning_option)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "conda_mxnet_p36",
   "language": "python",
   "name": "conda_mxnet_p36"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.5"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
