{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Compiling and Optimizing a Model with TVMC\n",
    "https://tvm.apache.org/docs/tutorial/tvmc_command_line_driver.html\n",
    "https://colab.research.google.com/github/apache/tvm-site/blob/asf-site/docs/_downloads/efe0b02e219b28e0bd85fbdda35ba8ac/tvmc_command_line_driver.ipynb#scrollTo=0qj2kA1OBL2e\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "--2023-08-27 19:17:21--  https://github.com/onnx/models/raw/b9a54e89508f101a1611cd64f4ef56b9cb62c7cf/vision/classification/resnet/model/resnet50-v2-7.onnx\n",
      "Resolving github.com (github.com)... 192.30.255.113, 198.51.44.8, 198.51.45.8, ...\n",
      "Connecting to github.com (github.com)|192.30.255.113|:443... connected.\n",
      "HTTP request sent, awaiting response... 302 Found\n",
      "Location: https://media.githubusercontent.com/media/onnx/models/b9a54e89508f101a1611cd64f4ef56b9cb62c7cf/vision/classification/resnet/model/resnet50-v2-7.onnx [following]\n",
      "--2023-08-27 19:17:23--  https://media.githubusercontent.com/media/onnx/models/b9a54e89508f101a1611cd64f4ef56b9cb62c7cf/vision/classification/resnet/model/resnet50-v2-7.onnx\n",
      "Resolving media.githubusercontent.com (media.githubusercontent.com)... 185.199.108.133, 185.199.109.133, 185.199.110.133, ...\n",
      "Connecting to media.githubusercontent.com (media.githubusercontent.com)|185.199.108.133|:443... connected.\n",
      "HTTP request sent, awaiting response... 200 OK\n",
      "Length: 102442450 (98M) [application/octet-stream]\n",
      "Saving to: ‘resnet50-v2-7.onnx’\n",
      "\n",
      "resnet50-v2-7.onnx  100%[===================>]  97.70M  1.63MB/s    in 6m 59s  \n",
      "\n",
      "2023-08-27 19:24:28 (239 KB/s) - ‘resnet50-v2-7.onnx’ saved [102442450/102442450]\n",
      "\n"
     ]
    }
   ],
   "source": [
    "!wget https://github.com/onnx/models/raw/b9a54e89508f101a1611cd64f4ef56b9cb62c7cf/vision/classification/resnet/model/resnet50-v2-7.onnx"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Error: Input file 'resnet50-v2-7.onnx' doesn't exist, is a broken symbolic link, or a directory.\n"
     ]
    }
   ],
   "source": [
    "!tvmc compile --target \"llvm\" --input-shapes \"data:[1,3,224,224]\" --output resnet50-v2-7-tvm.tar resnet50-v2-7.onnx"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "!mkdir -p model\n",
    "!tar -xvf resnet50-v2-7-tvm.tar -C model\n",
    "!ls model"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "https://tvm.apache.org/docs/tutorial/tvmc_python.html\n",
    "\n",
    "Getting Started using TVMC Python: a high-level API for TVM"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "from tvm.driver import tvmc"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Collecting onnx\n",
      "  Obtaining dependency information for onnx from https://files.pythonhosted.org/packages/ff/24/0e522fdcadf0e15fc304145a5b6e5d7246d7f2c507fd9bfe6e1fafb2aa95/onnx-1.14.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata\n",
      "  Downloading onnx-1.14.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata (15 kB)\n",
      "Collecting onnxruntime\n",
      "  Obtaining dependency information for onnxruntime from https://files.pythonhosted.org/packages/50/d5/f156d808c9cc59e7a8b87843a5313719d01eec7d3b17ca3f6b6f0fbee0f0/onnxruntime-1.15.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata\n",
      "  Downloading onnxruntime-1.15.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata (4.0 kB)\n",
      "Collecting onnxoptimizer\n",
      "  Downloading onnxoptimizer-0.3.13-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (678 kB)\n",
      "\u001b[2K     \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m678.2/678.2 kB\u001b[0m \u001b[31m26.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
      "\u001b[?25hRequirement already satisfied: numpy in /home/xling/miniconda2/envs/tvm/lib/python3.9/site-packages (from onnx) (1.25.2)\n",
      "Collecting protobuf>=3.20.2 (from onnx)\n",
      "  Obtaining dependency information for protobuf>=3.20.2 from https://files.pythonhosted.org/packages/f3/d8/fb02c40aa129c385430d177e4d9fa0160cb89be29305c8760861e538a2e4/protobuf-4.24.2-cp37-abi3-manylinux2014_x86_64.whl.metadata\n",
      "  Downloading protobuf-4.24.2-cp37-abi3-manylinux2014_x86_64.whl.metadata (540 bytes)\n",
      "Requirement already satisfied: typing-extensions>=3.6.2.1 in /home/xling/miniconda2/envs/tvm/lib/python3.9/site-packages (from onnx) (4.7.1)\n",
      "Collecting coloredlogs (from onnxruntime)\n",
      "  Downloading coloredlogs-15.0.1-py2.py3-none-any.whl (46 kB)\n",
      "\u001b[2K     \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m46.0/46.0 kB\u001b[0m \u001b[31m18.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
      "\u001b[?25hCollecting flatbuffers (from onnxruntime)\n",
      "  Obtaining dependency information for flatbuffers from https://files.pythonhosted.org/packages/6f/12/d5c79ee252793ffe845d58a913197bfa02ae9a0b5c9bc3dc4b58d477b9e7/flatbuffers-23.5.26-py2.py3-none-any.whl.metadata\n",
      "  Downloading flatbuffers-23.5.26-py2.py3-none-any.whl.metadata (850 bytes)\n",
      "Requirement already satisfied: packaging in /home/xling/miniconda2/envs/tvm/lib/python3.9/site-packages (from onnxruntime) (23.0)\n",
      "Collecting sympy (from onnxruntime)\n",
      "  Using cached sympy-1.12-py3-none-any.whl (5.7 MB)\n",
      "Collecting humanfriendly>=9.1 (from coloredlogs->onnxruntime)\n",
      "  Downloading humanfriendly-10.0-py2.py3-none-any.whl (86 kB)\n",
      "\u001b[2K     \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m86.8/86.8 kB\u001b[0m \u001b[31m17.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
      "\u001b[?25hCollecting mpmath>=0.19 (from sympy->onnxruntime)\n",
      "  Using cached mpmath-1.3.0-py3-none-any.whl (536 kB)\n",
      "Downloading onnx-1.14.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (14.6 MB)\n",
      "\u001b[2K   \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m14.6/14.6 MB\u001b[0m \u001b[31m52.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m:00:01\u001b[0m00:01\u001b[0m\n",
      "\u001b[?25hDownloading onnxruntime-1.15.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (5.9 MB)\n",
      "\u001b[2K   \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m5.9/5.9 MB\u001b[0m \u001b[31m16.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m00:01\u001b[0m00:01\u001b[0m\n",
      "\u001b[?25hDownloading protobuf-4.24.2-cp37-abi3-manylinux2014_x86_64.whl (311 kB)\n",
      "\u001b[2K   \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m311.4/311.4 kB\u001b[0m \u001b[31m10.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
      "\u001b[?25hDownloading flatbuffers-23.5.26-py2.py3-none-any.whl (26 kB)\n",
      "Installing collected packages: mpmath, flatbuffers, sympy, protobuf, humanfriendly, onnx, coloredlogs, onnxruntime, onnxoptimizer\n",
      "Successfully installed coloredlogs-15.0.1 flatbuffers-23.5.26 humanfriendly-10.0 mpmath-1.3.0 onnx-1.14.1 onnxoptimizer-0.3.13 onnxruntime-1.15.1 protobuf-4.24.2 sympy-1.12\n"
     ]
    }
   ],
   "source": [
    "%pip install onnx onnxruntime onnxoptimizer"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "One or more operators have not been tuned. Please tune your model for better performance. Use DEBUG logging level to see more details.\n",
      "2023-08-29 11:13:00.015 INFO load_module /tmp/tmp3x5goe7d/mod.so\n"
     ]
    }
   ],
   "source": [
    "model = tvmc.load('resnet50-v2-7.onnx') #Step 1: Load\n",
    "package = tvmc.compile(model, target=\"llvm\") #Step 2: Compile\n",
    "result = tvmc.run(package, device=\"cpu\") #Step 3: Run\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[Task  1/25]  Current/Best:   25.91/  27.64 GFLOPS | Progress: (62/400) | 50.85 s"
     ]
    },
    {
     "ename": "KeyboardInterrupt",
     "evalue": "",
     "output_type": "error",
     "traceback": [
      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[0;31mKeyboardInterrupt\u001b[0m                         Traceback (most recent call last)",
      "Cell \u001b[0;32mIn[8], line 1\u001b[0m\n\u001b[0;32m----> 1\u001b[0m tvmc\u001b[39m.\u001b[39;49mtune(model, target\u001b[39m=\u001b[39;49m\u001b[39m\"\u001b[39;49m\u001b[39mllvm\u001b[39;49m\u001b[39m\"\u001b[39;49m)\n",
      "File \u001b[0;32m~/miniconda2/envs/tvm/lib/python3.9/site-packages/tvm/driver/tvmc/autotuner.py:498\u001b[0m, in \u001b[0;36mtune_model\u001b[0;34m(tvmc_model, target, tuning_records, prior_records, enable_autoscheduler, rpc_key, hostname, port, trials, target_host, tuner, min_repeat_ms, early_stopping, desired_layout, timeout, repeat, number, parallel, hardware_params, include_simple_tasks, log_estimated_latency, additional_target_options)\u001b[0m\n\u001b[1;32m    487\u001b[0m     tuning_options \u001b[39m=\u001b[39m {\n\u001b[1;32m    488\u001b[0m         \u001b[39m\"\u001b[39m\u001b[39mtuner\u001b[39m\u001b[39m\"\u001b[39m: tuner,\n\u001b[1;32m    489\u001b[0m         \u001b[39m\"\u001b[39m\u001b[39mtrials\u001b[39m\u001b[39m\"\u001b[39m: trials,\n\u001b[0;32m   (...)\u001b[0m\n\u001b[1;32m    494\u001b[0m         \u001b[39m\"\u001b[39m\u001b[39mtuning_records\u001b[39m\u001b[39m\"\u001b[39m: prior_records,\n\u001b[1;32m    495\u001b[0m     }\n\u001b[1;32m    496\u001b[0m     logger\u001b[39m.\u001b[39minfo(\u001b[39m\"\u001b[39m\u001b[39mAutotuning with configuration: \u001b[39m\u001b[39m%s\u001b[39;00m\u001b[39m\"\u001b[39m, tuning_options)\n\u001b[0;32m--> 498\u001b[0m     tune_tasks(tasks, tuning_records, \u001b[39m*\u001b[39;49m\u001b[39m*\u001b[39;49mtuning_options)\n\u001b[1;32m    500\u001b[0m \u001b[39mreturn\u001b[39;00m tuning_records\n",
      "File \u001b[0;32m~/miniconda2/envs/tvm/lib/python3.9/site-packages/tvm/driver/tvmc/autotuner.py:699\u001b[0m, in \u001b[0;36mtune_tasks\u001b[0;34m(tasks, log_file, measure_option, tuner, trials, early_stopping, tuning_records)\u001b[0m\n\u001b[1;32m    696\u001b[0m     tuner_obj\u001b[39m.\u001b[39mload_history(autotvm\u001b[39m.\u001b[39mrecord\u001b[39m.\u001b[39mload_from_file(tuning_records))\n\u001b[1;32m    697\u001b[0m     logging\u001b[39m.\u001b[39minfo(\u001b[39m\"\u001b[39m\u001b[39mloaded history in \u001b[39m\u001b[39m%.2f\u001b[39;00m\u001b[39m sec(s)\u001b[39m\u001b[39m\"\u001b[39m, time\u001b[39m.\u001b[39mtime() \u001b[39m-\u001b[39m start_time)\n\u001b[0;32m--> 699\u001b[0m tuner_obj\u001b[39m.\u001b[39;49mtune(\n\u001b[1;32m    700\u001b[0m     n_trial\u001b[39m=\u001b[39;49m\u001b[39mmin\u001b[39;49m(trials, \u001b[39mlen\u001b[39;49m(tsk\u001b[39m.\u001b[39;49mconfig_space)),\n\u001b[1;32m    701\u001b[0m     early_stopping\u001b[39m=\u001b[39;49mearly_stopping,\n\u001b[1;32m    702\u001b[0m     measure_option\u001b[39m=\u001b[39;49mmeasure_option,\n\u001b[1;32m    703\u001b[0m     callbacks\u001b[39m=\u001b[39;49m[\n\u001b[1;32m    704\u001b[0m         autotvm\u001b[39m.\u001b[39;49mcallback\u001b[39m.\u001b[39;49mprogress_bar(trials, prefix\u001b[39m=\u001b[39;49mprefix),\n\u001b[1;32m    705\u001b[0m         autotvm\u001b[39m.\u001b[39;49mcallback\u001b[39m.\u001b[39;49mlog_to_file(log_file),\n\u001b[1;32m    706\u001b[0m     ],\n\u001b[1;32m    707\u001b[0m )\n",
      "File \u001b[0;32m~/miniconda2/envs/tvm/lib/python3.9/site-packages/tvm/autotvm/tuner/xgboost_tuner.py:105\u001b[0m, in \u001b[0;36mXGBTuner.tune\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m    104\u001b[0m \u001b[39mdef\u001b[39;00m \u001b[39mtune\u001b[39m(\u001b[39mself\u001b[39m, \u001b[39m*\u001b[39margs, \u001b[39m*\u001b[39m\u001b[39m*\u001b[39mkwargs):  \u001b[39m# pylint: disable=arguments-differ\u001b[39;00m\n\u001b[0;32m--> 105\u001b[0m     \u001b[39msuper\u001b[39;49m(XGBTuner, \u001b[39mself\u001b[39;49m)\u001b[39m.\u001b[39;49mtune(\u001b[39m*\u001b[39;49margs, \u001b[39m*\u001b[39;49m\u001b[39m*\u001b[39;49mkwargs)\n\u001b[1;32m    107\u001b[0m     \u001b[39m# manually close pool to avoid multiprocessing issues\u001b[39;00m\n\u001b[1;32m    108\u001b[0m     \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mcost_model\u001b[39m.\u001b[39m_close_pool()\n",
      "File \u001b[0;32m~/miniconda2/envs/tvm/lib/python3.9/site-packages/tvm/autotvm/tuner/tuner.py:135\u001b[0m, in \u001b[0;36mTuner.tune\u001b[0;34m(self, n_trial, measure_option, early_stopping, callbacks, si_prefix)\u001b[0m\n\u001b[1;32m    132\u001b[0m configs \u001b[39m=\u001b[39m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mnext_batch(\u001b[39mmin\u001b[39m(n_parallel, n_trial \u001b[39m-\u001b[39m i))\n\u001b[1;32m    134\u001b[0m inputs \u001b[39m=\u001b[39m [MeasureInput(\u001b[39mself\u001b[39m\u001b[39m.\u001b[39mtask\u001b[39m.\u001b[39mtarget, \u001b[39mself\u001b[39m\u001b[39m.\u001b[39mtask, config) \u001b[39mfor\u001b[39;00m config \u001b[39min\u001b[39;00m configs]\n\u001b[0;32m--> 135\u001b[0m results \u001b[39m=\u001b[39m measure_batch(inputs)\n\u001b[1;32m    137\u001b[0m \u001b[39m# keep best config\u001b[39;00m\n\u001b[1;32m    138\u001b[0m \u001b[39mfor\u001b[39;00m k, (inp, res) \u001b[39min\u001b[39;00m \u001b[39menumerate\u001b[39m(\u001b[39mzip\u001b[39m(inputs, results)):\n",
      "File \u001b[0;32m~/miniconda2/envs/tvm/lib/python3.9/site-packages/tvm/autotvm/measure/measure.py:291\u001b[0m, in \u001b[0;36mcreate_measure_batch.<locals>.measure_batch\u001b[0;34m(measure_inputs)\u001b[0m\n\u001b[1;32m    289\u001b[0m \u001b[39mdef\u001b[39;00m \u001b[39mmeasure_batch\u001b[39m(measure_inputs):\n\u001b[1;32m    290\u001b[0m     build_results \u001b[39m=\u001b[39m builder\u001b[39m.\u001b[39mbuild(measure_inputs)\n\u001b[0;32m--> 291\u001b[0m     results \u001b[39m=\u001b[39m runner\u001b[39m.\u001b[39;49mrun(measure_inputs, build_results)\n\u001b[1;32m    292\u001b[0m     \u001b[39mreturn\u001b[39;00m results\n",
      "File \u001b[0;32m~/miniconda2/envs/tvm/lib/python3.9/site-packages/tvm/autotvm/measure/measure_methods.py:392\u001b[0m, in \u001b[0;36mRPCRunner.run\u001b[0;34m(self, measure_inputs, build_results)\u001b[0m\n\u001b[1;32m    390\u001b[0m \u001b[39mfor\u001b[39;00m future \u001b[39min\u001b[39;00m futures:\n\u001b[1;32m    391\u001b[0m     \u001b[39mtry\u001b[39;00m:\n\u001b[0;32m--> 392\u001b[0m         res \u001b[39m=\u001b[39m future\u001b[39m.\u001b[39;49mresult()\n\u001b[1;32m    393\u001b[0m         results\u001b[39m.\u001b[39mappend(res)\n\u001b[1;32m    394\u001b[0m     \u001b[39mexcept\u001b[39;00m \u001b[39mException\u001b[39;00m \u001b[39mas\u001b[39;00m ex:  \u001b[39m# pylint: disable=broad-except\u001b[39;00m\n",
      "File \u001b[0;32m~/miniconda2/envs/tvm/lib/python3.9/concurrent/futures/_base.py:441\u001b[0m, in \u001b[0;36mFuture.result\u001b[0;34m(self, timeout)\u001b[0m\n\u001b[1;32m    438\u001b[0m \u001b[39melif\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_state \u001b[39m==\u001b[39m FINISHED:\n\u001b[1;32m    439\u001b[0m     \u001b[39mreturn\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m__get_result()\n\u001b[0;32m--> 441\u001b[0m \u001b[39mself\u001b[39;49m\u001b[39m.\u001b[39;49m_condition\u001b[39m.\u001b[39;49mwait(timeout)\n\u001b[1;32m    443\u001b[0m \u001b[39mif\u001b[39;00m \u001b[39mself\u001b[39m\u001b[39m.\u001b[39m_state \u001b[39min\u001b[39;00m [CANCELLED, CANCELLED_AND_NOTIFIED]:\n\u001b[1;32m    444\u001b[0m     \u001b[39mraise\u001b[39;00m CancelledError()\n",
      "File \u001b[0;32m~/miniconda2/envs/tvm/lib/python3.9/threading.py:312\u001b[0m, in \u001b[0;36mCondition.wait\u001b[0;34m(self, timeout)\u001b[0m\n\u001b[1;32m    310\u001b[0m \u001b[39mtry\u001b[39;00m:    \u001b[39m# restore state no matter what (e.g., KeyboardInterrupt)\u001b[39;00m\n\u001b[1;32m    311\u001b[0m     \u001b[39mif\u001b[39;00m timeout \u001b[39mis\u001b[39;00m \u001b[39mNone\u001b[39;00m:\n\u001b[0;32m--> 312\u001b[0m         waiter\u001b[39m.\u001b[39;49macquire()\n\u001b[1;32m    313\u001b[0m         gotit \u001b[39m=\u001b[39m \u001b[39mTrue\u001b[39;00m\n\u001b[1;32m    314\u001b[0m     \u001b[39melse\u001b[39;00m:\n",
      "\u001b[0;31mKeyboardInterrupt\u001b[0m: "
     ]
    }
   ],
   "source": [
    "tvmc.tune(model, target=\"llvm\") #Step 1.5: Optional Tune"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "https://tvm.apache.org/docs/tutorial/autotvm_relay_x86.html\n",
    "\n",
    "Compiling and Optimizing a Model with the Python Interface (AutoTVM)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [],
   "source": [
    "import onnx\n",
    "from tvm.contrib.download import download_testdata\n",
    "from PIL import Image\n",
    "import numpy as np\n",
    "import tvm.relay as relay\n",
    "import tvm\n",
    "from tvm.contrib import graph_executor\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [],
   "source": [
    "model_url = (\n",
    "    \"https://github.com/onnx/models/raw/main/\"\n",
    "    \"vision/classification/resnet/model/\"\n",
    "    \"resnet50-v2-7.onnx\"\n",
    ")\n",
    "\n",
    "model_path = download_testdata(model_url, \"resnet50-v2-7.onnx\", module=\"onnx\")\n",
    "onnx_model = onnx.load(model_path)\n",
    "\n",
    "# Seed numpy's RNG to get consistent results\n",
    "np.random.seed(0)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [],
   "source": [
    "img_url = \"https://s3.amazonaws.com/model-server/inputs/kitten.jpg\"\n",
    "img_path = download_testdata(img_url, \"imagenet_cat.png\", module=\"data\")\n",
    "\n",
    "# Resize it to 224x224\n",
    "resized_image = Image.open(img_path).resize((224, 224))\n",
    "img_data = np.asarray(resized_image).astype(\"float32\")\n",
    "\n",
    "# Our input image is in HWC layout while ONNX expects CHW input, so convert the array\n",
    "img_data = np.transpose(img_data, (2, 0, 1))\n",
    "\n",
    "# Normalize according to the ImageNet input specification\n",
    "imagenet_mean = np.array([0.485, 0.456, 0.406]).reshape((3, 1, 1))\n",
    "imagenet_stddev = np.array([0.229, 0.224, 0.225]).reshape((3, 1, 1))\n",
    "norm_img_data = (img_data / 255 - imagenet_mean) / imagenet_stddev\n",
    "\n",
    "# Add the batch dimension, as we are expecting 4-dimensional input: NCHW.\n",
    "img_data = np.expand_dims(norm_img_data, axis=0)\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "https://tvm.apache.org/docs/tutorial/tensor_expr_get_started.html\n",
    "\n",
    "Working with Operators Using Tensor Expression\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [],
   "source": [
    "import tvm\n",
    "import tvm.testing\n",
    "from tvm import te\n",
    "import numpy as np\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [],
   "source": [
    "tgt = tvm.target.Target(target=\"llvm\", host=\"llvm\")\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "@main = primfn(A_1: handle, B_1: handle, C_1: handle) -> ()\n",
      "  attr = {\"from_legacy_te_schedule\": True, \"global_symbol\": \"main\", \"tir.noalias\": True}\n",
      "  buffers = {A: Buffer(A_2: Pointer(float32), float32, [n: int32], [stride: int32], type=\"auto\"),\n",
      "             B: Buffer(B_2: Pointer(float32), float32, [n], [stride_1: int32], type=\"auto\"),\n",
      "             C: Buffer(C_2: Pointer(float32), float32, [n], [stride_2: int32], type=\"auto\")}\n",
      "  buffer_map = {A_1: A, B_1: B, C_1: C} {\n",
      "  for (i: int32, 0, n) {\n",
      "    C_3: Buffer(C_2, float32, [(stride_2*n)], [], type=\"auto\")[(i*stride_2)] = (A_3: Buffer(A_2, float32, [(stride*n)], [], type=\"auto\")[(i*stride)] + B_3: Buffer(B_2, float32, [(stride_1*n)], [], type=\"auto\")[(i*stride_1)])\n",
      "  }\n",
      "}\n",
      "\n",
      "\n"
     ]
    }
   ],
   "source": [
    "n = te.var(\"n\")\n",
    "A = te.placeholder((n,), name=\"A\")\n",
    "B = te.placeholder((n,), name=\"B\")\n",
    "C = te.compute(A.shape, lambda i: A[i] + B[i], name=\"C\")\n",
    "s = te.create_schedule(C.op)\n",
    "print(tvm.lower(s, [A, B, C], simple_mode=True))\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [],
   "source": [
    "fadd = tvm.build(s, [A, B, C], tgt, name=\"myadd\")\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {},
   "outputs": [],
   "source": [
    "dev = tvm.device(tgt.kind.name, 0)\n",
    "\n",
    "n = 1024\n",
    "a = tvm.nd.array(np.random.uniform(size=n).astype(A.dtype), dev)\n",
    "b = tvm.nd.array(np.random.uniform(size=n).astype(B.dtype), dev)\n",
    "c = tvm.nd.array(np.zeros(n, dtype=C.dtype), dev)\n",
    "fadd(a, b, c)\n",
    "tvm.testing.assert_allclose(c.numpy(), a.numpy() + b.numpy())\n"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "tvm",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.17"
  },
  "orig_nbformat": 4
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
