{
  "cells": [
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "collapsed": false
      },
      "outputs": [],
      "source": [
        "%matplotlib inline"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {},
      "source": [
        "\n# Compile OneFlow Models\n**Author**: [Xiaoyu Zhang](https://github.com/BBuf/)\n\nThis article is an introductory tutorial to deploy OneFlow models with Relay.\n\nTo begin, the OneFlow package should be installed.\n\nA quick solution is to install it via pip:\n\n```bash\npip install flowvision==0.1.0\npython3 -m pip install -f https://release.oneflow.info oneflow==0.7.0+cpu\n```\nor refer to the official site:\nhttps://github.com/Oneflow-Inc/oneflow\n\nCurrently, TVM supports OneFlow 0.7.0. Other versions may be unstable.\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "collapsed": false
      },
      "outputs": [],
      "source": [
        "import os, math\nfrom matplotlib import pyplot as plt\nimport numpy as np\nfrom PIL import Image\n\n# oneflow imports\nimport flowvision\nimport oneflow as flow\nimport oneflow.nn as nn\n\nimport tvm\nfrom tvm import relay\nfrom tvm.contrib.download import download_testdata"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {},
      "source": [
        "## Load a pretrained OneFlow model and save it\n\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "collapsed": false
      },
      "outputs": [],
      "source": [
        "model_name = \"resnet18\"\nmodel = getattr(flowvision.models, model_name)(pretrained=True)\nmodel = model.eval()\n\nmodel_dir = \"resnet18_model\"\nif not os.path.exists(model_dir):\n    flow.save(model.state_dict(), model_dir)"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {},
      "source": [
        "## Load a test image\nClassic cat example!\n\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "collapsed": false
      },
      "outputs": [],
      "source": [
        "img_url = \"https://github.com/dmlc/mxnet.js/blob/main/data/cat.png?raw=true\"\nimg_path = download_testdata(img_url, \"cat.png\", module=\"data\")\nimg = Image.open(img_path).resize((224, 224))\n\n# Preprocess the image and convert to tensor\nfrom flowvision import transforms\n\nmy_preprocess = transforms.Compose(\n    [\n        transforms.Resize(256),\n        transforms.CenterCrop(224),\n        transforms.ToTensor(),\n        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),\n    ]\n)\nimg = my_preprocess(img)\nimg = np.expand_dims(img.numpy(), 0)"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {},
      "source": [
        "## Import the graph to Relay\nConvert OneFlow graph to Relay graph. The input name can be arbitrary.\n\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "collapsed": false
      },
      "outputs": [],
      "source": [
        "class Graph(flow.nn.Graph):\n    def __init__(self, module):\n        super().__init__()\n        self.m = module\n\n    def build(self, x):\n        out = self.m(x)\n        return out\n\n\ngraph = Graph(model)\n_ = graph._compile(flow.randn(1, 3, 224, 224))\n\nmod, params = relay.frontend.from_oneflow(graph, model_dir)"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {},
      "source": [
        "## Relay Build\nCompile the graph to llvm target with given input specification.\n\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "collapsed": false
      },
      "outputs": [],
      "source": [
        "target = tvm.target.Target(\"llvm\", host=\"llvm\")\ndev = tvm.cpu(0)\nwith tvm.transform.PassContext(opt_level=3):\n    lib = relay.build(mod, target=target, params=params)"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {},
      "source": [
        "## Execute the portable graph on TVM\nNow we can try deploying the compiled model on the target.\n\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "collapsed": false
      },
      "outputs": [],
      "source": [
        "# Reuse the llvm target and CPU device defined in the build step above;\n# the install instructions pin the CPU-only OneFlow wheel, so CUDA may be unavailable.\n# opt_level must be within TVM's documented range of 0-3.\nwith tvm.transform.PassContext(opt_level=3):\n    intrp = relay.build_module.create_executor(\"graph\", mod, dev, target)\n\nprint(type(img))\nprint(img.shape)\ntvm_output = intrp.evaluate()(tvm.nd.array(img.astype(\"float32\")), **params)"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {},
      "source": [
        "## Look up synset name\nLook up prediction top 1 index in 1000 class synset.\n\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "collapsed": false
      },
      "outputs": [],
      "source": [
        "synset_url = \"\".join(\n    [\n        \"https://raw.githubusercontent.com/Cadene/\",\n        \"pretrained-models.pytorch/master/data/\",\n        \"imagenet_synsets.txt\",\n    ]\n)\nsynset_name = \"imagenet_synsets.txt\"\nsynset_path = download_testdata(synset_url, synset_name, module=\"data\")\nwith open(synset_path) as f:\n    synsets = f.readlines()\n\nsynsets = [x.strip() for x in synsets]\nsplits = [line.split(\" \") for line in synsets]\nkey_to_classname = {spl[0]: \" \".join(spl[1:]) for spl in splits}\n\nclass_url = \"\".join(\n    [\n        \"https://raw.githubusercontent.com/Cadene/\",\n        \"pretrained-models.pytorch/master/data/\",\n        \"imagenet_classes.txt\",\n    ]\n)\nclass_name = \"imagenet_classes.txt\"\nclass_path = download_testdata(class_url, class_name, module=\"data\")\nwith open(class_path) as f:\n    class_id_to_key = f.readlines()\n\nclass_id_to_key = [x.strip() for x in class_id_to_key]\n\n# Get top-1 result for TVM\ntop1_tvm = np.argmax(tvm_output.numpy()[0])\ntvm_class_key = class_id_to_key[top1_tvm]\n\n# Convert input to a OneFlow tensor and get the OneFlow result for comparison\nwith flow.no_grad():\n    flow_img = flow.from_numpy(img)\n    output = model(flow_img)\n\n    # Get top-1 result for OneFlow\n    top_oneflow = np.argmax(output.numpy())\n    oneflow_class_key = class_id_to_key[top_oneflow]\n\nprint(\"Relay top-1 id: {}, class name: {}\".format(top1_tvm, key_to_classname[tvm_class_key]))\nprint(\n    \"OneFlow top-1 id: {}, class name: {}\".format(top_oneflow, key_to_classname[oneflow_class_key])\n)"
      ]
    }
  ],
  "metadata": {
    "kernelspec": {
      "display_name": "Python 3",
      "language": "python",
      "name": "python3"
    },
    "language_info": {
      "codemirror_mode": {
        "name": "ipython",
        "version": 3
      },
      "file_extension": ".py",
      "mimetype": "text/x-python",
      "name": "python",
      "nbconvert_exporter": "python",
      "pygments_lexer": "ipython3",
      "version": "3.7.5"
    }
  },
  "nbformat": 4,
  "nbformat_minor": 0
}