{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Export to ONNX\n",
    "\n",
    "In this notebook, we export our PyTorch model to ONNX so that it can later be used for inference.\n",
    "\n",
    "**1. Import Required Libraries**"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "from torchvision import transforms\n",
    "import torch\n",
    "import torch.optim as optim\n",
    "from torch.utils.data import random_split, DataLoader\n",
    "import time\n",
    "import cv2\n",
    "import numpy as np\n",
    "import os\n",
    "import matplotlib.pyplot as plt\n",
    "#Local Imports\n",
    "from dataset import HeadposeDataset\n",
    "from model import FSANet\n",
    "import onnx\n",
    "import onnxruntime\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "**2. Define Model and Load from Saved Checkpoint**"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "FSANet(\n",
       "  (msms): MultiStreamMultiStage(\n",
       "    (avgpool): AvgPool2d(kernel_size=2, stride=2, padding=0)\n",
       "    (s0_conv0): SepConvBlock(\n",
       "      (conv): SepConv2d(\n",
       "        (depthwise): Conv2d(3, 3, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=3)\n",
       "        (pointwise): Conv2d(3, 16, kernel_size=(1, 1), stride=(1, 1))\n",
       "      )\n",
       "      (bn): BatchNorm2d(16, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (act): ReLU()\n",
       "    )\n",
       "    (s0_conv1_0): SepConvBlock(\n",
       "      (conv): SepConv2d(\n",
       "        (depthwise): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=16)\n",
       "        (pointwise): Conv2d(16, 32, kernel_size=(1, 1), stride=(1, 1))\n",
       "      )\n",
       "      (bn): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (act): ReLU()\n",
       "    )\n",
       "    (s0_conv1_1): SepConvBlock(\n",
       "      (conv): SepConv2d(\n",
       "        (depthwise): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=32)\n",
       "        (pointwise): Conv2d(32, 32, kernel_size=(1, 1), stride=(1, 1))\n",
       "      )\n",
       "      (bn): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (act): ReLU()\n",
       "    )\n",
       "    (s0_conv1_out): Conv2dAct(\n",
       "      (conv): Conv2d(32, 64, kernel_size=(1, 1), stride=(1, 1))\n",
       "      (act): ReLU()\n",
       "    )\n",
       "    (s0_conv2_0): SepConvBlock(\n",
       "      (conv): SepConv2d(\n",
       "        (depthwise): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=32)\n",
       "        (pointwise): Conv2d(32, 64, kernel_size=(1, 1), stride=(1, 1))\n",
       "      )\n",
       "      (bn): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (act): ReLU()\n",
       "    )\n",
       "    (s0_conv2_1): SepConvBlock(\n",
       "      (conv): SepConv2d(\n",
       "        (depthwise): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=64)\n",
       "        (pointwise): Conv2d(64, 64, kernel_size=(1, 1), stride=(1, 1))\n",
       "      )\n",
       "      (bn): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (act): ReLU()\n",
       "    )\n",
       "    (s0_conv2_out): Conv2dAct(\n",
       "      (conv): Conv2d(64, 64, kernel_size=(1, 1), stride=(1, 1))\n",
       "      (act): ReLU()\n",
       "    )\n",
       "    (s0_conv3_0): SepConvBlock(\n",
       "      (conv): SepConv2d(\n",
       "        (depthwise): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=64)\n",
       "        (pointwise): Conv2d(64, 128, kernel_size=(1, 1), stride=(1, 1))\n",
       "      )\n",
       "      (bn): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (act): ReLU()\n",
       "    )\n",
       "    (s0_conv3_1): SepConvBlock(\n",
       "      (conv): SepConv2d(\n",
       "        (depthwise): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=128)\n",
       "        (pointwise): Conv2d(128, 128, kernel_size=(1, 1), stride=(1, 1))\n",
       "      )\n",
       "      (bn): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (act): ReLU()\n",
       "    )\n",
       "    (s0_conv3_out): Conv2dAct(\n",
       "      (conv): Conv2d(128, 64, kernel_size=(1, 1), stride=(1, 1))\n",
       "      (act): ReLU()\n",
       "    )\n",
       "    (maxpool): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)\n",
       "    (s1_conv0): SepConvBlock(\n",
       "      (conv): SepConv2d(\n",
       "        (depthwise): Conv2d(3, 3, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=3)\n",
       "        (pointwise): Conv2d(3, 16, kernel_size=(1, 1), stride=(1, 1))\n",
       "      )\n",
       "      (bn): BatchNorm2d(16, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (act): ReLU()\n",
       "    )\n",
       "    (s1_conv1_0): SepConvBlock(\n",
       "      (conv): SepConv2d(\n",
       "        (depthwise): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=16)\n",
       "        (pointwise): Conv2d(16, 32, kernel_size=(1, 1), stride=(1, 1))\n",
       "      )\n",
       "      (bn): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (act): Tanh()\n",
       "    )\n",
       "    (s1_conv1_1): SepConvBlock(\n",
       "      (conv): SepConv2d(\n",
       "        (depthwise): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=32)\n",
       "        (pointwise): Conv2d(32, 32, kernel_size=(1, 1), stride=(1, 1))\n",
       "      )\n",
       "      (bn): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (act): Tanh()\n",
       "    )\n",
       "    (s1_conv1_out): Conv2dAct(\n",
       "      (conv): Conv2d(32, 64, kernel_size=(1, 1), stride=(1, 1))\n",
       "      (act): Tanh()\n",
       "    )\n",
       "    (s1_conv2_0): SepConvBlock(\n",
       "      (conv): SepConv2d(\n",
       "        (depthwise): Conv2d(32, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=32)\n",
       "        (pointwise): Conv2d(32, 64, kernel_size=(1, 1), stride=(1, 1))\n",
       "      )\n",
       "      (bn): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (act): Tanh()\n",
       "    )\n",
       "    (s1_conv2_1): SepConvBlock(\n",
       "      (conv): SepConv2d(\n",
       "        (depthwise): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=64)\n",
       "        (pointwise): Conv2d(64, 64, kernel_size=(1, 1), stride=(1, 1))\n",
       "      )\n",
       "      (bn): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (act): Tanh()\n",
       "    )\n",
       "    (s1_conv2_out): Conv2dAct(\n",
       "      (conv): Conv2d(64, 64, kernel_size=(1, 1), stride=(1, 1))\n",
       "      (act): Tanh()\n",
       "    )\n",
       "    (s1_conv3_0): SepConvBlock(\n",
       "      (conv): SepConv2d(\n",
       "        (depthwise): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=64)\n",
       "        (pointwise): Conv2d(64, 128, kernel_size=(1, 1), stride=(1, 1))\n",
       "      )\n",
       "      (bn): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (act): Tanh()\n",
       "    )\n",
       "    (s1_conv3_1): SepConvBlock(\n",
       "      (conv): SepConv2d(\n",
       "        (depthwise): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=128)\n",
       "        (pointwise): Conv2d(128, 128, kernel_size=(1, 1), stride=(1, 1))\n",
       "      )\n",
       "      (bn): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (act): Tanh()\n",
       "    )\n",
       "    (s1_conv3_out): Conv2dAct(\n",
       "      (conv): Conv2d(128, 64, kernel_size=(1, 1), stride=(1, 1))\n",
       "      (act): Tanh()\n",
       "    )\n",
       "  )\n",
       "  (fgsm): FineGrainedStructureMapping(\n",
       "    (attention_maps): ScoringFunction(\n",
       "      (reduce_channel): VarianceC()\n",
       "    )\n",
       "    (fm): Linear(in_features=64, out_features=960, bias=True)\n",
       "    (fc): Linear(in_features=192, out_features=35, bias=True)\n",
       "  )\n",
       "  (caps_layer): CapsuleLayer1d()\n",
       "  (eaf): ExtractAggregatedFeatures()\n",
       "  (esp_s1): ExtractSSRParams(\n",
       "    (shift_fc): Linear(in_features=4, out_features=3, bias=True)\n",
       "    (scale_fc): Linear(in_features=4, out_features=3, bias=True)\n",
       "    (pred_fc): Linear(in_features=8, out_features=9, bias=True)\n",
       "  )\n",
       "  (esp_s2): ExtractSSRParams(\n",
       "    (shift_fc): Linear(in_features=4, out_features=3, bias=True)\n",
       "    (scale_fc): Linear(in_features=4, out_features=3, bias=True)\n",
       "    (pred_fc): Linear(in_features=8, out_features=9, bias=True)\n",
       "  )\n",
       "  (esp_s3): ExtractSSRParams(\n",
       "    (shift_fc): Linear(in_features=4, out_features=3, bias=True)\n",
       "    (scale_fc): Linear(in_features=4, out_features=3, bias=True)\n",
       "    (pred_fc): Linear(in_features=8, out_features=9, bias=True)\n",
       "  )\n",
       "  (ssr): SSRLayer()\n",
       ")"
      ]
     },
     "execution_count": 2,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "#Pick GPU when available; fall back to CPU so this cell also runs on CUDA-less machines\n",
    "device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n",
    "model = FSANet(var=True).to(device)\n",
    "#Load Model Checkpoint; map_location remaps GPU-saved tensors onto the chosen device,\n",
    "#so loading works even when the checkpoint was saved on a different device\n",
    "chkpt_dic = torch.load('checkpoints/fsavar-09082020.chkpt', map_location=device)\n",
    "\n",
    "model.load_state_dict(chkpt_dic['best_states']['model'])\n",
    "#set model to inference-ready (freezes BatchNorm running stats, disables dropout)\n",
    "model.eval()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "**3. Export model to ONNX**"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "#Export to ONNX\n",
    "#Dummy input in the shape the model expects: (batch=1, channels=3, H=64, W=64)\n",
    "x = torch.randn(1,3,64,64).to(device)\n",
    "#Sanity-check forward pass before tracing; no_grad avoids building autograd state\n",
    "with torch.no_grad():\n",
    "    model_out = model(x)\n",
    "save_path = \"pretrained/fsanet-var-iter-688590.onnx\"\n",
    "#Ensure the output directory exists so export doesn't fail on a fresh clone\n",
    "os.makedirs(os.path.dirname(save_path), exist_ok=True)\n",
    "\n",
    "torch.onnx.export(model,               # model being run\n",
    "                  x,                         # model input (or a tuple for multiple inputs)\n",
    "                  save_path,   # where to save the model (can be a file or file-like object)\n",
    "                  export_params=True,        # store the trained parameter weights inside the model file\n",
    "                  opset_version=9,          # the ONNX version to export the model to\n",
    "                  do_constant_folding=True,  # whether to execute constant folding for optimization\n",
    "                  input_names = ['input'],   # the model's input names\n",
    "                  output_names = ['output']) # the model's output names"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "**4. Reload model from ONNX**"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "graph torch-jit-export (\n",
      "  %input[FLOAT, 1x3x64x64]\n",
      ") initializers (\n",
      "  %595[INT64, 1]\n",
      "  %596[INT64, 1]\n",
      "  %597[INT64, 1]\n",
      "  %598[INT64, 1]\n",
      "  %599[INT64, 1]\n",
      "  %600[INT64, 1]\n",
      "  %601[INT64, 1]\n",
      "  %602[INT64, 1]\n",
      "  %603[INT64, 1]\n",
      "  %604[INT64, 1]\n",
      "  %605[INT64, 1]\n",
      "  %606[INT64, 1]\n",
      "  %607[INT64, 1]\n",
      "  %608[INT64, 1]\n",
      "  %609[FLOAT, 3x21x64x16]\n",
      "  %610[FLOAT, scalar]\n",
      "  %611[FLOAT, scalar]\n",
      "  %612[INT64, 1]\n",
      "  %613[INT64, 1]\n",
      "  %614[INT64, 1]\n",
      "  %615[INT64, 1]\n",
      "  %616[INT64, 1]\n",
      "  %617[INT64, 1]\n",
      "  %618[INT64, 1]\n",
      "  %619[INT64, 1]\n",
      "  %620[INT64, 1]\n",
      "  %caps_layer.affine_w[FLOAT, 3x21x16x64]\n",
      "  %esp_s1.pred_fc.bias[FLOAT, 9]\n",
      "  %esp_s1.pred_fc.weight[FLOAT, 9x8]\n",
      "  %esp_s1.scale_fc.bias[FLOAT, 3]\n",
      "  %esp_s1.scale_fc.weight[FLOAT, 3x4]\n",
      "  %esp_s1.shift_fc.bias[FLOAT, 3]\n",
      "  %esp_s1.shift_fc.weight[FLOAT, 3x4]\n",
      "  %esp_s2.pred_fc.bias[FLOAT, 9]\n",
      "  %esp_s2.pred_fc.weight[FLOAT, 9x8]\n",
      "  %esp_s2.scale_fc.bias[FLOAT, 3]\n",
      "  %esp_s2.scale_fc.weight[FLOAT, 3x4]\n",
      "  %esp_s2.shift_fc.bias[FLOAT, 3]\n",
      "  %esp_s2.shift_fc.weight[FLOAT, 3x4]\n",
      "  %esp_s3.pred_fc.bias[FLOAT, 9]\n",
      "  %esp_s3.pred_fc.weight[FLOAT, 9x8]\n",
      "  %esp_s3.scale_fc.bias[FLOAT, 3]\n",
      "  %esp_s3.scale_fc.weight[FLOAT, 3x4]\n",
      "  %esp_s3.shift_fc.bias[FLOAT, 3]\n",
      "  %esp_s3.shift_fc.weight[FLOAT, 3x4]\n",
      "  %fgsm.fc.bias[FLOAT, 35]\n",
      "  %fgsm.fc.weight[FLOAT, 35x192]\n",
      "  %fgsm.fm.bias[FLOAT, 960]\n",
      "  %fgsm.fm.weight[FLOAT, 960x64]\n",
      "  %msms.s0_conv0.bn.bias[FLOAT, 16]\n",
      "  %msms.s0_conv0.bn.running_mean[FLOAT, 16]\n",
      "  %msms.s0_conv0.bn.running_var[FLOAT, 16]\n",
      "  %msms.s0_conv0.bn.weight[FLOAT, 16]\n",
      "  %msms.s0_conv0.conv.depthwise.bias[FLOAT, 3]\n",
      "  %msms.s0_conv0.conv.depthwise.weight[FLOAT, 3x1x3x3]\n",
      "  %msms.s0_conv0.conv.pointwise.bias[FLOAT, 16]\n",
      "  %msms.s0_conv0.conv.pointwise.weight[FLOAT, 16x3x1x1]\n",
      "  %msms.s0_conv1_0.bn.bias[FLOAT, 32]\n",
      "  %msms.s0_conv1_0.bn.running_mean[FLOAT, 32]\n",
      "  %msms.s0_conv1_0.bn.running_var[FLOAT, 32]\n",
      "  %msms.s0_conv1_0.bn.weight[FLOAT, 32]\n",
      "  %msms.s0_conv1_0.conv.depthwise.bias[FLOAT, 16]\n",
      "  %msms.s0_conv1_0.conv.depthwise.weight[FLOAT, 16x1x3x3]\n",
      "  %msms.s0_conv1_0.conv.pointwise.bias[FLOAT, 32]\n",
      "  %msms.s0_conv1_0.conv.pointwise.weight[FLOAT, 32x16x1x1]\n",
      "  %msms.s0_conv1_1.bn.bias[FLOAT, 32]\n",
      "  %msms.s0_conv1_1.bn.running_mean[FLOAT, 32]\n",
      "  %msms.s0_conv1_1.bn.running_var[FLOAT, 32]\n",
      "  %msms.s0_conv1_1.bn.weight[FLOAT, 32]\n",
      "  %msms.s0_conv1_1.conv.depthwise.bias[FLOAT, 32]\n",
      "  %msms.s0_conv1_1.conv.depthwise.weight[FLOAT, 32x1x3x3]\n",
      "  %msms.s0_conv1_1.conv.pointwise.bias[FLOAT, 32]\n",
      "  %msms.s0_conv1_1.conv.pointwise.weight[FLOAT, 32x32x1x1]\n",
      "  %msms.s0_conv1_out.conv.bias[FLOAT, 64]\n",
      "  %msms.s0_conv1_out.conv.weight[FLOAT, 64x32x1x1]\n",
      "  %msms.s0_conv2_0.bn.bias[FLOAT, 64]\n",
      "  %msms.s0_conv2_0.bn.running_mean[FLOAT, 64]\n",
      "  %msms.s0_conv2_0.bn.running_var[FLOAT, 64]\n",
      "  %msms.s0_conv2_0.bn.weight[FLOAT, 64]\n",
      "  %msms.s0_conv2_0.conv.depthwise.bias[FLOAT, 32]\n",
      "  %msms.s0_conv2_0.conv.depthwise.weight[FLOAT, 32x1x3x3]\n",
      "  %msms.s0_conv2_0.conv.pointwise.bias[FLOAT, 64]\n",
      "  %msms.s0_conv2_0.conv.pointwise.weight[FLOAT, 64x32x1x1]\n",
      "  %msms.s0_conv2_1.bn.bias[FLOAT, 64]\n",
      "  %msms.s0_conv2_1.bn.running_mean[FLOAT, 64]\n",
      "  %msms.s0_conv2_1.bn.running_var[FLOAT, 64]\n",
      "  %msms.s0_conv2_1.bn.weight[FLOAT, 64]\n",
      "  %msms.s0_conv2_1.conv.depthwise.bias[FLOAT, 64]\n",
      "  %msms.s0_conv2_1.conv.depthwise.weight[FLOAT, 64x1x3x3]\n",
      "  %msms.s0_conv2_1.conv.pointwise.bias[FLOAT, 64]\n",
      "  %msms.s0_conv2_1.conv.pointwise.weight[FLOAT, 64x64x1x1]\n",
      "  %msms.s0_conv2_out.conv.bias[FLOAT, 64]\n",
      "  %msms.s0_conv2_out.conv.weight[FLOAT, 64x64x1x1]\n",
      "  %msms.s0_conv3_0.bn.bias[FLOAT, 128]\n",
      "  %msms.s0_conv3_0.bn.running_mean[FLOAT, 128]\n",
      "  %msms.s0_conv3_0.bn.running_var[FLOAT, 128]\n",
      "  %msms.s0_conv3_0.bn.weight[FLOAT, 128]\n",
      "  %msms.s0_conv3_0.conv.depthwise.bias[FLOAT, 64]\n",
      "  %msms.s0_conv3_0.conv.depthwise.weight[FLOAT, 64x1x3x3]\n",
      "  %msms.s0_conv3_0.conv.pointwise.bias[FLOAT, 128]\n",
      "  %msms.s0_conv3_0.conv.pointwise.weight[FLOAT, 128x64x1x1]\n",
      "  %msms.s0_conv3_1.bn.bias[FLOAT, 128]\n",
      "  %msms.s0_conv3_1.bn.running_mean[FLOAT, 128]\n",
      "  %msms.s0_conv3_1.bn.running_var[FLOAT, 128]\n",
      "  %msms.s0_conv3_1.bn.weight[FLOAT, 128]\n",
      "  %msms.s0_conv3_1.conv.depthwise.bias[FLOAT, 128]\n",
      "  %msms.s0_conv3_1.conv.depthwise.weight[FLOAT, 128x1x3x3]\n",
      "  %msms.s0_conv3_1.conv.pointwise.bias[FLOAT, 128]\n",
      "  %msms.s0_conv3_1.conv.pointwise.weight[FLOAT, 128x128x1x1]\n",
      "  %msms.s0_conv3_out.conv.bias[FLOAT, 64]\n",
      "  %msms.s0_conv3_out.conv.weight[FLOAT, 64x128x1x1]\n",
      "  %msms.s1_conv0.bn.bias[FLOAT, 16]\n",
      "  %msms.s1_conv0.bn.running_mean[FLOAT, 16]\n",
      "  %msms.s1_conv0.bn.running_var[FLOAT, 16]\n",
      "  %msms.s1_conv0.bn.weight[FLOAT, 16]\n",
      "  %msms.s1_conv0.conv.depthwise.bias[FLOAT, 3]\n",
      "  %msms.s1_conv0.conv.depthwise.weight[FLOAT, 3x1x3x3]\n",
      "  %msms.s1_conv0.conv.pointwise.bias[FLOAT, 16]\n",
      "  %msms.s1_conv0.conv.pointwise.weight[FLOAT, 16x3x1x1]\n",
      "  %msms.s1_conv1_0.bn.bias[FLOAT, 32]\n",
      "  %msms.s1_conv1_0.bn.running_mean[FLOAT, 32]\n",
      "  %msms.s1_conv1_0.bn.running_var[FLOAT, 32]\n",
      "  %msms.s1_conv1_0.bn.weight[FLOAT, 32]\n",
      "  %msms.s1_conv1_0.conv.depthwise.bias[FLOAT, 16]\n",
      "  %msms.s1_conv1_0.conv.depthwise.weight[FLOAT, 16x1x3x3]\n",
      "  %msms.s1_conv1_0.conv.pointwise.bias[FLOAT, 32]\n",
      "  %msms.s1_conv1_0.conv.pointwise.weight[FLOAT, 32x16x1x1]\n",
      "  %msms.s1_conv1_1.bn.bias[FLOAT, 32]\n",
      "  %msms.s1_conv1_1.bn.running_mean[FLOAT, 32]\n",
      "  %msms.s1_conv1_1.bn.running_var[FLOAT, 32]\n",
      "  %msms.s1_conv1_1.bn.weight[FLOAT, 32]\n",
      "  %msms.s1_conv1_1.conv.depthwise.bias[FLOAT, 32]\n",
      "  %msms.s1_conv1_1.conv.depthwise.weight[FLOAT, 32x1x3x3]\n",
      "  %msms.s1_conv1_1.conv.pointwise.bias[FLOAT, 32]\n",
      "  %msms.s1_conv1_1.conv.pointwise.weight[FLOAT, 32x32x1x1]\n",
      "  %msms.s1_conv1_out.conv.bias[FLOAT, 64]\n",
      "  %msms.s1_conv1_out.conv.weight[FLOAT, 64x32x1x1]\n",
      "  %msms.s1_conv2_0.bn.bias[FLOAT, 64]\n",
      "  %msms.s1_conv2_0.bn.running_mean[FLOAT, 64]\n",
      "  %msms.s1_conv2_0.bn.running_var[FLOAT, 64]\n",
      "  %msms.s1_conv2_0.bn.weight[FLOAT, 64]\n",
      "  %msms.s1_conv2_0.conv.depthwise.bias[FLOAT, 32]\n",
      "  %msms.s1_conv2_0.conv.depthwise.weight[FLOAT, 32x1x3x3]\n",
      "  %msms.s1_conv2_0.conv.pointwise.bias[FLOAT, 64]\n",
      "  %msms.s1_conv2_0.conv.pointwise.weight[FLOAT, 64x32x1x1]\n",
      "  %msms.s1_conv2_1.bn.bias[FLOAT, 64]\n",
      "  %msms.s1_conv2_1.bn.running_mean[FLOAT, 64]\n",
      "  %msms.s1_conv2_1.bn.running_var[FLOAT, 64]\n",
      "  %msms.s1_conv2_1.bn.weight[FLOAT, 64]\n",
      "  %msms.s1_conv2_1.conv.depthwise.bias[FLOAT, 64]\n",
      "  %msms.s1_conv2_1.conv.depthwise.weight[FLOAT, 64x1x3x3]\n",
      "  %msms.s1_conv2_1.conv.pointwise.bias[FLOAT, 64]\n",
      "  %msms.s1_conv2_1.conv.pointwise.weight[FLOAT, 64x64x1x1]\n",
      "  %msms.s1_conv2_out.conv.bias[FLOAT, 64]\n",
      "  %msms.s1_conv2_out.conv.weight[FLOAT, 64x64x1x1]\n",
      "  %msms.s1_conv3_0.bn.bias[FLOAT, 128]\n",
      "  %msms.s1_conv3_0.bn.running_mean[FLOAT, 128]\n",
      "  %msms.s1_conv3_0.bn.running_var[FLOAT, 128]\n",
      "  %msms.s1_conv3_0.bn.weight[FLOAT, 128]\n",
      "  %msms.s1_conv3_0.conv.depthwise.bias[FLOAT, 64]\n",
      "  %msms.s1_conv3_0.conv.depthwise.weight[FLOAT, 64x1x3x3]\n",
      "  %msms.s1_conv3_0.conv.pointwise.bias[FLOAT, 128]\n",
      "  %msms.s1_conv3_0.conv.pointwise.weight[FLOAT, 128x64x1x1]\n",
      "  %msms.s1_conv3_1.bn.bias[FLOAT, 128]\n",
      "  %msms.s1_conv3_1.bn.running_mean[FLOAT, 128]\n",
      "  %msms.s1_conv3_1.bn.running_var[FLOAT, 128]\n",
      "  %msms.s1_conv3_1.bn.weight[FLOAT, 128]\n",
      "  %msms.s1_conv3_1.conv.depthwise.bias[FLOAT, 128]\n",
      "  %msms.s1_conv3_1.conv.depthwise.weight[FLOAT, 128x1x3x3]\n",
      "  %msms.s1_conv3_1.conv.pointwise.bias[FLOAT, 128]\n",
      "  %msms.s1_conv3_1.conv.pointwise.weight[FLOAT, 128x128x1x1]\n",
      "  %msms.s1_conv3_out.conv.bias[FLOAT, 64]\n",
      "  %msms.s1_conv3_out.conv.weight[FLOAT, 64x128x1x1]\n",
      ") {\n",
      "  %162 = Conv[dilations = [1, 1], group = 3, kernel_shape = [3, 3], pads = [1, 1, 1, 1], strides = [1, 1]](%input, %msms.s0_conv0.conv.depthwise.weight, %msms.s0_conv0.conv.depthwise.bias)\n",
      "  %163 = Conv[dilations = [1, 1], group = 1, kernel_shape = [1, 1], pads = [0, 0, 0, 0], strides = [1, 1]](%162, %msms.s0_conv0.conv.pointwise.weight, %msms.s0_conv0.conv.pointwise.bias)\n",
      "  %164 = BatchNormalization[epsilon = 9.99999974737875e-06, momentum = 0.899999976158142](%163, %msms.s0_conv0.bn.weight, %msms.s0_conv0.bn.bias, %msms.s0_conv0.bn.running_mean, %msms.s0_conv0.bn.running_var)\n",
      "  %165 = Relu(%164)\n",
      "  %166 = Pad[mode = 'constant', pads = [0, 0, 0, 0, 0, 0, 0, 0], value = 0](%165)\n",
      "  %167 = AveragePool[kernel_shape = [2, 2], pads = [0, 0, 0, 0], strides = [2, 2]](%166)\n",
      "  %168 = Conv[dilations = [1, 1], group = 3, kernel_shape = [3, 3], pads = [1, 1, 1, 1], strides = [1, 1]](%input, %msms.s1_conv0.conv.depthwise.weight, %msms.s1_conv0.conv.depthwise.bias)\n",
      "  %169 = Conv[dilations = [1, 1], group = 1, kernel_shape = [1, 1], pads = [0, 0, 0, 0], strides = [1, 1]](%168, %msms.s1_conv0.conv.pointwise.weight, %msms.s1_conv0.conv.pointwise.bias)\n",
      "  %170 = BatchNormalization[epsilon = 9.99999974737875e-06, momentum = 0.899999976158142](%169, %msms.s1_conv0.bn.weight, %msms.s1_conv0.bn.bias, %msms.s1_conv0.bn.running_mean, %msms.s1_conv0.bn.running_var)\n",
      "  %171 = Relu(%170)\n",
      "  %172 = MaxPool[kernel_shape = [2, 2], pads = [0, 0, 0, 0], strides = [2, 2]](%171)\n",
      "  %173 = Conv[dilations = [1, 1], group = 16, kernel_shape = [3, 3], pads = [1, 1, 1, 1], strides = [1, 1]](%167, %msms.s0_conv1_0.conv.depthwise.weight, %msms.s0_conv1_0.conv.depthwise.bias)\n",
      "  %174 = Conv[dilations = [1, 1], group = 1, kernel_shape = [1, 1], pads = [0, 0, 0, 0], strides = [1, 1]](%173, %msms.s0_conv1_0.conv.pointwise.weight, %msms.s0_conv1_0.conv.pointwise.bias)\n",
      "  %175 = BatchNormalization[epsilon = 9.99999974737875e-06, momentum = 0.899999976158142](%174, %msms.s0_conv1_0.bn.weight, %msms.s0_conv1_0.bn.bias, %msms.s0_conv1_0.bn.running_mean, %msms.s0_conv1_0.bn.running_var)\n",
      "  %176 = Relu(%175)\n",
      "  %177 = Conv[dilations = [1, 1], group = 32, kernel_shape = [3, 3], pads = [1, 1, 1, 1], strides = [1, 1]](%176, %msms.s0_conv1_1.conv.depthwise.weight, %msms.s0_conv1_1.conv.depthwise.bias)\n",
      "  %178 = Conv[dilations = [1, 1], group = 1, kernel_shape = [1, 1], pads = [0, 0, 0, 0], strides = [1, 1]](%177, %msms.s0_conv1_1.conv.pointwise.weight, %msms.s0_conv1_1.conv.pointwise.bias)\n",
      "  %179 = BatchNormalization[epsilon = 9.99999974737875e-06, momentum = 0.899999976158142](%178, %msms.s0_conv1_1.bn.weight, %msms.s0_conv1_1.bn.bias, %msms.s0_conv1_1.bn.running_mean, %msms.s0_conv1_1.bn.running_var)\n",
      "  %180 = Relu(%179)\n",
      "  %181 = Pad[mode = 'constant', pads = [0, 0, 0, 0, 0, 0, 0, 0], value = 0](%180)\n",
      "  %182 = AveragePool[kernel_shape = [2, 2], pads = [0, 0, 0, 0], strides = [2, 2]](%181)\n",
      "  %183 = Conv[dilations = [1, 1], group = 1, kernel_shape = [1, 1], pads = [0, 0, 0, 0], strides = [1, 1]](%182, %msms.s0_conv1_out.conv.weight, %msms.s0_conv1_out.conv.bias)\n",
      "  %184 = Relu(%183)\n",
      "  %185 = Conv[dilations = [1, 1], group = 16, kernel_shape = [3, 3], pads = [1, 1, 1, 1], strides = [1, 1]](%172, %msms.s1_conv1_0.conv.depthwise.weight, %msms.s1_conv1_0.conv.depthwise.bias)\n",
      "  %186 = Conv[dilations = [1, 1], group = 1, kernel_shape = [1, 1], pads = [0, 0, 0, 0], strides = [1, 1]](%185, %msms.s1_conv1_0.conv.pointwise.weight, %msms.s1_conv1_0.conv.pointwise.bias)\n",
      "  %187 = BatchNormalization[epsilon = 9.99999974737875e-06, momentum = 0.899999976158142](%186, %msms.s1_conv1_0.bn.weight, %msms.s1_conv1_0.bn.bias, %msms.s1_conv1_0.bn.running_mean, %msms.s1_conv1_0.bn.running_var)\n",
      "  %188 = Tanh(%187)\n",
      "  %189 = Conv[dilations = [1, 1], group = 32, kernel_shape = [3, 3], pads = [1, 1, 1, 1], strides = [1, 1]](%188, %msms.s1_conv1_1.conv.depthwise.weight, %msms.s1_conv1_1.conv.depthwise.bias)\n",
      "  %190 = Conv[dilations = [1, 1], group = 1, kernel_shape = [1, 1], pads = [0, 0, 0, 0], strides = [1, 1]](%189, %msms.s1_conv1_1.conv.pointwise.weight, %msms.s1_conv1_1.conv.pointwise.bias)\n",
      "  %191 = BatchNormalization[epsilon = 9.99999974737875e-06, momentum = 0.899999976158142](%190, %msms.s1_conv1_1.bn.weight, %msms.s1_conv1_1.bn.bias, %msms.s1_conv1_1.bn.running_mean, %msms.s1_conv1_1.bn.running_var)\n",
      "  %192 = Tanh(%191)\n",
      "  %193 = MaxPool[kernel_shape = [2, 2], pads = [0, 0, 0, 0], strides = [2, 2]](%192)\n",
      "  %194 = Conv[dilations = [1, 1], group = 1, kernel_shape = [1, 1], pads = [0, 0, 0, 0], strides = [1, 1]](%193, %msms.s1_conv1_out.conv.weight, %msms.s1_conv1_out.conv.bias)\n",
      "  %195 = Tanh(%194)\n",
      "  %196 = Mul(%184, %195)\n",
      "  %197 = Pad[mode = 'constant', pads = [0, 0, 0, 0, 0, 0, 0, 0], value = 0](%196)\n",
      "  %198 = AveragePool[kernel_shape = [2, 2], pads = [0, 0, 0, 0], strides = [2, 2]](%197)\n",
      "  %199 = Conv[dilations = [1, 1], group = 32, kernel_shape = [3, 3], pads = [1, 1, 1, 1], strides = [1, 1]](%182, %msms.s0_conv2_0.conv.depthwise.weight, %msms.s0_conv2_0.conv.depthwise.bias)\n",
      "  %200 = Conv[dilations = [1, 1], group = 1, kernel_shape = [1, 1], pads = [0, 0, 0, 0], strides = [1, 1]](%199, %msms.s0_conv2_0.conv.pointwise.weight, %msms.s0_conv2_0.conv.pointwise.bias)\n",
      "  %201 = BatchNormalization[epsilon = 9.99999974737875e-06, momentum = 0.899999976158142](%200, %msms.s0_conv2_0.bn.weight, %msms.s0_conv2_0.bn.bias, %msms.s0_conv2_0.bn.running_mean, %msms.s0_conv2_0.bn.running_var)\n",
      "  %202 = Relu(%201)\n",
      "  %203 = Conv[dilations = [1, 1], group = 64, kernel_shape = [3, 3], pads = [1, 1, 1, 1], strides = [1, 1]](%202, %msms.s0_conv2_1.conv.depthwise.weight, %msms.s0_conv2_1.conv.depthwise.bias)\n",
      "  %204 = Conv[dilations = [1, 1], group = 1, kernel_shape = [1, 1], pads = [0, 0, 0, 0], strides = [1, 1]](%203, %msms.s0_conv2_1.conv.pointwise.weight, %msms.s0_conv2_1.conv.pointwise.bias)\n",
      "  %205 = BatchNormalization[epsilon = 9.99999974737875e-06, momentum = 0.899999976158142](%204, %msms.s0_conv2_1.bn.weight, %msms.s0_conv2_1.bn.bias, %msms.s0_conv2_1.bn.running_mean, %msms.s0_conv2_1.bn.running_var)\n",
      "  %206 = Relu(%205)\n",
      "  %207 = Pad[mode = 'constant', pads = [0, 0, 0, 0, 0, 0, 0, 0], value = 0](%206)\n",
      "  %208 = AveragePool[kernel_shape = [2, 2], pads = [0, 0, 0, 0], strides = [2, 2]](%207)\n",
      "  %209 = Conv[dilations = [1, 1], group = 1, kernel_shape = [1, 1], pads = [0, 0, 0, 0], strides = [1, 1]](%208, %msms.s0_conv2_out.conv.weight, %msms.s0_conv2_out.conv.bias)\n",
      "  %210 = Relu(%209)\n",
      "  %211 = Conv[dilations = [1, 1], group = 32, kernel_shape = [3, 3], pads = [1, 1, 1, 1], strides = [1, 1]](%193, %msms.s1_conv2_0.conv.depthwise.weight, %msms.s1_conv2_0.conv.depthwise.bias)\n",
      "  %212 = Conv[dilations = [1, 1], group = 1, kernel_shape = [1, 1], pads = [0, 0, 0, 0], strides = [1, 1]](%211, %msms.s1_conv2_0.conv.pointwise.weight, %msms.s1_conv2_0.conv.pointwise.bias)\n",
      "  %213 = BatchNormalization[epsilon = 9.99999974737875e-06, momentum = 0.899999976158142](%212, %msms.s1_conv2_0.bn.weight, %msms.s1_conv2_0.bn.bias, %msms.s1_conv2_0.bn.running_mean, %msms.s1_conv2_0.bn.running_var)\n",
      "  %214 = Tanh(%213)\n",
      "  %215 = Conv[dilations = [1, 1], group = 64, kernel_shape = [3, 3], pads = [1, 1, 1, 1], strides = [1, 1]](%214, %msms.s1_conv2_1.conv.depthwise.weight, %msms.s1_conv2_1.conv.depthwise.bias)\n",
      "  %216 = Conv[dilations = [1, 1], group = 1, kernel_shape = [1, 1], pads = [0, 0, 0, 0], strides = [1, 1]](%215, %msms.s1_conv2_1.conv.pointwise.weight, %msms.s1_conv2_1.conv.pointwise.bias)\n",
      "  %217 = BatchNormalization[epsilon = 9.99999974737875e-06, momentum = 0.899999976158142](%216, %msms.s1_conv2_1.bn.weight, %msms.s1_conv2_1.bn.bias, %msms.s1_conv2_1.bn.running_mean, %msms.s1_conv2_1.bn.running_var)\n",
      "  %218 = Tanh(%217)\n",
      "  %219 = MaxPool[kernel_shape = [2, 2], pads = [0, 0, 0, 0], strides = [2, 2]](%218)\n",
      "  %220 = Conv[dilations = [1, 1], group = 1, kernel_shape = [1, 1], pads = [0, 0, 0, 0], strides = [1, 1]](%219, %msms.s1_conv2_out.conv.weight, %msms.s1_conv2_out.conv.bias)\n",
      "  %221 = Tanh(%220)\n",
      "  %222 = Mul(%210, %221)\n",
      "  %223 = Conv[dilations = [1, 1], group = 64, kernel_shape = [3, 3], pads = [1, 1, 1, 1], strides = [1, 1]](%208, %msms.s0_conv3_0.conv.depthwise.weight, %msms.s0_conv3_0.conv.depthwise.bias)\n",
      "  %224 = Conv[dilations = [1, 1], group = 1, kernel_shape = [1, 1], pads = [0, 0, 0, 0], strides = [1, 1]](%223, %msms.s0_conv3_0.conv.pointwise.weight, %msms.s0_conv3_0.conv.pointwise.bias)\n",
      "  %225 = BatchNormalization[epsilon = 9.99999974737875e-06, momentum = 0.899999976158142](%224, %msms.s0_conv3_0.bn.weight, %msms.s0_conv3_0.bn.bias, %msms.s0_conv3_0.bn.running_mean, %msms.s0_conv3_0.bn.running_var)\n",
      "  %226 = Relu(%225)\n",
      "  %227 = Conv[dilations = [1, 1], group = 128, kernel_shape = [3, 3], pads = [1, 1, 1, 1], strides = [1, 1]](%226, %msms.s0_conv3_1.conv.depthwise.weight, %msms.s0_conv3_1.conv.depthwise.bias)\n",
      "  %228 = Conv[dilations = [1, 1], group = 1, kernel_shape = [1, 1], pads = [0, 0, 0, 0], strides = [1, 1]](%227, %msms.s0_conv3_1.conv.pointwise.weight, %msms.s0_conv3_1.conv.pointwise.bias)\n",
      "  %229 = BatchNormalization[epsilon = 9.99999974737875e-06, momentum = 0.899999976158142](%228, %msms.s0_conv3_1.bn.weight, %msms.s0_conv3_1.bn.bias, %msms.s0_conv3_1.bn.running_mean, %msms.s0_conv3_1.bn.running_var)\n",
      "  %230 = Relu(%229)\n",
      "  %231 = Conv[dilations = [1, 1], group = 1, kernel_shape = [1, 1], pads = [0, 0, 0, 0], strides = [1, 1]](%230, %msms.s0_conv3_out.conv.weight, %msms.s0_conv3_out.conv.bias)\n",
      "  %232 = Relu(%231)\n",
      "  %233 = Conv[dilations = [1, 1], group = 64, kernel_shape = [3, 3], pads = [1, 1, 1, 1], strides = [1, 1]](%219, %msms.s1_conv3_0.conv.depthwise.weight, %msms.s1_conv3_0.conv.depthwise.bias)\n",
      "  %234 = Conv[dilations = [1, 1], group = 1, kernel_shape = [1, 1], pads = [0, 0, 0, 0], strides = [1, 1]](%233, %msms.s1_conv3_0.conv.pointwise.weight, %msms.s1_conv3_0.conv.pointwise.bias)\n",
      "  %235 = BatchNormalization[epsilon = 9.99999974737875e-06, momentum = 0.899999976158142](%234, %msms.s1_conv3_0.bn.weight, %msms.s1_conv3_0.bn.bias, %msms.s1_conv3_0.bn.running_mean, %msms.s1_conv3_0.bn.running_var)\n",
      "  %236 = Tanh(%235)\n",
      "  %237 = Conv[dilations = [1, 1], group = 128, kernel_shape = [3, 3], pads = [1, 1, 1, 1], strides = [1, 1]](%236, %msms.s1_conv3_1.conv.depthwise.weight, %msms.s1_conv3_1.conv.depthwise.bias)\n",
      "  %238 = Conv[dilations = [1, 1], group = 1, kernel_shape = [1, 1], pads = [0, 0, 0, 0], strides = [1, 1]](%237, %msms.s1_conv3_1.conv.pointwise.weight, %msms.s1_conv3_1.conv.pointwise.bias)\n",
      "  %239 = BatchNormalization[epsilon = 9.99999974737875e-06, momentum = 0.899999976158142](%238, %msms.s1_conv3_1.bn.weight, %msms.s1_conv3_1.bn.bias, %msms.s1_conv3_1.bn.running_mean, %msms.s1_conv3_1.bn.running_var)\n",
      "  %240 = Tanh(%239)\n",
      "  %241 = Conv[dilations = [1, 1], group = 1, kernel_shape = [1, 1], pads = [0, 0, 0, 0], strides = [1, 1]](%240, %msms.s1_conv3_out.conv.weight, %msms.s1_conv3_out.conv.bias)\n",
      "  %242 = Tanh(%241)\n",
      "  %243 = Mul(%232, %242)\n",
      "  %244 = ReduceMean[axes = [1], keepdims = 1](%243)\n",
      "  %245 = Sub(%243, %244)\n",
      "  %246 = Mul(%245, %245)\n",
      "  %247 = ReduceMean[axes = [1], keepdims = 1](%246)\n",
      "  %248 = Shape(%247)\n",
      "  %249 = Constant[value = <Scalar Tensor []>]()\n",
      "  %250 = Gather[axis = 0](%248, %249)\n",
      "  %252 = Unsqueeze[axes = [0]](%250)\n",
      "  %254 = Concat[axis = 0](%252, %595)\n",
      "  %255 = Reshape(%247, %254)\n",
      "  %256 = ReduceMean[axes = [1], keepdims = 1](%222)\n",
      "  %257 = Sub(%222, %256)\n",
      "  %258 = Mul(%257, %257)\n",
      "  %259 = ReduceMean[axes = [1], keepdims = 1](%258)\n",
      "  %260 = Shape(%259)\n",
      "  %261 = Constant[value = <Scalar Tensor []>]()\n",
      "  %262 = Gather[axis = 0](%260, %261)\n",
      "  %264 = Unsqueeze[axes = [0]](%262)\n",
      "  %266 = Concat[axis = 0](%264, %596)\n",
      "  %267 = Reshape(%259, %266)\n",
      "  %268 = ReduceMean[axes = [1], keepdims = 1](%198)\n",
      "  %269 = Sub(%198, %268)\n",
      "  %270 = Mul(%269, %269)\n",
      "  %271 = ReduceMean[axes = [1], keepdims = 1](%270)\n",
      "  %272 = Shape(%271)\n",
      "  %273 = Constant[value = <Scalar Tensor []>]()\n",
      "  %274 = Gather[axis = 0](%272, %273)\n",
      "  %276 = Unsqueeze[axes = [0]](%274)\n",
      "  %278 = Concat[axis = 0](%276, %597)\n",
      "  %279 = Reshape(%271, %278)\n",
      "  %280 = Concat[axis = 1](%255, %267, %279)\n",
      "  %281 = Gemm[alpha = 1, beta = 1, transB = 1](%280, %fgsm.fc.weight, %fgsm.fc.bias)\n",
      "  %282 = Sigmoid(%281)\n",
      "  %283 = Shape(%282)\n",
      "  %284 = Constant[value = <Scalar Tensor []>]()\n",
      "  %285 = Gather[axis = 0](%283, %284)\n",
      "  %288 = Unsqueeze[axes = [0]](%285)\n",
      "  %291 = Concat[axis = 0](%288, %598, %599)\n",
      "  %292 = Reshape(%282, %291)\n",
      "  %293 = Gemm[alpha = 1, beta = 1, transB = 1](%255, %fgsm.fm.weight, %fgsm.fm.bias)\n",
      "  %294 = Sigmoid(%293)\n",
      "  %295 = Shape(%294)\n",
      "  %296 = Constant[value = <Scalar Tensor []>]()\n",
      "  %297 = Gather[axis = 0](%295, %296)\n",
      "  %300 = Unsqueeze[axes = [0]](%297)\n",
      "  %303 = Concat[axis = 0](%300, %600, %601)\n",
      "  %304 = Reshape(%294, %303)\n",
      "  %305 = Gemm[alpha = 1, beta = 1, transB = 1](%267, %fgsm.fm.weight, %fgsm.fm.bias)\n",
      "  %306 = Sigmoid(%305)\n",
      "  %307 = Shape(%306)\n",
      "  %308 = Constant[value = <Scalar Tensor []>]()\n",
      "  %309 = Gather[axis = 0](%307, %308)\n",
      "  %312 = Unsqueeze[axes = [0]](%309)\n",
      "  %315 = Concat[axis = 0](%312, %602, %603)\n",
      "  %316 = Reshape(%306, %315)\n",
      "  %317 = Gemm[alpha = 1, beta = 1, transB = 1](%279, %fgsm.fm.weight, %fgsm.fm.bias)\n",
      "  %318 = Sigmoid(%317)\n",
      "  %319 = Shape(%318)\n",
      "  %320 = Constant[value = <Scalar Tensor []>]()\n",
      "  %321 = Gather[axis = 0](%319, %320)\n",
      "  %324 = Unsqueeze[axes = [0]](%321)\n",
      "  %327 = Concat[axis = 0](%324, %604, %605)\n",
      "  %328 = Reshape(%318, %327)\n",
      "  %329 = MatMul(%292, %304)\n",
      "  %330 = MatMul(%292, %316)\n",
      "  %331 = MatMul(%292, %328)\n",
      "  %332 = Shape(%243)\n",
      "  %333 = Constant[value = <Scalar Tensor []>]()\n",
      "  %334 = Gather[axis = 0](%332, %333)\n",
      "  %335 = Shape(%243)\n",
      "  %336 = Constant[value = <Scalar Tensor []>]()\n",
      "  %337 = Gather[axis = 0](%335, %336)\n",
      "  %338 = Shape(%243)\n",
      "  %339 = Constant[value = <Scalar Tensor []>]()\n",
      "  %340 = Gather[axis = 0](%338, %339)\n",
      "  %341 = Mul(%337, %340)\n",
      "  %344 = Unsqueeze[axes = [0]](%341)\n",
      "  %345 = Unsqueeze[axes = [0]](%334)\n",
      "  %346 = Concat[axis = 0](%606, %344, %345)\n",
      "  %347 = Reshape(%243, %346)\n",
      "  %348 = Mul(%337, %340)\n",
      "  %351 = Unsqueeze[axes = [0]](%348)\n",
      "  %352 = Unsqueeze[axes = [0]](%334)\n",
      "  %353 = Concat[axis = 0](%607, %351, %352)\n",
      "  %354 = Reshape(%222, %353)\n",
      "  %355 = Mul(%337, %340)\n",
      "  %358 = Unsqueeze[axes = [0]](%355)\n",
      "  %359 = Unsqueeze[axes = [0]](%334)\n",
      "  %360 = Concat[axis = 0](%608, %358, %359)\n",
      "  %361 = Reshape(%198, %360)\n",
      "  %362 = Concat[axis = 1](%347, %354, %361)\n",
      "  %363 = MatMul(%329, %362)\n",
      "  %364 = MatMul(%330, %362)\n",
      "  %365 = MatMul(%331, %362)\n",
      "  %366 = ReduceSum[axes = [-1], keepdims = 1](%329)\n",
      "  %367 = Constant[value = <Scalar Tensor []>]()\n",
      "  %368 = Add(%366, %367)\n",
      "  %369 = ReduceSum[axes = [-1], keepdims = 1](%330)\n",
      "  %370 = Constant[value = <Scalar Tensor []>]()\n",
      "  %371 = Add(%369, %370)\n",
      "  %372 = ReduceSum[axes = [-1], keepdims = 1](%331)\n",
      "  %373 = Constant[value = <Scalar Tensor []>]()\n",
      "  %374 = Add(%372, %373)\n",
      "  %375 = Div(%363, %368)\n",
      "  %376 = Div(%364, %371)\n",
      "  %377 = Div(%365, %374)\n",
      "  %378 = Concat[axis = 1](%375, %376, %377)\n",
      "  %379 = Unsqueeze[axes = [1]](%378)\n",
      "  %380 = Unsqueeze[axes = [3]](%379)\n",
      "  %382 = MatMul(%380, %609)\n",
      "  %383 = Squeeze[axes = [3]](%382)\n",
      "  %384 = Shape(%caps_layer.affine_w)\n",
      "  %385 = Constant[value = <Scalar Tensor []>]()\n",
      "  %386 = Gather[axis = 0](%384, %385)\n",
      "  %387 = Shape(%caps_layer.affine_w)\n",
      "  %388 = Constant[value = <Scalar Tensor []>]()\n",
      "  %389 = Gather[axis = 0](%387, %388)\n",
      "  %390 = Shape(%383)\n",
      "  %391 = Constant[value = <Scalar Tensor []>]()\n",
      "  %392 = Gather[axis = 0](%390, %391)\n",
      "  %393 = Unsqueeze[axes = [0]](%392)\n",
      "  %394 = Unsqueeze[axes = [0]](%386)\n",
      "  %395 = Unsqueeze[axes = [0]](%389)\n",
      "  %396 = Concat[axis = 0](%393, %394, %395)\n",
      "  %397 = ConstantOfShape[value = <Tensor>](%396)\n",
      "  %398 = Cast[to = 1](%397)\n",
      "  %399 = Exp(%398)\n",
      "  %400 = ReduceSum[axes = [1]](%399)\n",
      "  %401 = Div(%399, %400)\n",
      "  %402 = Unsqueeze[axes = [2]](%401)\n",
      "  %403 = MatMul(%402, %383)\n",
      "  %406 = Pow(%403, %610)\n",
      "  %407 = ReduceSum[axes = [-1], keepdims = 1](%406)\n",
      "  %408 = Constant[value = <Scalar Tensor []>]()\n",
      "  %409 = Add(%407, %408)\n",
      "  %410 = Div(%407, %409)\n",
      "  %411 = Mul(%410, %403)\n",
      "  %412 = Sqrt(%407)\n",
      "  %413 = Constant[value = <Scalar Tensor []>]()\n",
      "  %414 = Add(%412, %413)\n",
      "  %415 = Div(%411, %414)\n",
      "  %416 = Transpose[perm = [0, 1, 3, 2]](%415)\n",
      "  %417 = MatMul(%383, %416)\n",
      "  %418 = Squeeze[axes = [3]](%417)\n",
      "  %419 = Add(%398, %418)\n",
      "  %420 = Exp(%419)\n",
      "  %421 = ReduceSum[axes = [1]](%420)\n",
      "  %422 = Div(%420, %421)\n",
      "  %423 = Unsqueeze[axes = [2]](%422)\n",
      "  %424 = MatMul(%423, %383)\n",
      "  %427 = Pow(%424, %611)\n",
      "  %428 = ReduceSum[axes = [-1], keepdims = 1](%427)\n",
      "  %429 = Constant[value = <Scalar Tensor []>]()\n",
      "  %430 = Add(%428, %429)\n",
      "  %431 = Div(%428, %430)\n",
      "  %432 = Mul(%431, %424)\n",
      "  %433 = Sqrt(%428)\n",
      "  %434 = Constant[value = <Scalar Tensor []>]()\n",
      "  %435 = Add(%433, %434)\n",
      "  %436 = Div(%432, %435)\n",
      "  %437 = Squeeze[axes = [2]](%436)\n",
      "  %438 = Shape(%437)\n",
      "  %439 = Constant[value = <Scalar Tensor []>]()\n",
      "  %440 = Gather[axis = 0](%438, %439)\n",
      "  %441 = Slice[axes = [1], ends = [1], starts = [0]](%437)\n",
      "  %443 = Unsqueeze[axes = [0]](%440)\n",
      "  %445 = Concat[axis = 0](%443, %612)\n",
      "  %446 = Reshape(%441, %445)\n",
      "  %447 = Slice[axes = [1], ends = [2], starts = [1]](%437)\n",
      "  %449 = Unsqueeze[axes = [0]](%440)\n",
      "  %451 = Concat[axis = 0](%449, %613)\n",
      "  %452 = Reshape(%447, %451)\n",
      "  %453 = Slice[axes = [1], ends = [3], starts = [2]](%437)\n",
      "  %455 = Unsqueeze[axes = [0]](%440)\n",
      "  %457 = Concat[axis = 0](%455, %614)\n",
      "  %458 = Reshape(%453, %457)\n",
      "  %459 = Slice[axes = [1], ends = [4], starts = [0]](%446)\n",
      "  %460 = Gemm[alpha = 1, beta = 1, transB = 1](%459, %esp_s1.shift_fc.weight, %esp_s1.shift_fc.bias)\n",
      "  %461 = Tanh(%460)\n",
      "  %462 = Slice[axes = [1], ends = [8], starts = [4]](%446)\n",
      "  %463 = Gemm[alpha = 1, beta = 1, transB = 1](%462, %esp_s1.scale_fc.weight, %esp_s1.scale_fc.bias)\n",
      "  %464 = Tanh(%463)\n",
      "  %465 = Slice[axes = [1], ends = [9223372036854775807], starts = [8]](%446)\n",
      "  %466 = Gemm[alpha = 1, beta = 1, transB = 1](%465, %esp_s1.pred_fc.weight, %esp_s1.pred_fc.bias)\n",
      "  %467 = Relu(%466)\n",
      "  %468 = Shape(%467)\n",
      "  %469 = Constant[value = <Scalar Tensor []>]()\n",
      "  %470 = Gather[axis = 0](%468, %469)\n",
      "  %473 = Unsqueeze[axes = [0]](%470)\n",
      "  %476 = Concat[axis = 0](%473, %615, %616)\n",
      "  %477 = Reshape(%467, %476)\n",
      "  %478 = Slice[axes = [1], ends = [4], starts = [0]](%452)\n",
      "  %479 = Gemm[alpha = 1, beta = 1, transB = 1](%478, %esp_s2.shift_fc.weight, %esp_s2.shift_fc.bias)\n",
      "  %480 = Tanh(%479)\n",
      "  %481 = Slice[axes = [1], ends = [8], starts = [4]](%452)\n",
      "  %482 = Gemm[alpha = 1, beta = 1, transB = 1](%481, %esp_s2.scale_fc.weight, %esp_s2.scale_fc.bias)\n",
      "  %483 = Tanh(%482)\n",
      "  %484 = Slice[axes = [1], ends = [9223372036854775807], starts = [8]](%452)\n",
      "  %485 = Gemm[alpha = 1, beta = 1, transB = 1](%484, %esp_s2.pred_fc.weight, %esp_s2.pred_fc.bias)\n",
      "  %486 = Relu(%485)\n",
      "  %487 = Shape(%486)\n",
      "  %488 = Constant[value = <Scalar Tensor []>]()\n",
      "  %489 = Gather[axis = 0](%487, %488)\n",
      "  %492 = Unsqueeze[axes = [0]](%489)\n",
      "  %495 = Concat[axis = 0](%492, %617, %618)\n",
      "  %496 = Reshape(%486, %495)\n",
      "  %497 = Slice[axes = [1], ends = [4], starts = [0]](%458)\n",
      "  %498 = Gemm[alpha = 1, beta = 1, transB = 1](%497, %esp_s3.shift_fc.weight, %esp_s3.shift_fc.bias)\n",
      "  %499 = Tanh(%498)\n",
      "  %500 = Slice[axes = [1], ends = [8], starts = [4]](%458)\n",
      "  %501 = Gemm[alpha = 1, beta = 1, transB = 1](%500, %esp_s3.scale_fc.weight, %esp_s3.scale_fc.bias)\n",
      "  %502 = Tanh(%501)\n",
      "  %503 = Slice[axes = [1], ends = [9223372036854775807], starts = [8]](%458)\n",
      "  %504 = Gemm[alpha = 1, beta = 1, transB = 1](%503, %esp_s3.pred_fc.weight, %esp_s3.pred_fc.bias)\n",
      "  %505 = Relu(%504)\n",
      "  %506 = Shape(%505)\n",
      "  %507 = Constant[value = <Scalar Tensor []>]()\n",
      "  %508 = Gather[axis = 0](%506, %507)\n",
      "  %511 = Unsqueeze[axes = [0]](%508)\n",
      "  %514 = Concat[axis = 0](%511, %619, %620)\n",
      "  %515 = Reshape(%505, %514)\n",
      "  %516 = Constant[value = <Scalar Tensor []>]()\n",
      "  %517 = Add(%461, %516)\n",
      "  %518 = Constant[value = <Scalar Tensor []>]()\n",
      "  %519 = Gather[axis = 2](%477, %518)\n",
      "  %520 = Mul(%517, %519)\n",
      "  %521 = Constant[value = <Scalar Tensor []>]()\n",
      "  %522 = Gather[axis = 2](%477, %521)\n",
      "  %523 = Mul(%461, %522)\n",
      "  %524 = Add(%520, %523)\n",
      "  %525 = Constant[value = <Scalar Tensor []>]()\n",
      "  %526 = Add(%461, %525)\n",
      "  %527 = Constant[value = <Scalar Tensor []>]()\n",
      "  %528 = Gather[axis = 2](%477, %527)\n",
      "  %529 = Mul(%526, %528)\n",
      "  %530 = Add(%524, %529)\n",
      "  %531 = Constant[value = <Scalar Tensor []>]()\n",
      "  %532 = Add(%464, %531)\n",
      "  %533 = Constant[value = <Scalar Tensor []>]()\n",
      "  %534 = Mul(%532, %533)\n",
      "  %535 = Div(%530, %534)\n",
      "  %536 = Constant[value = <Scalar Tensor []>]()\n",
      "  %537 = Add(%480, %536)\n",
      "  %538 = Constant[value = <Scalar Tensor []>]()\n",
      "  %539 = Gather[axis = 2](%496, %538)\n",
      "  %540 = Mul(%537, %539)\n",
      "  %541 = Constant[value = <Scalar Tensor []>]()\n",
      "  %542 = Gather[axis = 2](%496, %541)\n",
      "  %543 = Mul(%480, %542)\n",
      "  %544 = Add(%540, %543)\n",
      "  %545 = Constant[value = <Scalar Tensor []>]()\n",
      "  %546 = Add(%480, %545)\n",
      "  %547 = Constant[value = <Scalar Tensor []>]()\n",
      "  %548 = Gather[axis = 2](%496, %547)\n",
      "  %549 = Mul(%546, %548)\n",
      "  %550 = Add(%544, %549)\n",
      "  %551 = Constant[value = <Scalar Tensor []>]()\n",
      "  %552 = Add(%464, %551)\n",
      "  %553 = Constant[value = <Scalar Tensor []>]()\n",
      "  %554 = Mul(%552, %553)\n",
      "  %555 = Div(%550, %554)\n",
      "  %556 = Constant[value = <Scalar Tensor []>]()\n",
      "  %557 = Add(%483, %556)\n",
      "  %558 = Constant[value = <Scalar Tensor []>]()\n",
      "  %559 = Mul(%557, %558)\n",
      "  %560 = Div(%555, %559)\n",
      "  %561 = Constant[value = <Scalar Tensor []>]()\n",
      "  %562 = Add(%499, %561)\n",
      "  %563 = Constant[value = <Scalar Tensor []>]()\n",
      "  %564 = Gather[axis = 2](%515, %563)\n",
      "  %565 = Mul(%562, %564)\n",
      "  %566 = Constant[value = <Scalar Tensor []>]()\n",
      "  %567 = Gather[axis = 2](%515, %566)\n",
      "  %568 = Mul(%499, %567)\n",
      "  %569 = Add(%565, %568)\n",
      "  %570 = Constant[value = <Scalar Tensor []>]()\n",
      "  %571 = Add(%499, %570)\n",
      "  %572 = Constant[value = <Scalar Tensor []>]()\n",
      "  %573 = Gather[axis = 2](%515, %572)\n",
      "  %574 = Mul(%571, %573)\n",
      "  %575 = Add(%569, %574)\n",
      "  %576 = Constant[value = <Scalar Tensor []>]()\n",
      "  %577 = Add(%464, %576)\n",
      "  %578 = Constant[value = <Scalar Tensor []>]()\n",
      "  %579 = Mul(%577, %578)\n",
      "  %580 = Div(%575, %579)\n",
      "  %581 = Constant[value = <Scalar Tensor []>]()\n",
      "  %582 = Add(%483, %581)\n",
      "  %583 = Constant[value = <Scalar Tensor []>]()\n",
      "  %584 = Mul(%582, %583)\n",
      "  %585 = Div(%580, %584)\n",
      "  %586 = Constant[value = <Scalar Tensor []>]()\n",
      "  %587 = Add(%502, %586)\n",
      "  %588 = Constant[value = <Scalar Tensor []>]()\n",
      "  %589 = Mul(%587, %588)\n",
      "  %590 = Div(%585, %589)\n",
      "  %591 = Add(%535, %560)\n",
      "  %592 = Add(%591, %590)\n",
      "  %593 = Constant[value = <Scalar Tensor []>]()\n",
      "  %output = Mul(%592, %593)\n",
      "  return %output\n",
      "}\n"
     ]
    }
   ],
   "source": [
    "# Verify the exported ONNX model.\n",
    "# Use a dedicated name so we do not shadow the PyTorch `model` (FSANet)\n",
    "# loaded earlier in the notebook -- `onnx.load` returns a ModelProto,\n",
    "# not a torch module.\n",
    "onnx_model = onnx.load(save_path)\n",
    "\n",
    "# Check that the IR is well formed\n",
    "onnx.checker.check_model(onnx_model)\n",
    "\n",
    "# Print a human readable representation of the graph\n",
    "print(onnx.helper.printable_graph(onnx_model.graph))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "**5. Compare ONNX Runtime and PyTorch Exported Model Outputs:-**"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Model Testing was Successful, ONNXRuntime Model Output matches with Pytorch Model Output!\n"
     ]
    }
   ],
   "source": [
    "# Execute the exported model with ONNX Runtime and confirm its prediction\n",
    "# agrees with the PyTorch model's output (`model_out`) on the same input `x`.\n",
    "ort_session = onnxruntime.InferenceSession(save_path)\n",
    "\n",
    "def to_numpy(tensor):\n",
    "    # Detach from the autograd graph first when gradients are tracked;\n",
    "    # plain tensors can be converted directly.\n",
    "    if tensor.requires_grad:\n",
    "        return tensor.detach().cpu().numpy()\n",
    "    return tensor.cpu().numpy()\n",
    "\n",
    "# compute ONNX Runtime output prediction\n",
    "input_name = ort_session.get_inputs()[0].name\n",
    "ort_outs = ort_session.run(None, {input_name: to_numpy(x)})\n",
    "\n",
    "# compare ONNX Runtime and PyTorch results (tolerances allow for\n",
    "# small float32 discrepancies between the two runtimes)\n",
    "np.testing.assert_allclose(to_numpy(model_out), ort_outs[0], rtol=1e-03, atol=1e-05)\n",
    "\n",
    "print(\"Model Testing was Successful, ONNXRuntime Model Output matches with Pytorch Model Output!\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.6"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
