{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 注意的地方\n",
    "代码里面有部分参数相关的代码需要简单的解释一下，避免跳坑：\n",
    "\n",
    "## 配置文件（.cfg）\n",
    "首先，你需要将配置文件内的 batch 和 subdivisions 参数设置为 1；\n",
    "\n",
    "其次，你需要在配置文件的最后增加一行空行；\n",
    "\n",
    "代码中的此部分需要注意：后期如果你需要转换自己训练的模型，需要修改此部分的参数，将 255 改为你对应使用的数值（255 = 3 × (classes + 4 + 1)）：\n",
    "```python\n",
    "output_tensor_dims['082_convolutional'] = [255, 19, 19]  # 255 代表 3*(classes + 4 + 1)\n",
    "output_tensor_dims['094_convolutional'] = [255, 38, 38]  # 255 代表 3*(classes + 4 + 1)\n",
    "output_tensor_dims['106_convolutional'] = [255, 76, 76]  # 255 代表 3*(classes + 4 + 1)\n",
    "```\n",
    "————————————————\n",
    "\n",
    "原文链接：https://blog.csdn.net/weixin_38106878/article/details/103714551"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Layer of type yolo not supported, skipping ONNX node generation.\n",
      "Layer of type yolo not supported, skipping ONNX node generation.\n",
      "Layer of type yolo not supported, skipping ONNX node generation.\n",
      "graph YOLOv3-608 (\n",
      "  %000_net[FLOAT, 1x3x416x416]\n",
      ") initializers (\n",
      "  %001_convolutional_bn_scale[FLOAT, 32]\n",
      "  %001_convolutional_bn_bias[FLOAT, 32]\n",
      "  %001_convolutional_bn_mean[FLOAT, 32]\n",
      "  %001_convolutional_bn_var[FLOAT, 32]\n",
      "  %001_convolutional_conv_weights[FLOAT, 32x3x3x3]\n",
      "  %002_convolutional_bn_scale[FLOAT, 64]\n",
      "  %002_convolutional_bn_bias[FLOAT, 64]\n",
      "  %002_convolutional_bn_mean[FLOAT, 64]\n",
      "  %002_convolutional_bn_var[FLOAT, 64]\n",
      "  %002_convolutional_conv_weights[FLOAT, 64x32x3x3]\n",
      "  %003_convolutional_bn_scale[FLOAT, 32]\n",
      "  %003_convolutional_bn_bias[FLOAT, 32]\n",
      "  %003_convolutional_bn_mean[FLOAT, 32]\n",
      "  %003_convolutional_bn_var[FLOAT, 32]\n",
      "  %003_convolutional_conv_weights[FLOAT, 32x64x1x1]\n",
      "  %004_convolutional_bn_scale[FLOAT, 64]\n",
      "  %004_convolutional_bn_bias[FLOAT, 64]\n",
      "  %004_convolutional_bn_mean[FLOAT, 64]\n",
      "  %004_convolutional_bn_var[FLOAT, 64]\n",
      "  %004_convolutional_conv_weights[FLOAT, 64x32x3x3]\n",
      "  %006_convolutional_bn_scale[FLOAT, 128]\n",
      "  %006_convolutional_bn_bias[FLOAT, 128]\n",
      "  %006_convolutional_bn_mean[FLOAT, 128]\n",
      "  %006_convolutional_bn_var[FLOAT, 128]\n",
      "  %006_convolutional_conv_weights[FLOAT, 128x64x3x3]\n",
      "  %007_convolutional_bn_scale[FLOAT, 64]\n",
      "  %007_convolutional_bn_bias[FLOAT, 64]\n",
      "  %007_convolutional_bn_mean[FLOAT, 64]\n",
      "  %007_convolutional_bn_var[FLOAT, 64]\n",
      "  %007_convolutional_conv_weights[FLOAT, 64x128x1x1]\n",
      "  %008_convolutional_bn_scale[FLOAT, 128]\n",
      "  %008_convolutional_bn_bias[FLOAT, 128]\n",
      "  %008_convolutional_bn_mean[FLOAT, 128]\n",
      "  %008_convolutional_bn_var[FLOAT, 128]\n",
      "  %008_convolutional_conv_weights[FLOAT, 128x64x3x3]\n",
      "  %010_convolutional_bn_scale[FLOAT, 64]\n",
      "  %010_convolutional_bn_bias[FLOAT, 64]\n",
      "  %010_convolutional_bn_mean[FLOAT, 64]\n",
      "  %010_convolutional_bn_var[FLOAT, 64]\n",
      "  %010_convolutional_conv_weights[FLOAT, 64x128x1x1]\n",
      "  %011_convolutional_bn_scale[FLOAT, 128]\n",
      "  %011_convolutional_bn_bias[FLOAT, 128]\n",
      "  %011_convolutional_bn_mean[FLOAT, 128]\n",
      "  %011_convolutional_bn_var[FLOAT, 128]\n",
      "  %011_convolutional_conv_weights[FLOAT, 128x64x3x3]\n",
      "  %013_convolutional_bn_scale[FLOAT, 256]\n",
      "  %013_convolutional_bn_bias[FLOAT, 256]\n",
      "  %013_convolutional_bn_mean[FLOAT, 256]\n",
      "  %013_convolutional_bn_var[FLOAT, 256]\n",
      "  %013_convolutional_conv_weights[FLOAT, 256x128x3x3]\n",
      "  %014_convolutional_bn_scale[FLOAT, 128]\n",
      "  %014_convolutional_bn_bias[FLOAT, 128]\n",
      "  %014_convolutional_bn_mean[FLOAT, 128]\n",
      "  %014_convolutional_bn_var[FLOAT, 128]\n",
      "  %014_convolutional_conv_weights[FLOAT, 128x256x1x1]\n",
      "  %015_convolutional_bn_scale[FLOAT, 256]\n",
      "  %015_convolutional_bn_bias[FLOAT, 256]\n",
      "  %015_convolutional_bn_mean[FLOAT, 256]\n",
      "  %015_convolutional_bn_var[FLOAT, 256]\n",
      "  %015_convolutional_conv_weights[FLOAT, 256x128x3x3]\n",
      "  %017_convolutional_bn_scale[FLOAT, 128]\n",
      "  %017_convolutional_bn_bias[FLOAT, 128]\n",
      "  %017_convolutional_bn_mean[FLOAT, 128]\n",
      "  %017_convolutional_bn_var[FLOAT, 128]\n",
      "  %017_convolutional_conv_weights[FLOAT, 128x256x1x1]\n",
      "  %018_convolutional_bn_scale[FLOAT, 256]\n",
      "  %018_convolutional_bn_bias[FLOAT, 256]\n",
      "  %018_convolutional_bn_mean[FLOAT, 256]\n",
      "  %018_convolutional_bn_var[FLOAT, 256]\n",
      "  %018_convolutional_conv_weights[FLOAT, 256x128x3x3]\n",
      "  %020_convolutional_bn_scale[FLOAT, 128]\n",
      "  %020_convolutional_bn_bias[FLOAT, 128]\n",
      "  %020_convolutional_bn_mean[FLOAT, 128]\n",
      "  %020_convolutional_bn_var[FLOAT, 128]\n",
      "  %020_convolutional_conv_weights[FLOAT, 128x256x1x1]\n",
      "  %021_convolutional_bn_scale[FLOAT, 256]\n",
      "  %021_convolutional_bn_bias[FLOAT, 256]\n",
      "  %021_convolutional_bn_mean[FLOAT, 256]\n",
      "  %021_convolutional_bn_var[FLOAT, 256]\n",
      "  %021_convolutional_conv_weights[FLOAT, 256x128x3x3]\n",
      "  %023_convolutional_bn_scale[FLOAT, 128]\n",
      "  %023_convolutional_bn_bias[FLOAT, 128]\n",
      "  %023_convolutional_bn_mean[FLOAT, 128]\n",
      "  %023_convolutional_bn_var[FLOAT, 128]\n",
      "  %023_convolutional_conv_weights[FLOAT, 128x256x1x1]\n",
      "  %024_convolutional_bn_scale[FLOAT, 256]\n",
      "  %024_convolutional_bn_bias[FLOAT, 256]\n",
      "  %024_convolutional_bn_mean[FLOAT, 256]\n",
      "  %024_convolutional_bn_var[FLOAT, 256]\n",
      "  %024_convolutional_conv_weights[FLOAT, 256x128x3x3]\n",
      "  %026_convolutional_bn_scale[FLOAT, 128]\n",
      "  %026_convolutional_bn_bias[FLOAT, 128]\n",
      "  %026_convolutional_bn_mean[FLOAT, 128]\n",
      "  %026_convolutional_bn_var[FLOAT, 128]\n",
      "  %026_convolutional_conv_weights[FLOAT, 128x256x1x1]\n",
      "  %027_convolutional_bn_scale[FLOAT, 256]\n",
      "  %027_convolutional_bn_bias[FLOAT, 256]\n",
      "  %027_convolutional_bn_mean[FLOAT, 256]\n",
      "  %027_convolutional_bn_var[FLOAT, 256]\n",
      "  %027_convolutional_conv_weights[FLOAT, 256x128x3x3]\n",
      "  %029_convolutional_bn_scale[FLOAT, 128]\n",
      "  %029_convolutional_bn_bias[FLOAT, 128]\n",
      "  %029_convolutional_bn_mean[FLOAT, 128]\n",
      "  %029_convolutional_bn_var[FLOAT, 128]\n",
      "  %029_convolutional_conv_weights[FLOAT, 128x256x1x1]\n",
      "  %030_convolutional_bn_scale[FLOAT, 256]\n",
      "  %030_convolutional_bn_bias[FLOAT, 256]\n",
      "  %030_convolutional_bn_mean[FLOAT, 256]\n",
      "  %030_convolutional_bn_var[FLOAT, 256]\n",
      "  %030_convolutional_conv_weights[FLOAT, 256x128x3x3]\n",
      "  %032_convolutional_bn_scale[FLOAT, 128]\n",
      "  %032_convolutional_bn_bias[FLOAT, 128]\n",
      "  %032_convolutional_bn_mean[FLOAT, 128]\n",
      "  %032_convolutional_bn_var[FLOAT, 128]\n",
      "  %032_convolutional_conv_weights[FLOAT, 128x256x1x1]\n",
      "  %033_convolutional_bn_scale[FLOAT, 256]\n",
      "  %033_convolutional_bn_bias[FLOAT, 256]\n",
      "  %033_convolutional_bn_mean[FLOAT, 256]\n",
      "  %033_convolutional_bn_var[FLOAT, 256]\n",
      "  %033_convolutional_conv_weights[FLOAT, 256x128x3x3]\n",
      "  %035_convolutional_bn_scale[FLOAT, 128]\n",
      "  %035_convolutional_bn_bias[FLOAT, 128]\n",
      "  %035_convolutional_bn_mean[FLOAT, 128]\n",
      "  %035_convolutional_bn_var[FLOAT, 128]\n",
      "  %035_convolutional_conv_weights[FLOAT, 128x256x1x1]\n",
      "  %036_convolutional_bn_scale[FLOAT, 256]\n",
      "  %036_convolutional_bn_bias[FLOAT, 256]\n",
      "  %036_convolutional_bn_mean[FLOAT, 256]\n",
      "  %036_convolutional_bn_var[FLOAT, 256]\n",
      "  %036_convolutional_conv_weights[FLOAT, 256x128x3x3]\n",
      "  %038_convolutional_bn_scale[FLOAT, 512]\n",
      "  %038_convolutional_bn_bias[FLOAT, 512]\n",
      "  %038_convolutional_bn_mean[FLOAT, 512]\n",
      "  %038_convolutional_bn_var[FLOAT, 512]\n",
      "  %038_convolutional_conv_weights[FLOAT, 512x256x3x3]\n",
      "  %039_convolutional_bn_scale[FLOAT, 256]\n",
      "  %039_convolutional_bn_bias[FLOAT, 256]\n",
      "  %039_convolutional_bn_mean[FLOAT, 256]\n",
      "  %039_convolutional_bn_var[FLOAT, 256]\n",
      "  %039_convolutional_conv_weights[FLOAT, 256x512x1x1]\n",
      "  %040_convolutional_bn_scale[FLOAT, 512]\n",
      "  %040_convolutional_bn_bias[FLOAT, 512]\n",
      "  %040_convolutional_bn_mean[FLOAT, 512]\n",
      "  %040_convolutional_bn_var[FLOAT, 512]\n",
      "  %040_convolutional_conv_weights[FLOAT, 512x256x3x3]\n",
      "  %042_convolutional_bn_scale[FLOAT, 256]\n",
      "  %042_convolutional_bn_bias[FLOAT, 256]\n",
      "  %042_convolutional_bn_mean[FLOAT, 256]\n",
      "  %042_convolutional_bn_var[FLOAT, 256]\n",
      "  %042_convolutional_conv_weights[FLOAT, 256x512x1x1]\n",
      "  %043_convolutional_bn_scale[FLOAT, 512]\n",
      "  %043_convolutional_bn_bias[FLOAT, 512]\n",
      "  %043_convolutional_bn_mean[FLOAT, 512]\n",
      "  %043_convolutional_bn_var[FLOAT, 512]\n",
      "  %043_convolutional_conv_weights[FLOAT, 512x256x3x3]\n",
      "  %045_convolutional_bn_scale[FLOAT, 256]\n",
      "  %045_convolutional_bn_bias[FLOAT, 256]\n",
      "  %045_convolutional_bn_mean[FLOAT, 256]\n",
      "  %045_convolutional_bn_var[FLOAT, 256]\n",
      "  %045_convolutional_conv_weights[FLOAT, 256x512x1x1]\n",
      "  %046_convolutional_bn_scale[FLOAT, 512]\n",
      "  %046_convolutional_bn_bias[FLOAT, 512]\n",
      "  %046_convolutional_bn_mean[FLOAT, 512]\n",
      "  %046_convolutional_bn_var[FLOAT, 512]\n",
      "  %046_convolutional_conv_weights[FLOAT, 512x256x3x3]\n",
      "  %048_convolutional_bn_scale[FLOAT, 256]\n",
      "  %048_convolutional_bn_bias[FLOAT, 256]\n",
      "  %048_convolutional_bn_mean[FLOAT, 256]\n",
      "  %048_convolutional_bn_var[FLOAT, 256]\n",
      "  %048_convolutional_conv_weights[FLOAT, 256x512x1x1]\n",
      "  %049_convolutional_bn_scale[FLOAT, 512]\n",
      "  %049_convolutional_bn_bias[FLOAT, 512]\n",
      "  %049_convolutional_bn_mean[FLOAT, 512]\n",
      "  %049_convolutional_bn_var[FLOAT, 512]\n",
      "  %049_convolutional_conv_weights[FLOAT, 512x256x3x3]\n",
      "  %051_convolutional_bn_scale[FLOAT, 256]\n",
      "  %051_convolutional_bn_bias[FLOAT, 256]\n",
      "  %051_convolutional_bn_mean[FLOAT, 256]\n",
      "  %051_convolutional_bn_var[FLOAT, 256]\n",
      "  %051_convolutional_conv_weights[FLOAT, 256x512x1x1]\n",
      "  %052_convolutional_bn_scale[FLOAT, 512]\n",
      "  %052_convolutional_bn_bias[FLOAT, 512]\n",
      "  %052_convolutional_bn_mean[FLOAT, 512]\n",
      "  %052_convolutional_bn_var[FLOAT, 512]\n",
      "  %052_convolutional_conv_weights[FLOAT, 512x256x3x3]\n",
      "  %054_convolutional_bn_scale[FLOAT, 256]\n",
      "  %054_convolutional_bn_bias[FLOAT, 256]\n",
      "  %054_convolutional_bn_mean[FLOAT, 256]\n",
      "  %054_convolutional_bn_var[FLOAT, 256]\n",
      "  %054_convolutional_conv_weights[FLOAT, 256x512x1x1]\n",
      "  %055_convolutional_bn_scale[FLOAT, 512]\n",
      "  %055_convolutional_bn_bias[FLOAT, 512]\n",
      "  %055_convolutional_bn_mean[FLOAT, 512]\n",
      "  %055_convolutional_bn_var[FLOAT, 512]\n",
      "  %055_convolutional_conv_weights[FLOAT, 512x256x3x3]\n",
      "  %057_convolutional_bn_scale[FLOAT, 256]\n",
      "  %057_convolutional_bn_bias[FLOAT, 256]\n",
      "  %057_convolutional_bn_mean[FLOAT, 256]\n",
      "  %057_convolutional_bn_var[FLOAT, 256]\n",
      "  %057_convolutional_conv_weights[FLOAT, 256x512x1x1]\n",
      "  %058_convolutional_bn_scale[FLOAT, 512]\n",
      "  %058_convolutional_bn_bias[FLOAT, 512]\n",
      "  %058_convolutional_bn_mean[FLOAT, 512]\n",
      "  %058_convolutional_bn_var[FLOAT, 512]\n",
      "  %058_convolutional_conv_weights[FLOAT, 512x256x3x3]\n",
      "  %060_convolutional_bn_scale[FLOAT, 256]\n",
      "  %060_convolutional_bn_bias[FLOAT, 256]\n",
      "  %060_convolutional_bn_mean[FLOAT, 256]\n",
      "  %060_convolutional_bn_var[FLOAT, 256]\n",
      "  %060_convolutional_conv_weights[FLOAT, 256x512x1x1]\n",
      "  %061_convolutional_bn_scale[FLOAT, 512]\n",
      "  %061_convolutional_bn_bias[FLOAT, 512]\n",
      "  %061_convolutional_bn_mean[FLOAT, 512]\n",
      "  %061_convolutional_bn_var[FLOAT, 512]\n",
      "  %061_convolutional_conv_weights[FLOAT, 512x256x3x3]\n",
      "  %063_convolutional_bn_scale[FLOAT, 1024]\n",
      "  %063_convolutional_bn_bias[FLOAT, 1024]\n",
      "  %063_convolutional_bn_mean[FLOAT, 1024]\n",
      "  %063_convolutional_bn_var[FLOAT, 1024]\n",
      "  %063_convolutional_conv_weights[FLOAT, 1024x512x3x3]\n",
      "  %064_convolutional_bn_scale[FLOAT, 512]\n",
      "  %064_convolutional_bn_bias[FLOAT, 512]\n",
      "  %064_convolutional_bn_mean[FLOAT, 512]\n",
      "  %064_convolutional_bn_var[FLOAT, 512]\n",
      "  %064_convolutional_conv_weights[FLOAT, 512x1024x1x1]\n",
      "  %065_convolutional_bn_scale[FLOAT, 1024]\n",
      "  %065_convolutional_bn_bias[FLOAT, 1024]\n",
      "  %065_convolutional_bn_mean[FLOAT, 1024]\n",
      "  %065_convolutional_bn_var[FLOAT, 1024]\n",
      "  %065_convolutional_conv_weights[FLOAT, 1024x512x3x3]\n",
      "  %067_convolutional_bn_scale[FLOAT, 512]\n",
      "  %067_convolutional_bn_bias[FLOAT, 512]\n",
      "  %067_convolutional_bn_mean[FLOAT, 512]\n",
      "  %067_convolutional_bn_var[FLOAT, 512]\n",
      "  %067_convolutional_conv_weights[FLOAT, 512x1024x1x1]\n",
      "  %068_convolutional_bn_scale[FLOAT, 1024]\n",
      "  %068_convolutional_bn_bias[FLOAT, 1024]\n",
      "  %068_convolutional_bn_mean[FLOAT, 1024]\n",
      "  %068_convolutional_bn_var[FLOAT, 1024]\n",
      "  %068_convolutional_conv_weights[FLOAT, 1024x512x3x3]\n",
      "  %070_convolutional_bn_scale[FLOAT, 512]\n",
      "  %070_convolutional_bn_bias[FLOAT, 512]\n",
      "  %070_convolutional_bn_mean[FLOAT, 512]\n",
      "  %070_convolutional_bn_var[FLOAT, 512]\n",
      "  %070_convolutional_conv_weights[FLOAT, 512x1024x1x1]\n",
      "  %071_convolutional_bn_scale[FLOAT, 1024]\n",
      "  %071_convolutional_bn_bias[FLOAT, 1024]\n",
      "  %071_convolutional_bn_mean[FLOAT, 1024]\n",
      "  %071_convolutional_bn_var[FLOAT, 1024]\n",
      "  %071_convolutional_conv_weights[FLOAT, 1024x512x3x3]\n",
      "  %073_convolutional_bn_scale[FLOAT, 512]\n",
      "  %073_convolutional_bn_bias[FLOAT, 512]\n",
      "  %073_convolutional_bn_mean[FLOAT, 512]\n",
      "  %073_convolutional_bn_var[FLOAT, 512]\n",
      "  %073_convolutional_conv_weights[FLOAT, 512x1024x1x1]\n",
      "  %074_convolutional_bn_scale[FLOAT, 1024]\n",
      "  %074_convolutional_bn_bias[FLOAT, 1024]\n",
      "  %074_convolutional_bn_mean[FLOAT, 1024]\n",
      "  %074_convolutional_bn_var[FLOAT, 1024]\n",
      "  %074_convolutional_conv_weights[FLOAT, 1024x512x3x3]\n",
      "  %076_convolutional_bn_scale[FLOAT, 512]\n",
      "  %076_convolutional_bn_bias[FLOAT, 512]\n",
      "  %076_convolutional_bn_mean[FLOAT, 512]\n",
      "  %076_convolutional_bn_var[FLOAT, 512]\n",
      "  %076_convolutional_conv_weights[FLOAT, 512x1024x1x1]\n",
      "  %077_convolutional_bn_scale[FLOAT, 1024]\n",
      "  %077_convolutional_bn_bias[FLOAT, 1024]\n",
      "  %077_convolutional_bn_mean[FLOAT, 1024]\n",
      "  %077_convolutional_bn_var[FLOAT, 1024]\n",
      "  %077_convolutional_conv_weights[FLOAT, 1024x512x3x3]\n",
      "  %078_convolutional_bn_scale[FLOAT, 512]\n",
      "  %078_convolutional_bn_bias[FLOAT, 512]\n",
      "  %078_convolutional_bn_mean[FLOAT, 512]\n",
      "  %078_convolutional_bn_var[FLOAT, 512]\n",
      "  %078_convolutional_conv_weights[FLOAT, 512x1024x1x1]\n",
      "  %079_convolutional_bn_scale[FLOAT, 1024]\n",
      "  %079_convolutional_bn_bias[FLOAT, 1024]\n",
      "  %079_convolutional_bn_mean[FLOAT, 1024]\n",
      "  %079_convolutional_bn_var[FLOAT, 1024]\n",
      "  %079_convolutional_conv_weights[FLOAT, 1024x512x3x3]\n",
      "  %080_convolutional_bn_scale[FLOAT, 512]\n",
      "  %080_convolutional_bn_bias[FLOAT, 512]\n",
      "  %080_convolutional_bn_mean[FLOAT, 512]\n",
      "  %080_convolutional_bn_var[FLOAT, 512]\n",
      "  %080_convolutional_conv_weights[FLOAT, 512x1024x1x1]\n",
      "  %081_convolutional_bn_scale[FLOAT, 1024]\n",
      "  %081_convolutional_bn_bias[FLOAT, 1024]\n",
      "  %081_convolutional_bn_mean[FLOAT, 1024]\n",
      "  %081_convolutional_bn_var[FLOAT, 1024]\n",
      "  %081_convolutional_conv_weights[FLOAT, 1024x512x3x3]\n",
      "  %082_convolutional_conv_bias[FLOAT, 18]\n",
      "  %082_convolutional_conv_weights[FLOAT, 18x1024x1x1]\n",
      "  %085_convolutional_bn_scale[FLOAT, 256]\n",
      "  %085_convolutional_bn_bias[FLOAT, 256]\n",
      "  %085_convolutional_bn_mean[FLOAT, 256]\n",
      "  %085_convolutional_bn_var[FLOAT, 256]\n",
      "  %085_convolutional_conv_weights[FLOAT, 256x512x1x1]\n",
      "  %088_convolutional_bn_scale[FLOAT, 256]\n",
      "  %088_convolutional_bn_bias[FLOAT, 256]\n",
      "  %088_convolutional_bn_mean[FLOAT, 256]\n",
      "  %088_convolutional_bn_var[FLOAT, 256]\n",
      "  %088_convolutional_conv_weights[FLOAT, 256x768x1x1]\n",
      "  %089_convolutional_bn_scale[FLOAT, 512]\n",
      "  %089_convolutional_bn_bias[FLOAT, 512]\n",
      "  %089_convolutional_bn_mean[FLOAT, 512]\n",
      "  %089_convolutional_bn_var[FLOAT, 512]\n",
      "  %089_convolutional_conv_weights[FLOAT, 512x256x3x3]\n",
      "  %090_convolutional_bn_scale[FLOAT, 256]\n",
      "  %090_convolutional_bn_bias[FLOAT, 256]\n",
      "  %090_convolutional_bn_mean[FLOAT, 256]\n",
      "  %090_convolutional_bn_var[FLOAT, 256]\n",
      "  %090_convolutional_conv_weights[FLOAT, 256x512x1x1]\n",
      "  %091_convolutional_bn_scale[FLOAT, 512]\n",
      "  %091_convolutional_bn_bias[FLOAT, 512]\n",
      "  %091_convolutional_bn_mean[FLOAT, 512]\n",
      "  %091_convolutional_bn_var[FLOAT, 512]\n",
      "  %091_convolutional_conv_weights[FLOAT, 512x256x3x3]\n",
      "  %092_convolutional_bn_scale[FLOAT, 256]\n",
      "  %092_convolutional_bn_bias[FLOAT, 256]\n",
      "  %092_convolutional_bn_mean[FLOAT, 256]\n",
      "  %092_convolutional_bn_var[FLOAT, 256]\n",
      "  %092_convolutional_conv_weights[FLOAT, 256x512x1x1]\n",
      "  %093_convolutional_bn_scale[FLOAT, 512]\n",
      "  %093_convolutional_bn_bias[FLOAT, 512]\n",
      "  %093_convolutional_bn_mean[FLOAT, 512]\n",
      "  %093_convolutional_bn_var[FLOAT, 512]\n",
      "  %093_convolutional_conv_weights[FLOAT, 512x256x3x3]\n",
      "  %094_convolutional_conv_bias[FLOAT, 18]\n",
      "  %094_convolutional_conv_weights[FLOAT, 18x512x1x1]\n",
      "  %097_convolutional_bn_scale[FLOAT, 128]\n",
      "  %097_convolutional_bn_bias[FLOAT, 128]\n",
      "  %097_convolutional_bn_mean[FLOAT, 128]\n",
      "  %097_convolutional_bn_var[FLOAT, 128]\n",
      "  %097_convolutional_conv_weights[FLOAT, 128x256x1x1]\n",
      "  %100_convolutional_bn_scale[FLOAT, 128]\n",
      "  %100_convolutional_bn_bias[FLOAT, 128]\n",
      "  %100_convolutional_bn_mean[FLOAT, 128]\n",
      "  %100_convolutional_bn_var[FLOAT, 128]\n",
      "  %100_convolutional_conv_weights[FLOAT, 128x384x1x1]\n",
      "  %101_convolutional_bn_scale[FLOAT, 256]\n",
      "  %101_convolutional_bn_bias[FLOAT, 256]\n",
      "  %101_convolutional_bn_mean[FLOAT, 256]\n",
      "  %101_convolutional_bn_var[FLOAT, 256]\n",
      "  %101_convolutional_conv_weights[FLOAT, 256x128x3x3]\n",
      "  %102_convolutional_bn_scale[FLOAT, 128]\n",
      "  %102_convolutional_bn_bias[FLOAT, 128]\n",
      "  %102_convolutional_bn_mean[FLOAT, 128]\n",
      "  %102_convolutional_bn_var[FLOAT, 128]\n",
      "  %102_convolutional_conv_weights[FLOAT, 128x256x1x1]\n",
      "  %103_convolutional_bn_scale[FLOAT, 256]\n",
      "  %103_convolutional_bn_bias[FLOAT, 256]\n",
      "  %103_convolutional_bn_mean[FLOAT, 256]\n",
      "  %103_convolutional_bn_var[FLOAT, 256]\n",
      "  %103_convolutional_conv_weights[FLOAT, 256x128x3x3]\n",
      "  %104_convolutional_bn_scale[FLOAT, 128]\n",
      "  %104_convolutional_bn_bias[FLOAT, 128]\n",
      "  %104_convolutional_bn_mean[FLOAT, 128]\n",
      "  %104_convolutional_bn_var[FLOAT, 128]\n",
      "  %104_convolutional_conv_weights[FLOAT, 128x256x1x1]\n",
      "  %105_convolutional_bn_scale[FLOAT, 256]\n",
      "  %105_convolutional_bn_bias[FLOAT, 256]\n",
      "  %105_convolutional_bn_mean[FLOAT, 256]\n",
      "  %105_convolutional_bn_var[FLOAT, 256]\n",
      "  %105_convolutional_conv_weights[FLOAT, 256x128x3x3]\n",
      "  %106_convolutional_conv_bias[FLOAT, 18]\n",
      "  %106_convolutional_conv_weights[FLOAT, 18x256x1x1]\n",
      ") {\n",
      "  %001_convolutional = Conv[auto_pad = u'SAME_LOWER', dilations = [1, 1], kernel_shape = [3, 3], strides = [1, 1]](%000_net, %001_convolutional_conv_weights)\n",
      "  %001_convolutional_bn = BatchNormalization[epsilon = 9.99999974737875e-06, momentum = 0.990000009536743](%001_convolutional, %001_convolutional_bn_scale, %001_convolutional_bn_bias, %001_convolutional_bn_mean, %001_convolutional_bn_var)\n",
      "  %001_convolutional_lrelu = LeakyRelu[alpha = 0.100000001490116](%001_convolutional_bn)\n",
      "  %002_convolutional = Conv[auto_pad = u'SAME_LOWER', dilations = [1, 1], kernel_shape = [3, 3], strides = [2, 2]](%001_convolutional_lrelu, %002_convolutional_conv_weights)\n",
      "  %002_convolutional_bn = BatchNormalization[epsilon = 9.99999974737875e-06, momentum = 0.990000009536743](%002_convolutional, %002_convolutional_bn_scale, %002_convolutional_bn_bias, %002_convolutional_bn_mean, %002_convolutional_bn_var)\n",
      "  %002_convolutional_lrelu = LeakyRelu[alpha = 0.100000001490116](%002_convolutional_bn)\n",
      "  %003_convolutional = Conv[auto_pad = u'SAME_LOWER', dilations = [1, 1], kernel_shape = [1, 1], strides = [1, 1]](%002_convolutional_lrelu, %003_convolutional_conv_weights)\n",
      "  %003_convolutional_bn = BatchNormalization[epsilon = 9.99999974737875e-06, momentum = 0.990000009536743](%003_convolutional, %003_convolutional_bn_scale, %003_convolutional_bn_bias, %003_convolutional_bn_mean, %003_convolutional_bn_var)\n",
      "  %003_convolutional_lrelu = LeakyRelu[alpha = 0.100000001490116](%003_convolutional_bn)\n",
      "  %004_convolutional = Conv[auto_pad = u'SAME_LOWER', dilations = [1, 1], kernel_shape = [3, 3], strides = [1, 1]](%003_convolutional_lrelu, %004_convolutional_conv_weights)\n",
      "  %004_convolutional_bn = BatchNormalization[epsilon = 9.99999974737875e-06, momentum = 0.990000009536743](%004_convolutional, %004_convolutional_bn_scale, %004_convolutional_bn_bias, %004_convolutional_bn_mean, %004_convolutional_bn_var)\n",
      "  %004_convolutional_lrelu = LeakyRelu[alpha = 0.100000001490116](%004_convolutional_bn)\n",
      "  %005_shortcut = Add(%004_convolutional_lrelu, %002_convolutional_lrelu)\n",
      "  %006_convolutional = Conv[auto_pad = u'SAME_LOWER', dilations = [1, 1], kernel_shape = [3, 3], strides = [2, 2]](%005_shortcut, %006_convolutional_conv_weights)\n",
      "  %006_convolutional_bn = BatchNormalization[epsilon = 9.99999974737875e-06, momentum = 0.990000009536743](%006_convolutional, %006_convolutional_bn_scale, %006_convolutional_bn_bias, %006_convolutional_bn_mean, %006_convolutional_bn_var)\n",
      "  %006_convolutional_lrelu = LeakyRelu[alpha = 0.100000001490116](%006_convolutional_bn)\n",
      "  %007_convolutional = Conv[auto_pad = u'SAME_LOWER', dilations = [1, 1], kernel_shape = [1, 1], strides = [1, 1]](%006_convolutional_lrelu, %007_convolutional_conv_weights)\n",
      "  %007_convolutional_bn = BatchNormalization[epsilon = 9.99999974737875e-06, momentum = 0.990000009536743](%007_convolutional, %007_convolutional_bn_scale, %007_convolutional_bn_bias, %007_convolutional_bn_mean, %007_convolutional_bn_var)\n",
      "  %007_convolutional_lrelu = LeakyRelu[alpha = 0.100000001490116](%007_convolutional_bn)\n",
      "  %008_convolutional = Conv[auto_pad = u'SAME_LOWER', dilations = [1, 1], kernel_shape = [3, 3], strides = [1, 1]](%007_convolutional_lrelu, %008_convolutional_conv_weights)\n",
      "  %008_convolutional_bn = BatchNormalization[epsilon = 9.99999974737875e-06, momentum = 0.990000009536743](%008_convolutional, %008_convolutional_bn_scale, %008_convolutional_bn_bias, %008_convolutional_bn_mean, %008_convolutional_bn_var)\n",
      "  %008_convolutional_lrelu = LeakyRelu[alpha = 0.100000001490116](%008_convolutional_bn)\n",
      "  %009_shortcut = Add(%008_convolutional_lrelu, %006_convolutional_lrelu)\n",
      "  %010_convolutional = Conv[auto_pad = u'SAME_LOWER', dilations = [1, 1], kernel_shape = [1, 1], strides = [1, 1]](%009_shortcut, %010_convolutional_conv_weights)\n",
      "  %010_convolutional_bn = BatchNormalization[epsilon = 9.99999974737875e-06, momentum = 0.990000009536743](%010_convolutional, %010_convolutional_bn_scale, %010_convolutional_bn_bias, %010_convolutional_bn_mean, %010_convolutional_bn_var)\n",
      "  %010_convolutional_lrelu = LeakyRelu[alpha = 0.100000001490116](%010_convolutional_bn)\n",
      "  %011_convolutional = Conv[auto_pad = u'SAME_LOWER', dilations = [1, 1], kernel_shape = [3, 3], strides = [1, 1]](%010_convolutional_lrelu, %011_convolutional_conv_weights)\n",
      "  %011_convolutional_bn = BatchNormalization[epsilon = 9.99999974737875e-06, momentum = 0.990000009536743](%011_convolutional, %011_convolutional_bn_scale, %011_convolutional_bn_bias, %011_convolutional_bn_mean, %011_convolutional_bn_var)\n",
      "  %011_convolutional_lrelu = LeakyRelu[alpha = 0.100000001490116](%011_convolutional_bn)\n",
      "  %012_shortcut = Add(%011_convolutional_lrelu, %009_shortcut)\n",
      "  %013_convolutional = Conv[auto_pad = u'SAME_LOWER', dilations = [1, 1], kernel_shape = [3, 3], strides = [2, 2]](%012_shortcut, %013_convolutional_conv_weights)\n",
      "  %013_convolutional_bn = BatchNormalization[epsilon = 9.99999974737875e-06, momentum = 0.990000009536743](%013_convolutional, %013_convolutional_bn_scale, %013_convolutional_bn_bias, %013_convolutional_bn_mean, %013_convolutional_bn_var)\n",
      "  %013_convolutional_lrelu = LeakyRelu[alpha = 0.100000001490116](%013_convolutional_bn)\n",
      "  %014_convolutional = Conv[auto_pad = u'SAME_LOWER', dilations = [1, 1], kernel_shape = [1, 1], strides = [1, 1]](%013_convolutional_lrelu, %014_convolutional_conv_weights)\n",
      "  %014_convolutional_bn = BatchNormalization[epsilon = 9.99999974737875e-06, momentum = 0.990000009536743](%014_convolutional, %014_convolutional_bn_scale, %014_convolutional_bn_bias, %014_convolutional_bn_mean, %014_convolutional_bn_var)\n",
      "  %014_convolutional_lrelu = LeakyRelu[alpha = 0.100000001490116](%014_convolutional_bn)\n",
      "  %015_convolutional = Conv[auto_pad = u'SAME_LOWER', dilations = [1, 1], kernel_shape = [3, 3], strides = [1, 1]](%014_convolutional_lrelu, %015_convolutional_conv_weights)\n",
      "  %015_convolutional_bn = BatchNormalization[epsilon = 9.99999974737875e-06, momentum = 0.990000009536743](%015_convolutional, %015_convolutional_bn_scale, %015_convolutional_bn_bias, %015_convolutional_bn_mean, %015_convolutional_bn_var)\n",
      "  %015_convolutional_lrelu = LeakyRelu[alpha = 0.100000001490116](%015_convolutional_bn)\n",
      "  %016_shortcut = Add(%015_convolutional_lrelu, %013_convolutional_lrelu)\n",
      "  %017_convolutional = Conv[auto_pad = u'SAME_LOWER', dilations = [1, 1], kernel_shape = [1, 1], strides = [1, 1]](%016_shortcut, %017_convolutional_conv_weights)\n",
      "  %017_convolutional_bn = BatchNormalization[epsilon = 9.99999974737875e-06, momentum = 0.990000009536743](%017_convolutional, %017_convolutional_bn_scale, %017_convolutional_bn_bias, %017_convolutional_bn_mean, %017_convolutional_bn_var)\n",
      "  %017_convolutional_lrelu = LeakyRelu[alpha = 0.100000001490116](%017_convolutional_bn)\n",
      "  %018_convolutional = Conv[auto_pad = u'SAME_LOWER', dilations = [1, 1], kernel_shape = [3, 3], strides = [1, 1]](%017_convolutional_lrelu, %018_convolutional_conv_weights)\n",
      "  %018_convolutional_bn = BatchNormalization[epsilon = 9.99999974737875e-06, momentum = 0.990000009536743](%018_convolutional, %018_convolutional_bn_scale, %018_convolutional_bn_bias, %018_convolutional_bn_mean, %018_convolutional_bn_var)\n",
      "  %018_convolutional_lrelu = LeakyRelu[alpha = 0.100000001490116](%018_convolutional_bn)\n",
      "  %019_shortcut = Add(%018_convolutional_lrelu, %016_shortcut)\n",
      "  %020_convolutional = Conv[auto_pad = u'SAME_LOWER', dilations = [1, 1], kernel_shape = [1, 1], strides = [1, 1]](%019_shortcut, %020_convolutional_conv_weights)\n",
      "  %020_convolutional_bn = BatchNormalization[epsilon = 9.99999974737875e-06, momentum = 0.990000009536743](%020_convolutional, %020_convolutional_bn_scale, %020_convolutional_bn_bias, %020_convolutional_bn_mean, %020_convolutional_bn_var)\n",
      "  %020_convolutional_lrelu = LeakyRelu[alpha = 0.100000001490116](%020_convolutional_bn)\n",
      "  %021_convolutional = Conv[auto_pad = u'SAME_LOWER', dilations = [1, 1], kernel_shape = [3, 3], strides = [1, 1]](%020_convolutional_lrelu, %021_convolutional_conv_weights)\n",
      "  %021_convolutional_bn = BatchNormalization[epsilon = 9.99999974737875e-06, momentum = 0.990000009536743](%021_convolutional, %021_convolutional_bn_scale, %021_convolutional_bn_bias, %021_convolutional_bn_mean, %021_convolutional_bn_var)\n",
      "  %021_convolutional_lrelu = LeakyRelu[alpha = 0.100000001490116](%021_convolutional_bn)\n",
      "  %022_shortcut = Add(%021_convolutional_lrelu, %019_shortcut)\n",
      "  %023_convolutional = Conv[auto_pad = u'SAME_LOWER', dilations = [1, 1], kernel_shape = [1, 1], strides = [1, 1]](%022_shortcut, %023_convolutional_conv_weights)\n",
      "  %023_convolutional_bn = BatchNormalization[epsilon = 9.99999974737875e-06, momentum = 0.990000009536743](%023_convolutional, %023_convolutional_bn_scale, %023_convolutional_bn_bias, %023_convolutional_bn_mean, %023_convolutional_bn_var)\n",
      "  %023_convolutional_lrelu = LeakyRelu[alpha = 0.100000001490116](%023_convolutional_bn)\n",
      "  %024_convolutional = Conv[auto_pad = u'SAME_LOWER', dilations = [1, 1], kernel_shape = [3, 3], strides = [1, 1]](%023_convolutional_lrelu, %024_convolutional_conv_weights)\n",
      "  %024_convolutional_bn = BatchNormalization[epsilon = 9.99999974737875e-06, momentum = 0.990000009536743](%024_convolutional, %024_convolutional_bn_scale, %024_convolutional_bn_bias, %024_convolutional_bn_mean, %024_convolutional_bn_var)\n",
      "  %024_convolutional_lrelu = LeakyRelu[alpha = 0.100000001490116](%024_convolutional_bn)\n",
      "  %025_shortcut = Add(%024_convolutional_lrelu, %022_shortcut)\n",
      "  %026_convolutional = Conv[auto_pad = u'SAME_LOWER', dilations = [1, 1], kernel_shape = [1, 1], strides = [1, 1]](%025_shortcut, %026_convolutional_conv_weights)\n",
      "  %026_convolutional_bn = BatchNormalization[epsilon = 9.99999974737875e-06, momentum = 0.990000009536743](%026_convolutional, %026_convolutional_bn_scale, %026_convolutional_bn_bias, %026_convolutional_bn_mean, %026_convolutional_bn_var)\n",
      "  %026_convolutional_lrelu = LeakyRelu[alpha = 0.100000001490116](%026_convolutional_bn)\n",
      "  %027_convolutional = Conv[auto_pad = u'SAME_LOWER', dilations = [1, 1], kernel_shape = [3, 3], strides = [1, 1]](%026_convolutional_lrelu, %027_convolutional_conv_weights)\n",
      "  %027_convolutional_bn = BatchNormalization[epsilon = 9.99999974737875e-06, momentum = 0.990000009536743](%027_convolutional, %027_convolutional_bn_scale, %027_convolutional_bn_bias, %027_convolutional_bn_mean, %027_convolutional_bn_var)\n",
      "  %027_convolutional_lrelu = LeakyRelu[alpha = 0.100000001490116](%027_convolutional_bn)\n",
      "  %028_shortcut = Add(%027_convolutional_lrelu, %025_shortcut)\n",
      "  %029_convolutional = Conv[auto_pad = u'SAME_LOWER', dilations = [1, 1], kernel_shape = [1, 1], strides = [1, 1]](%028_shortcut, %029_convolutional_conv_weights)\n",
      "  %029_convolutional_bn = BatchNormalization[epsilon = 9.99999974737875e-06, momentum = 0.990000009536743](%029_convolutional, %029_convolutional_bn_scale, %029_convolutional_bn_bias, %029_convolutional_bn_mean, %029_convolutional_bn_var)\n",
      "  %029_convolutional_lrelu = LeakyRelu[alpha = 0.100000001490116](%029_convolutional_bn)\n",
      "  %030_convolutional = Conv[auto_pad = u'SAME_LOWER', dilations = [1, 1], kernel_shape = [3, 3], strides = [1, 1]](%029_convolutional_lrelu, %030_convolutional_conv_weights)\n",
      "  %030_convolutional_bn = BatchNormalization[epsilon = 9.99999974737875e-06, momentum = 0.990000009536743](%030_convolutional, %030_convolutional_bn_scale, %030_convolutional_bn_bias, %030_convolutional_bn_mean, %030_convolutional_bn_var)\n",
      "  %030_convolutional_lrelu = LeakyRelu[alpha = 0.100000001490116](%030_convolutional_bn)\n",
      "  %031_shortcut = Add(%030_convolutional_lrelu, %028_shortcut)\n",
      "  %032_convolutional = Conv[auto_pad = u'SAME_LOWER', dilations = [1, 1], kernel_shape = [1, 1], strides = [1, 1]](%031_shortcut, %032_convolutional_conv_weights)\n",
      "  %032_convolutional_bn = BatchNormalization[epsilon = 9.99999974737875e-06, momentum = 0.990000009536743](%032_convolutional, %032_convolutional_bn_scale, %032_convolutional_bn_bias, %032_convolutional_bn_mean, %032_convolutional_bn_var)\n",
      "  %032_convolutional_lrelu = LeakyRelu[alpha = 0.100000001490116](%032_convolutional_bn)\n",
      "  %033_convolutional = Conv[auto_pad = u'SAME_LOWER', dilations = [1, 1], kernel_shape = [3, 3], strides = [1, 1]](%032_convolutional_lrelu, %033_convolutional_conv_weights)\n",
      "  %033_convolutional_bn = BatchNormalization[epsilon = 9.99999974737875e-06, momentum = 0.990000009536743](%033_convolutional, %033_convolutional_bn_scale, %033_convolutional_bn_bias, %033_convolutional_bn_mean, %033_convolutional_bn_var)\n",
      "  %033_convolutional_lrelu = LeakyRelu[alpha = 0.100000001490116](%033_convolutional_bn)\n",
      "  %034_shortcut = Add(%033_convolutional_lrelu, %031_shortcut)\n",
      "  %035_convolutional = Conv[auto_pad = u'SAME_LOWER', dilations = [1, 1], kernel_shape = [1, 1], strides = [1, 1]](%034_shortcut, %035_convolutional_conv_weights)\n",
      "  %035_convolutional_bn = BatchNormalization[epsilon = 9.99999974737875e-06, momentum = 0.990000009536743](%035_convolutional, %035_convolutional_bn_scale, %035_convolutional_bn_bias, %035_convolutional_bn_mean, %035_convolutional_bn_var)\n",
      "  %035_convolutional_lrelu = LeakyRelu[alpha = 0.100000001490116](%035_convolutional_bn)\n",
      "  %036_convolutional = Conv[auto_pad = u'SAME_LOWER', dilations = [1, 1], kernel_shape = [3, 3], strides = [1, 1]](%035_convolutional_lrelu, %036_convolutional_conv_weights)\n",
      "  %036_convolutional_bn = BatchNormalization[epsilon = 9.99999974737875e-06, momentum = 0.990000009536743](%036_convolutional, %036_convolutional_bn_scale, %036_convolutional_bn_bias, %036_convolutional_bn_mean, %036_convolutional_bn_var)\n",
      "  %036_convolutional_lrelu = LeakyRelu[alpha = 0.100000001490116](%036_convolutional_bn)\n",
      "  %037_shortcut = Add(%036_convolutional_lrelu, %034_shortcut)\n",
      "  %038_convolutional = Conv[auto_pad = u'SAME_LOWER', dilations = [1, 1], kernel_shape = [3, 3], strides = [2, 2]](%037_shortcut, %038_convolutional_conv_weights)\n",
      "  %038_convolutional_bn = BatchNormalization[epsilon = 9.99999974737875e-06, momentum = 0.990000009536743](%038_convolutional, %038_convolutional_bn_scale, %038_convolutional_bn_bias, %038_convolutional_bn_mean, %038_convolutional_bn_var)\n",
      "  %038_convolutional_lrelu = LeakyRelu[alpha = 0.100000001490116](%038_convolutional_bn)\n",
      "  %039_convolutional = Conv[auto_pad = u'SAME_LOWER', dilations = [1, 1], kernel_shape = [1, 1], strides = [1, 1]](%038_convolutional_lrelu, %039_convolutional_conv_weights)\n",
      "  %039_convolutional_bn = BatchNormalization[epsilon = 9.99999974737875e-06, momentum = 0.990000009536743](%039_convolutional, %039_convolutional_bn_scale, %039_convolutional_bn_bias, %039_convolutional_bn_mean, %039_convolutional_bn_var)\n",
      "  %039_convolutional_lrelu = LeakyRelu[alpha = 0.100000001490116](%039_convolutional_bn)\n",
      "  %040_convolutional = Conv[auto_pad = u'SAME_LOWER', dilations = [1, 1], kernel_shape = [3, 3], strides = [1, 1]](%039_convolutional_lrelu, %040_convolutional_conv_weights)\n",
      "  %040_convolutional_bn = BatchNormalization[epsilon = 9.99999974737875e-06, momentum = 0.990000009536743](%040_convolutional, %040_convolutional_bn_scale, %040_convolutional_bn_bias, %040_convolutional_bn_mean, %040_convolutional_bn_var)\n",
      "  %040_convolutional_lrelu = LeakyRelu[alpha = 0.100000001490116](%040_convolutional_bn)\n",
      "  %041_shortcut = Add(%040_convolutional_lrelu, %038_convolutional_lrelu)\n",
      "  %042_convolutional = Conv[auto_pad = u'SAME_LOWER', dilations = [1, 1], kernel_shape = [1, 1], strides = [1, 1]](%041_shortcut, %042_convolutional_conv_weights)\n",
      "  %042_convolutional_bn = BatchNormalization[epsilon = 9.99999974737875e-06, momentum = 0.990000009536743](%042_convolutional, %042_convolutional_bn_scale, %042_convolutional_bn_bias, %042_convolutional_bn_mean, %042_convolutional_bn_var)\n",
      "  %042_convolutional_lrelu = LeakyRelu[alpha = 0.100000001490116](%042_convolutional_bn)\n",
      "  %043_convolutional = Conv[auto_pad = u'SAME_LOWER', dilations = [1, 1], kernel_shape = [3, 3], strides = [1, 1]](%042_convolutional_lrelu, %043_convolutional_conv_weights)\n",
      "  %043_convolutional_bn = BatchNormalization[epsilon = 9.99999974737875e-06, momentum = 0.990000009536743](%043_convolutional, %043_convolutional_bn_scale, %043_convolutional_bn_bias, %043_convolutional_bn_mean, %043_convolutional_bn_var)\n",
      "  %043_convolutional_lrelu = LeakyRelu[alpha = 0.100000001490116](%043_convolutional_bn)\n",
      "  %044_shortcut = Add(%043_convolutional_lrelu, %041_shortcut)\n",
      "  %045_convolutional = Conv[auto_pad = u'SAME_LOWER', dilations = [1, 1], kernel_shape = [1, 1], strides = [1, 1]](%044_shortcut, %045_convolutional_conv_weights)\n",
      "  %045_convolutional_bn = BatchNormalization[epsilon = 9.99999974737875e-06, momentum = 0.990000009536743](%045_convolutional, %045_convolutional_bn_scale, %045_convolutional_bn_bias, %045_convolutional_bn_mean, %045_convolutional_bn_var)\n",
      "  %045_convolutional_lrelu = LeakyRelu[alpha = 0.100000001490116](%045_convolutional_bn)\n",
      "  %046_convolutional = Conv[auto_pad = u'SAME_LOWER', dilations = [1, 1], kernel_shape = [3, 3], strides = [1, 1]](%045_convolutional_lrelu, %046_convolutional_conv_weights)\n",
      "  %046_convolutional_bn = BatchNormalization[epsilon = 9.99999974737875e-06, momentum = 0.990000009536743](%046_convolutional, %046_convolutional_bn_scale, %046_convolutional_bn_bias, %046_convolutional_bn_mean, %046_convolutional_bn_var)\n",
      "  %046_convolutional_lrelu = LeakyRelu[alpha = 0.100000001490116](%046_convolutional_bn)\n",
      "  %047_shortcut = Add(%046_convolutional_lrelu, %044_shortcut)\n",
      "  %048_convolutional = Conv[auto_pad = u'SAME_LOWER', dilations = [1, 1], kernel_shape = [1, 1], strides = [1, 1]](%047_shortcut, %048_convolutional_conv_weights)\n",
      "  %048_convolutional_bn = BatchNormalization[epsilon = 9.99999974737875e-06, momentum = 0.990000009536743](%048_convolutional, %048_convolutional_bn_scale, %048_convolutional_bn_bias, %048_convolutional_bn_mean, %048_convolutional_bn_var)\n",
      "  %048_convolutional_lrelu = LeakyRelu[alpha = 0.100000001490116](%048_convolutional_bn)\n",
      "  %049_convolutional = Conv[auto_pad = u'SAME_LOWER', dilations = [1, 1], kernel_shape = [3, 3], strides = [1, 1]](%048_convolutional_lrelu, %049_convolutional_conv_weights)\n",
      "  %049_convolutional_bn = BatchNormalization[epsilon = 9.99999974737875e-06, momentum = 0.990000009536743](%049_convolutional, %049_convolutional_bn_scale, %049_convolutional_bn_bias, %049_convolutional_bn_mean, %049_convolutional_bn_var)\n",
      "  %049_convolutional_lrelu = LeakyRelu[alpha = 0.100000001490116](%049_convolutional_bn)\n",
      "  %050_shortcut = Add(%049_convolutional_lrelu, %047_shortcut)\n",
      "  %051_convolutional = Conv[auto_pad = u'SAME_LOWER', dilations = [1, 1], kernel_shape = [1, 1], strides = [1, 1]](%050_shortcut, %051_convolutional_conv_weights)\n",
      "  %051_convolutional_bn = BatchNormalization[epsilon = 9.99999974737875e-06, momentum = 0.990000009536743](%051_convolutional, %051_convolutional_bn_scale, %051_convolutional_bn_bias, %051_convolutional_bn_mean, %051_convolutional_bn_var)\n",
      "  %051_convolutional_lrelu = LeakyRelu[alpha = 0.100000001490116](%051_convolutional_bn)\n",
      "  %052_convolutional = Conv[auto_pad = u'SAME_LOWER', dilations = [1, 1], kernel_shape = [3, 3], strides = [1, 1]](%051_convolutional_lrelu, %052_convolutional_conv_weights)\n",
      "  %052_convolutional_bn = BatchNormalization[epsilon = 9.99999974737875e-06, momentum = 0.990000009536743](%052_convolutional, %052_convolutional_bn_scale, %052_convolutional_bn_bias, %052_convolutional_bn_mean, %052_convolutional_bn_var)\n",
      "  %052_convolutional_lrelu = LeakyRelu[alpha = 0.100000001490116](%052_convolutional_bn)\n",
      "  %053_shortcut = Add(%052_convolutional_lrelu, %050_shortcut)\n",
      "  %054_convolutional = Conv[auto_pad = u'SAME_LOWER', dilations = [1, 1], kernel_shape = [1, 1], strides = [1, 1]](%053_shortcut, %054_convolutional_conv_weights)\n",
      "  %054_convolutional_bn = BatchNormalization[epsilon = 9.99999974737875e-06, momentum = 0.990000009536743](%054_convolutional, %054_convolutional_bn_scale, %054_convolutional_bn_bias, %054_convolutional_bn_mean, %054_convolutional_bn_var)\n",
      "  %054_convolutional_lrelu = LeakyRelu[alpha = 0.100000001490116](%054_convolutional_bn)\n",
      "  %055_convolutional = Conv[auto_pad = u'SAME_LOWER', dilations = [1, 1], kernel_shape = [3, 3], strides = [1, 1]](%054_convolutional_lrelu, %055_convolutional_conv_weights)\n",
      "  %055_convolutional_bn = BatchNormalization[epsilon = 9.99999974737875e-06, momentum = 0.990000009536743](%055_convolutional, %055_convolutional_bn_scale, %055_convolutional_bn_bias, %055_convolutional_bn_mean, %055_convolutional_bn_var)\n",
      "  %055_convolutional_lrelu = LeakyRelu[alpha = 0.100000001490116](%055_convolutional_bn)\n",
      "  %056_shortcut = Add(%055_convolutional_lrelu, %053_shortcut)\n",
      "  %057_convolutional = Conv[auto_pad = u'SAME_LOWER', dilations = [1, 1], kernel_shape = [1, 1], strides = [1, 1]](%056_shortcut, %057_convolutional_conv_weights)\n",
      "  %057_convolutional_bn = BatchNormalization[epsilon = 9.99999974737875e-06, momentum = 0.990000009536743](%057_convolutional, %057_convolutional_bn_scale, %057_convolutional_bn_bias, %057_convolutional_bn_mean, %057_convolutional_bn_var)\n",
      "  %057_convolutional_lrelu = LeakyRelu[alpha = 0.100000001490116](%057_convolutional_bn)\n",
      "  %058_convolutional = Conv[auto_pad = u'SAME_LOWER', dilations = [1, 1], kernel_shape = [3, 3], strides = [1, 1]](%057_convolutional_lrelu, %058_convolutional_conv_weights)\n",
      "  %058_convolutional_bn = BatchNormalization[epsilon = 9.99999974737875e-06, momentum = 0.990000009536743](%058_convolutional, %058_convolutional_bn_scale, %058_convolutional_bn_bias, %058_convolutional_bn_mean, %058_convolutional_bn_var)\n",
      "  %058_convolutional_lrelu = LeakyRelu[alpha = 0.100000001490116](%058_convolutional_bn)\n",
      "  %059_shortcut = Add(%058_convolutional_lrelu, %056_shortcut)\n",
      "  %060_convolutional = Conv[auto_pad = u'SAME_LOWER', dilations = [1, 1], kernel_shape = [1, 1], strides = [1, 1]](%059_shortcut, %060_convolutional_conv_weights)\n",
      "  %060_convolutional_bn = BatchNormalization[epsilon = 9.99999974737875e-06, momentum = 0.990000009536743](%060_convolutional, %060_convolutional_bn_scale, %060_convolutional_bn_bias, %060_convolutional_bn_mean, %060_convolutional_bn_var)\n",
      "  %060_convolutional_lrelu = LeakyRelu[alpha = 0.100000001490116](%060_convolutional_bn)\n",
      "  %061_convolutional = Conv[auto_pad = u'SAME_LOWER', dilations = [1, 1], kernel_shape = [3, 3], strides = [1, 1]](%060_convolutional_lrelu, %061_convolutional_conv_weights)\n",
      "  %061_convolutional_bn = BatchNormalization[epsilon = 9.99999974737875e-06, momentum = 0.990000009536743](%061_convolutional, %061_convolutional_bn_scale, %061_convolutional_bn_bias, %061_convolutional_bn_mean, %061_convolutional_bn_var)\n",
      "  %061_convolutional_lrelu = LeakyRelu[alpha = 0.100000001490116](%061_convolutional_bn)\n",
      "  %062_shortcut = Add(%061_convolutional_lrelu, %059_shortcut)\n",
      "  %063_convolutional = Conv[auto_pad = u'SAME_LOWER', dilations = [1, 1], kernel_shape = [3, 3], strides = [2, 2]](%062_shortcut, %063_convolutional_conv_weights)\n",
      "  %063_convolutional_bn = BatchNormalization[epsilon = 9.99999974737875e-06, momentum = 0.990000009536743](%063_convolutional, %063_convolutional_bn_scale, %063_convolutional_bn_bias, %063_convolutional_bn_mean, %063_convolutional_bn_var)\n",
      "  %063_convolutional_lrelu = LeakyRelu[alpha = 0.100000001490116](%063_convolutional_bn)\n",
      "  %064_convolutional = Conv[auto_pad = u'SAME_LOWER', dilations = [1, 1], kernel_shape = [1, 1], strides = [1, 1]](%063_convolutional_lrelu, %064_convolutional_conv_weights)\n",
      "  %064_convolutional_bn = BatchNormalization[epsilon = 9.99999974737875e-06, momentum = 0.990000009536743](%064_convolutional, %064_convolutional_bn_scale, %064_convolutional_bn_bias, %064_convolutional_bn_mean, %064_convolutional_bn_var)\n",
      "  %064_convolutional_lrelu = LeakyRelu[alpha = 0.100000001490116](%064_convolutional_bn)\n",
      "  %065_convolutional = Conv[auto_pad = u'SAME_LOWER', dilations = [1, 1], kernel_shape = [3, 3], strides = [1, 1]](%064_convolutional_lrelu, %065_convolutional_conv_weights)\n",
      "  %065_convolutional_bn = BatchNormalization[epsilon = 9.99999974737875e-06, momentum = 0.990000009536743](%065_convolutional, %065_convolutional_bn_scale, %065_convolutional_bn_bias, %065_convolutional_bn_mean, %065_convolutional_bn_var)\n",
      "  %065_convolutional_lrelu = LeakyRelu[alpha = 0.100000001490116](%065_convolutional_bn)\n",
      "  %066_shortcut = Add(%065_convolutional_lrelu, %063_convolutional_lrelu)\n",
      "  %067_convolutional = Conv[auto_pad = u'SAME_LOWER', dilations = [1, 1], kernel_shape = [1, 1], strides = [1, 1]](%066_shortcut, %067_convolutional_conv_weights)\n",
      "  %067_convolutional_bn = BatchNormalization[epsilon = 9.99999974737875e-06, momentum = 0.990000009536743](%067_convolutional, %067_convolutional_bn_scale, %067_convolutional_bn_bias, %067_convolutional_bn_mean, %067_convolutional_bn_var)\n",
      "  %067_convolutional_lrelu = LeakyRelu[alpha = 0.100000001490116](%067_convolutional_bn)\n",
      "  %068_convolutional = Conv[auto_pad = u'SAME_LOWER', dilations = [1, 1], kernel_shape = [3, 3], strides = [1, 1]](%067_convolutional_lrelu, %068_convolutional_conv_weights)\n",
      "  %068_convolutional_bn = BatchNormalization[epsilon = 9.99999974737875e-06, momentum = 0.990000009536743](%068_convolutional, %068_convolutional_bn_scale, %068_convolutional_bn_bias, %068_convolutional_bn_mean, %068_convolutional_bn_var)\n",
      "  %068_convolutional_lrelu = LeakyRelu[alpha = 0.100000001490116](%068_convolutional_bn)\n",
      "  %069_shortcut = Add(%068_convolutional_lrelu, %066_shortcut)\n",
      "  %070_convolutional = Conv[auto_pad = u'SAME_LOWER', dilations = [1, 1], kernel_shape = [1, 1], strides = [1, 1]](%069_shortcut, %070_convolutional_conv_weights)\n",
      "  %070_convolutional_bn = BatchNormalization[epsilon = 9.99999974737875e-06, momentum = 0.990000009536743](%070_convolutional, %070_convolutional_bn_scale, %070_convolutional_bn_bias, %070_convolutional_bn_mean, %070_convolutional_bn_var)\n",
      "  %070_convolutional_lrelu = LeakyRelu[alpha = 0.100000001490116](%070_convolutional_bn)\n",
      "  %071_convolutional = Conv[auto_pad = u'SAME_LOWER', dilations = [1, 1], kernel_shape = [3, 3], strides = [1, 1]](%070_convolutional_lrelu, %071_convolutional_conv_weights)\n",
      "  %071_convolutional_bn = BatchNormalization[epsilon = 9.99999974737875e-06, momentum = 0.990000009536743](%071_convolutional, %071_convolutional_bn_scale, %071_convolutional_bn_bias, %071_convolutional_bn_mean, %071_convolutional_bn_var)\n",
      "  %071_convolutional_lrelu = LeakyRelu[alpha = 0.100000001490116](%071_convolutional_bn)\n",
      "  %072_shortcut = Add(%071_convolutional_lrelu, %069_shortcut)\n",
      "  %073_convolutional = Conv[auto_pad = u'SAME_LOWER', dilations = [1, 1], kernel_shape = [1, 1], strides = [1, 1]](%072_shortcut, %073_convolutional_conv_weights)\n",
      "  %073_convolutional_bn = BatchNormalization[epsilon = 9.99999974737875e-06, momentum = 0.990000009536743](%073_convolutional, %073_convolutional_bn_scale, %073_convolutional_bn_bias, %073_convolutional_bn_mean, %073_convolutional_bn_var)\n",
      "  %073_convolutional_lrelu = LeakyRelu[alpha = 0.100000001490116](%073_convolutional_bn)\n",
      "  %074_convolutional = Conv[auto_pad = u'SAME_LOWER', dilations = [1, 1], kernel_shape = [3, 3], strides = [1, 1]](%073_convolutional_lrelu, %074_convolutional_conv_weights)\n",
      "  %074_convolutional_bn = BatchNormalization[epsilon = 9.99999974737875e-06, momentum = 0.990000009536743](%074_convolutional, %074_convolutional_bn_scale, %074_convolutional_bn_bias, %074_convolutional_bn_mean, %074_convolutional_bn_var)\n",
      "  %074_convolutional_lrelu = LeakyRelu[alpha = 0.100000001490116](%074_convolutional_bn)\n",
      "  %075_shortcut = Add(%074_convolutional_lrelu, %072_shortcut)\n",
      "  %076_convolutional = Conv[auto_pad = u'SAME_LOWER', dilations = [1, 1], kernel_shape = [1, 1], strides = [1, 1]](%075_shortcut, %076_convolutional_conv_weights)\n",
      "  %076_convolutional_bn = BatchNormalization[epsilon = 9.99999974737875e-06, momentum = 0.990000009536743](%076_convolutional, %076_convolutional_bn_scale, %076_convolutional_bn_bias, %076_convolutional_bn_mean, %076_convolutional_bn_var)\n",
      "  %076_convolutional_lrelu = LeakyRelu[alpha = 0.100000001490116](%076_convolutional_bn)\n",
      "  %077_convolutional = Conv[auto_pad = u'SAME_LOWER', dilations = [1, 1], kernel_shape = [3, 3], strides = [1, 1]](%076_convolutional_lrelu, %077_convolutional_conv_weights)\n",
      "  %077_convolutional_bn = BatchNormalization[epsilon = 9.99999974737875e-06, momentum = 0.990000009536743](%077_convolutional, %077_convolutional_bn_scale, %077_convolutional_bn_bias, %077_convolutional_bn_mean, %077_convolutional_bn_var)\n",
      "  %077_convolutional_lrelu = LeakyRelu[alpha = 0.100000001490116](%077_convolutional_bn)\n",
      "  %078_convolutional = Conv[auto_pad = u'SAME_LOWER', dilations = [1, 1], kernel_shape = [1, 1], strides = [1, 1]](%077_convolutional_lrelu, %078_convolutional_conv_weights)\n",
      "  %078_convolutional_bn = BatchNormalization[epsilon = 9.99999974737875e-06, momentum = 0.990000009536743](%078_convolutional, %078_convolutional_bn_scale, %078_convolutional_bn_bias, %078_convolutional_bn_mean, %078_convolutional_bn_var)\n",
      "  %078_convolutional_lrelu = LeakyRelu[alpha = 0.100000001490116](%078_convolutional_bn)\n",
      "  %079_convolutional = Conv[auto_pad = u'SAME_LOWER', dilations = [1, 1], kernel_shape = [3, 3], strides = [1, 1]](%078_convolutional_lrelu, %079_convolutional_conv_weights)\n",
      "  %079_convolutional_bn = BatchNormalization[epsilon = 9.99999974737875e-06, momentum = 0.990000009536743](%079_convolutional, %079_convolutional_bn_scale, %079_convolutional_bn_bias, %079_convolutional_bn_mean, %079_convolutional_bn_var)\n",
      "  %079_convolutional_lrelu = LeakyRelu[alpha = 0.100000001490116](%079_convolutional_bn)\n",
      "  %080_convolutional = Conv[auto_pad = u'SAME_LOWER', dilations = [1, 1], kernel_shape = [1, 1], strides = [1, 1]](%079_convolutional_lrelu, %080_convolutional_conv_weights)\n",
      "  %080_convolutional_bn = BatchNormalization[epsilon = 9.99999974737875e-06, momentum = 0.990000009536743](%080_convolutional, %080_convolutional_bn_scale, %080_convolutional_bn_bias, %080_convolutional_bn_mean, %080_convolutional_bn_var)\n",
      "  %080_convolutional_lrelu = LeakyRelu[alpha = 0.100000001490116](%080_convolutional_bn)\n",
      "  %081_convolutional = Conv[auto_pad = u'SAME_LOWER', dilations = [1, 1], kernel_shape = [3, 3], strides = [1, 1]](%080_convolutional_lrelu, %081_convolutional_conv_weights)\n",
      "  %081_convolutional_bn = BatchNormalization[epsilon = 9.99999974737875e-06, momentum = 0.990000009536743](%081_convolutional, %081_convolutional_bn_scale, %081_convolutional_bn_bias, %081_convolutional_bn_mean, %081_convolutional_bn_var)\n",
      "  %081_convolutional_lrelu = LeakyRelu[alpha = 0.100000001490116](%081_convolutional_bn)\n",
      "  %082_convolutional = Conv[auto_pad = u'SAME_LOWER', dilations = [1, 1], kernel_shape = [1, 1], strides = [1, 1]](%081_convolutional_lrelu, %082_convolutional_conv_weights, %082_convolutional_conv_bias)\n",
      "  %085_convolutional = Conv[auto_pad = u'SAME_LOWER', dilations = [1, 1], kernel_shape = [1, 1], strides = [1, 1]](%080_convolutional_lrelu, %085_convolutional_conv_weights)\n",
      "  %085_convolutional_bn = BatchNormalization[epsilon = 9.99999974737875e-06, momentum = 0.990000009536743](%085_convolutional, %085_convolutional_bn_scale, %085_convolutional_bn_bias, %085_convolutional_bn_mean, %085_convolutional_bn_var)\n",
      "  %085_convolutional_lrelu = LeakyRelu[alpha = 0.100000001490116](%085_convolutional_bn)\n",
      "  %086_upsample = Upsample[mode = u'nearest', scales = [1, 1, 2, 2]](%085_convolutional_lrelu)\n",
      "  %087_route = Concat[axis = 1](%086_upsample, %062_shortcut)\n",
      "  %088_convolutional = Conv[auto_pad = u'SAME_LOWER', dilations = [1, 1], kernel_shape = [1, 1], strides = [1, 1]](%087_route, %088_convolutional_conv_weights)\n",
      "  %088_convolutional_bn = BatchNormalization[epsilon = 9.99999974737875e-06, momentum = 0.990000009536743](%088_convolutional, %088_convolutional_bn_scale, %088_convolutional_bn_bias, %088_convolutional_bn_mean, %088_convolutional_bn_var)\n",
      "  %088_convolutional_lrelu = LeakyRelu[alpha = 0.100000001490116](%088_convolutional_bn)\n",
      "  %089_convolutional = Conv[auto_pad = u'SAME_LOWER', dilations = [1, 1], kernel_shape = [3, 3], strides = [1, 1]](%088_convolutional_lrelu, %089_convolutional_conv_weights)\n",
      "  %089_convolutional_bn = BatchNormalization[epsilon = 9.99999974737875e-06, momentum = 0.990000009536743](%089_convolutional, %089_convolutional_bn_scale, %089_convolutional_bn_bias, %089_convolutional_bn_mean, %089_convolutional_bn_var)\n",
      "  %089_convolutional_lrelu = LeakyRelu[alpha = 0.100000001490116](%089_convolutional_bn)\n",
      "  %090_convolutional = Conv[auto_pad = u'SAME_LOWER', dilations = [1, 1], kernel_shape = [1, 1], strides = [1, 1]](%089_convolutional_lrelu, %090_convolutional_conv_weights)\n",
      "  %090_convolutional_bn = BatchNormalization[epsilon = 9.99999974737875e-06, momentum = 0.990000009536743](%090_convolutional, %090_convolutional_bn_scale, %090_convolutional_bn_bias, %090_convolutional_bn_mean, %090_convolutional_bn_var)\n",
      "  %090_convolutional_lrelu = LeakyRelu[alpha = 0.100000001490116](%090_convolutional_bn)\n",
      "  %091_convolutional = Conv[auto_pad = u'SAME_LOWER', dilations = [1, 1], kernel_shape = [3, 3], strides = [1, 1]](%090_convolutional_lrelu, %091_convolutional_conv_weights)\n",
      "  %091_convolutional_bn = BatchNormalization[epsilon = 9.99999974737875e-06, momentum = 0.990000009536743](%091_convolutional, %091_convolutional_bn_scale, %091_convolutional_bn_bias, %091_convolutional_bn_mean, %091_convolutional_bn_var)\n",
      "  %091_convolutional_lrelu = LeakyRelu[alpha = 0.100000001490116](%091_convolutional_bn)\n",
      "  %092_convolutional = Conv[auto_pad = u'SAME_LOWER', dilations = [1, 1], kernel_shape = [1, 1], strides = [1, 1]](%091_convolutional_lrelu, %092_convolutional_conv_weights)\n",
      "  %092_convolutional_bn = BatchNormalization[epsilon = 9.99999974737875e-06, momentum = 0.990000009536743](%092_convolutional, %092_convolutional_bn_scale, %092_convolutional_bn_bias, %092_convolutional_bn_mean, %092_convolutional_bn_var)\n",
      "  %092_convolutional_lrelu = LeakyRelu[alpha = 0.100000001490116](%092_convolutional_bn)\n",
      "  %093_convolutional = Conv[auto_pad = u'SAME_LOWER', dilations = [1, 1], kernel_shape = [3, 3], strides = [1, 1]](%092_convolutional_lrelu, %093_convolutional_conv_weights)\n",
      "  %093_convolutional_bn = BatchNormalization[epsilon = 9.99999974737875e-06, momentum = 0.990000009536743](%093_convolutional, %093_convolutional_bn_scale, %093_convolutional_bn_bias, %093_convolutional_bn_mean, %093_convolutional_bn_var)\n",
      "  %093_convolutional_lrelu = LeakyRelu[alpha = 0.100000001490116](%093_convolutional_bn)\n",
      "  %094_convolutional = Conv[auto_pad = u'SAME_LOWER', dilations = [1, 1], kernel_shape = [1, 1], strides = [1, 1]](%093_convolutional_lrelu, %094_convolutional_conv_weights, %094_convolutional_conv_bias)\n",
      "  %097_convolutional = Conv[auto_pad = u'SAME_LOWER', dilations = [1, 1], kernel_shape = [1, 1], strides = [1, 1]](%092_convolutional_lrelu, %097_convolutional_conv_weights)\n",
      "  %097_convolutional_bn = BatchNormalization[epsilon = 9.99999974737875e-06, momentum = 0.990000009536743](%097_convolutional, %097_convolutional_bn_scale, %097_convolutional_bn_bias, %097_convolutional_bn_mean, %097_convolutional_bn_var)\n",
      "  %097_convolutional_lrelu = LeakyRelu[alpha = 0.100000001490116](%097_convolutional_bn)\n",
      "  %098_upsample = Upsample[mode = u'nearest', scales = [1, 1, 2, 2]](%097_convolutional_lrelu)\n",
      "  %099_route = Concat[axis = 1](%098_upsample, %037_shortcut)\n",
      "  %100_convolutional = Conv[auto_pad = u'SAME_LOWER', dilations = [1, 1], kernel_shape = [1, 1], strides = [1, 1]](%099_route, %100_convolutional_conv_weights)\n",
      "  %100_convolutional_bn = BatchNormalization[epsilon = 9.99999974737875e-06, momentum = 0.990000009536743](%100_convolutional, %100_convolutional_bn_scale, %100_convolutional_bn_bias, %100_convolutional_bn_mean, %100_convolutional_bn_var)\n",
      "  %100_convolutional_lrelu = LeakyRelu[alpha = 0.100000001490116](%100_convolutional_bn)\n",
      "  %101_convolutional = Conv[auto_pad = u'SAME_LOWER', dilations = [1, 1], kernel_shape = [3, 3], strides = [1, 1]](%100_convolutional_lrelu, %101_convolutional_conv_weights)\n",
      "  %101_convolutional_bn = BatchNormalization[epsilon = 9.99999974737875e-06, momentum = 0.990000009536743](%101_convolutional, %101_convolutional_bn_scale, %101_convolutional_bn_bias, %101_convolutional_bn_mean, %101_convolutional_bn_var)\n",
      "  %101_convolutional_lrelu = LeakyRelu[alpha = 0.100000001490116](%101_convolutional_bn)\n",
      "  %102_convolutional = Conv[auto_pad = u'SAME_LOWER', dilations = [1, 1], kernel_shape = [1, 1], strides = [1, 1]](%101_convolutional_lrelu, %102_convolutional_conv_weights)\n",
      "  %102_convolutional_bn = BatchNormalization[epsilon = 9.99999974737875e-06, momentum = 0.990000009536743](%102_convolutional, %102_convolutional_bn_scale, %102_convolutional_bn_bias, %102_convolutional_bn_mean, %102_convolutional_bn_var)\n",
      "  %102_convolutional_lrelu = LeakyRelu[alpha = 0.100000001490116](%102_convolutional_bn)\n",
      "  %103_convolutional = Conv[auto_pad = u'SAME_LOWER', dilations = [1, 1], kernel_shape = [3, 3], strides = [1, 1]](%102_convolutional_lrelu, %103_convolutional_conv_weights)\n",
      "  %103_convolutional_bn = BatchNormalization[epsilon = 9.99999974737875e-06, momentum = 0.990000009536743](%103_convolutional, %103_convolutional_bn_scale, %103_convolutional_bn_bias, %103_convolutional_bn_mean, %103_convolutional_bn_var)\n",
      "  %103_convolutional_lrelu = LeakyRelu[alpha = 0.100000001490116](%103_convolutional_bn)\n",
      "  %104_convolutional = Conv[auto_pad = u'SAME_LOWER', dilations = [1, 1], kernel_shape = [1, 1], strides = [1, 1]](%103_convolutional_lrelu, %104_convolutional_conv_weights)\n",
      "  %104_convolutional_bn = BatchNormalization[epsilon = 9.99999974737875e-06, momentum = 0.990000009536743](%104_convolutional, %104_convolutional_bn_scale, %104_convolutional_bn_bias, %104_convolutional_bn_mean, %104_convolutional_bn_var)\n",
      "  %104_convolutional_lrelu = LeakyRelu[alpha = 0.100000001490116](%104_convolutional_bn)\n",
      "  %105_convolutional = Conv[auto_pad = u'SAME_LOWER', dilations = [1, 1], kernel_shape = [3, 3], strides = [1, 1]](%104_convolutional_lrelu, %105_convolutional_conv_weights)\n",
      "  %105_convolutional_bn = BatchNormalization[epsilon = 9.99999974737875e-06, momentum = 0.990000009536743](%105_convolutional, %105_convolutional_bn_scale, %105_convolutional_bn_bias, %105_convolutional_bn_mean, %105_convolutional_bn_var)\n",
      "  %105_convolutional_lrelu = LeakyRelu[alpha = 0.100000001490116](%105_convolutional_bn)\n",
      "  %106_convolutional = Conv[auto_pad = u'SAME_LOWER', dilations = [1, 1], kernel_shape = [1, 1], strides = [1, 1]](%105_convolutional_lrelu, %106_convolutional_conv_weights, %106_convolutional_conv_bias)\n",
      "  return %082_convolutional, %094_convolutional, %106_convolutional\n",
      "}\n"
     ]
    }
   ],
   "source": [
    "#!/usr/bin/env python2\n",
    "#\n",
    "# Copyright 1993-2018 NVIDIA Corporation.  All rights reserved.\n",
    "#\n",
    "# NOTICE TO LICENSEE:\n",
    "#\n",
    "# This source code and/or documentation (\"Licensed Deliverables\") are\n",
    "# subject to NVIDIA intellectual property rights under U.S. and\n",
    "# international Copyright laws.\n",
    "#\n",
    "# These Licensed Deliverables contained herein is PROPRIETARY and\n",
    "# CONFIDENTIAL to NVIDIA and is being provided under the terms and\n",
    "# conditions of a form of NVIDIA software license agreement by and\n",
    "# between NVIDIA and Licensee (\"License Agreement\") or electronically\n",
    "# accepted by Licensee.  Notwithstanding any terms or conditions to\n",
    "# the contrary in the License Agreement, reproduction or disclosure\n",
    "# of the Licensed Deliverables to any third party without the express\n",
    "# written consent of NVIDIA is prohibited.\n",
    "#\n",
    "# NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE\n",
    "# LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE\n",
    "# SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE.  IT IS\n",
    "# PROVIDED \"AS IS\" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.\n",
    "# NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED\n",
    "# DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,\n",
    "# NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.\n",
    "# NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE\n",
    "# LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY\n",
    "# SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY\n",
    "# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,\n",
    "# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS\n",
    "# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE\n",
    "# OF THESE LICENSED DELIVERABLES.\n",
    "#\n",
    "# U.S. Government End Users.  These Licensed Deliverables are a\n",
    "# \"commercial item\" as that term is defined at 48 C.F.R. 2.101 (OCT\n",
    "# 1995), consisting of \"commercial computer software\" and \"commercial\n",
    "# computer software documentation\" as such terms are used in 48\n",
    "# C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government\n",
    "# only as a commercial end item.  Consistent with 48 C.F.R.12.212 and\n",
    "# 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all\n",
    "# U.S. Government End Users acquire the Licensed Deliverables with\n",
    "# only those rights set forth herein.\n",
    "#\n",
    "# Any use of the Licensed Deliverables in individual and commercial\n",
    "# software must include, in the user documentation and internal\n",
    "# comments to the code, the above Disclaimer and U.S. Government End\n",
    "# Users Notice.\n",
    "#\n",
    "\n",
    "from __future__ import print_function\n",
    "from collections import OrderedDict\n",
    "import hashlib\n",
    "import os.path\n",
    "\n",
    "\n",
    "import onnx\n",
    "from onnx import helper\n",
    "from onnx import TensorProto\n",
    "import numpy as np\n",
    "\n",
    "import sys\n",
    "\n",
    "\n",
    "class DarkNetParser(object):\n",
    "    \"\"\"Definition of a parser for DarkNet-based YOLOv3-608 (only tested for this topology).\"\"\"\n",
    "\n",
    "    def __init__(self, supported_layers):\n",
    "        \"\"\"Initializes a DarkNetParser object.\n",
    "\n",
    "        Keyword argument:\n",
    "        supported_layers -- a string list of supported layers in DarkNet naming convention,\n",
    "        parameters are only added to the class dictionary if a parsed layer is included.\n",
    "        \"\"\"\n",
    "\n",
    "        # A list of YOLOv3 layers containing dictionaries with all layer\n",
    "        # parameters:\n",
    "        self.layer_configs = OrderedDict()\n",
    "        self.supported_layers = supported_layers\n",
    "        # Running counter used to build unique, ordered layer names such as\n",
    "        # '001_convolutional'.\n",
    "        self.layer_counter = 0\n",
    "\n",
    "    def parse_cfg_file(self, cfg_file_path):\n",
    "        \"\"\"Takes the yolov3.cfg file and parses it layer by layer,\n",
    "        appending each layer's parameters as a dictionary to layer_configs.\n",
    "\n",
    "        Keyword argument:\n",
    "        cfg_file_path -- path to the yolov3.cfg file as string\n",
    "        \"\"\"\n",
    "        # NOTE(review): the file is opened in binary mode but its contents are\n",
    "        # then processed with str methods; this only works on Python 2 (where\n",
    "        # bytes == str), which matches the script's declared compatibility.\n",
    "        with open(cfg_file_path, 'rb') as cfg_file:\n",
    "            remainder = cfg_file.read()\n",
    "            # _next_layer returns (None, None, None) once no further '['\n",
    "            # delimiter is found, which ends the loop.\n",
    "            while remainder is not None:\n",
    "                layer_dict, layer_name, remainder = self._next_layer(remainder)\n",
    "                if layer_dict is not None:\n",
    "                    self.layer_configs[layer_name] = layer_dict\n",
    "        return self.layer_configs\n",
    "\n",
    "    def _next_layer(self, remainder):\n",
    "        \"\"\"Takes in a string and segments it by looking for DarkNet delimiters.\n",
    "        Returns the layer parameters and the remaining string after the last delimiter.\n",
    "        Example for the first Conv layer in yolo.cfg ...\n",
    "\n",
    "        [convolutional]\n",
    "        batch_normalize=1\n",
    "        filters=32\n",
    "        size=3\n",
    "        stride=1\n",
    "        pad=1\n",
    "        activation=leaky\n",
    "\n",
    "        ... becomes the following layer_dict return value:\n",
    "        {'activation': 'leaky', 'stride': 1, 'pad': 1, 'filters': 32,\n",
    "        'batch_normalize': 1, 'type': 'convolutional', 'size': 3}.\n",
    "\n",
    "        '001_convolutional' is returned as layer_name, and all lines that follow in yolo.cfg\n",
    "        are returned as the next remainder.\n",
    "\n",
    "        Keyword argument:\n",
    "        remainder -- a string with all raw text after the previously parsed layer\n",
    "        \"\"\"\n",
    "        # Everything before the next '[' is discarded; if no '[' is left, the\n",
    "        # end of the file has been reached.\n",
    "        remainder = remainder.split('[', 1)\n",
    "        if len(remainder) == 2:\n",
    "            remainder = remainder[1]\n",
    "        else:\n",
    "            return None, None, None\n",
    "        # The text between '[' and ']' is the layer type (e.g. 'convolutional').\n",
    "        remainder = remainder.split(']', 1)\n",
    "        if len(remainder) == 2:\n",
    "            layer_type, remainder = remainder\n",
    "        else:\n",
    "            return None, None, None\n",
    "        # If the first non-space character after ']' is '#', that line is a\n",
    "        # comment -- drop it.\n",
    "        if remainder.replace(' ', '')[0] == '#':\n",
    "            remainder = remainder.split('\\n', 1)[1]\n",
    "\n",
    "        # Blocks are separated by a blank line; this is why the cfg file needs\n",
    "        # a trailing empty line after its last block (see the note at the top\n",
    "        # of this notebook). NOTE(review): raises ValueError otherwise.\n",
    "        layer_param_block, remainder = remainder.split('\\n\\n', 1)\n",
    "        layer_param_lines = layer_param_block.split('\\n')[1:]\n",
    "        layer_name = str(self.layer_counter).zfill(3) + '_' + layer_type\n",
    "        layer_dict = dict(type=layer_type)\n",
    "        if layer_type in self.supported_layers:\n",
    "            for param_line in layer_param_lines:\n",
    "                # Skip in-block comment lines.\n",
    "                # NOTE(review): an empty line inside a block would raise\n",
    "                # IndexError here -- assumes well-formed cfg input.\n",
    "                if param_line[0] == '#':\n",
    "                    continue\n",
    "                param_type, param_value = self._parse_params(param_line)\n",
    "                layer_dict[param_type] = param_value\n",
    "        self.layer_counter += 1\n",
    "        return layer_dict, layer_name, remainder\n",
    "\n",
    "    def _parse_params(self, param_line):\n",
    "        \"\"\"Identifies the parameters contained in one of the cfg file and returns\n",
    "        them in the required format for each parameter type, e.g. as a list, an int or a float.\n",
    "\n",
    "        Keyword argument:\n",
    "        param_line -- one parsed line within a layer block\n",
    "        \"\"\"\n",
    "        param_line = param_line.replace(' ', '')\n",
    "        param_type, param_value_raw = param_line.split('=')\n",
    "        param_value = None\n",
    "        if param_type == 'layers':\n",
    "            # 'layers' is a comma-separated list of (possibly negative) indices.\n",
    "            layer_indexes = list()\n",
    "            for index in param_value_raw.split(','):\n",
    "                layer_indexes.append(int(index))\n",
    "            param_value = layer_indexes\n",
    "        elif isinstance(param_value_raw, str) and not param_value_raw.isalpha():\n",
    "            # Numeric value: parse as int when it is all digits (with an\n",
    "            # optional leading '-'), otherwise fall back to float.\n",
    "            condition_param_value_positive = param_value_raw.isdigit()\n",
    "            condition_param_value_negative = param_value_raw[0] == '-' and \\\n",
    "                                             param_value_raw[1:].isdigit()\n",
    "            if condition_param_value_positive or condition_param_value_negative:\n",
    "                param_value = int(param_value_raw)\n",
    "            else:\n",
    "                param_value = float(param_value_raw)\n",
    "        else:\n",
    "            # Purely alphabetic values (e.g. activation names) stay strings.\n",
    "            param_value = str(param_value_raw)\n",
    "        return param_type, param_value\n",
    "\n",
    "\n",
    "class MajorNodeSpecs(object):\n",
    "    \"\"\"Records the ONNX output name and channel count produced for one DarkNet\n",
    "    layer. Layers that yield no ONNX node are still tracked (with a false\n",
    "    created_onnx_node flag) so skip connections can be resolved by index.\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, name, channels):\n",
    "        \"\"\"Store the node's name and output channel count.\n",
    "\n",
    "        Keyword arguments:\n",
    "        name -- name of the ONNX node\n",
    "        channels -- number of output channels of this node\n",
    "        \"\"\"\n",
    "        self.name = name\n",
    "        self.channels = channels\n",
    "        # A node only counts as created when it has a name and a positive,\n",
    "        # integral channel count.\n",
    "        self.created_onnx_node = bool(\n",
    "            name is not None and isinstance(channels, int) and channels > 0)\n",
    "\n",
    "\n",
    "class ConvParams(object):\n",
    "    \"\"\"Stores the hyper parameters of a Conv layer: its base name in the ONNX\n",
    "    graph, whether batch normalization follows it, and the expected shape of\n",
    "    its convolution weights.\n",
    "\n",
    "    Also produces validated tensor names for the layer's weights, rejecting\n",
    "    category/suffix combinations that cannot occur.\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, node_name, batch_normalize, conv_weight_dims):\n",
    "        \"\"\"Remember the base node name (e.g. 101_convolutional), the batch\n",
    "        normalization setting, and the convolutional weights shape.\n",
    "\n",
    "        Keyword arguments:\n",
    "        node_name -- base name of this YOLO convolutional layer\n",
    "        batch_normalize -- bool value if batch normalization is used\n",
    "        conv_weight_dims -- the dimensions of this layer's convolutional weights\n",
    "        \"\"\"\n",
    "        self.node_name = node_name\n",
    "        self.batch_normalize = batch_normalize\n",
    "        # Convolution weights are always 4-dimensional (out, in, h, w).\n",
    "        assert len(conv_weight_dims) == 4\n",
    "        self.conv_weight_dims = conv_weight_dims\n",
    "\n",
    "    def generate_param_name(self, param_category, suffix):\n",
    "        \"\"\"Build '<node>_<category>_<suffix>' after validating the combination.\"\"\"\n",
    "        assert suffix\n",
    "        assert param_category in ('bn', 'conv')\n",
    "        assert suffix in ('scale', 'mean', 'var', 'weights', 'bias')\n",
    "        if param_category == 'bn':\n",
    "            # Batch-norm tensors only exist when the layer actually uses BN.\n",
    "            assert self.batch_normalize\n",
    "            assert suffix in ('scale', 'bias', 'mean', 'var')\n",
    "        elif param_category == 'conv':\n",
    "            assert suffix in ('weights', 'bias')\n",
    "            # A conv-level bias tensor never coexists with batch normalization.\n",
    "            assert not (suffix == 'bias' and self.batch_normalize)\n",
    "        return '_'.join((self.node_name, param_category, suffix))\n",
    "\n",
    "\n",
    "class WeightLoader(object):\n",
    "    \"\"\"Helper class used for loading the serialized weights of a binary file stream\n",
    "    and returning the initializers and the input tensors required for populating\n",
    "    the ONNX graph with weights.\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, weights_file_path):\n",
    "        \"\"\"Initialized with a path to the YOLOv3 .weights file.\n",
    "\n",
    "        Keyword argument:\n",
    "        weights_file_path -- path to the weights file.\n",
    "        \"\"\"\n",
    "        # Opens the file and skips its header, so subsequent reads start at\n",
    "        # the first layer's weights.\n",
    "        self.weights_file = self._open_weights_file(weights_file_path)\n",
    "\n",
    "    def load_conv_weights(self, conv_params):\n",
    "        \"\"\"Returns the initializers with weights from the weights file and\n",
    "        the input tensors of a convolutional layer for all corresponding ONNX nodes.\n",
    "\n",
    "        Keyword argument:\n",
    "        conv_params -- a ConvParams object\n",
    "        \"\"\"\n",
    "        initializer = list()\n",
    "        inputs = list()\n",
    "        if conv_params.batch_normalize:\n",
    "            # DarkNet serializes BN parameters as bias, scale, mean, var --\n",
    "            # the reads below must stay in exactly this order.\n",
    "            bias_init, bias_input = self._create_param_tensors(\n",
    "                conv_params, 'bn', 'bias')\n",
    "            bn_scale_init, bn_scale_input = self._create_param_tensors(\n",
    "                conv_params, 'bn', 'scale')\n",
    "            bn_mean_init, bn_mean_input = self._create_param_tensors(\n",
    "                conv_params, 'bn', 'mean')\n",
    "            bn_var_init, bn_var_input = self._create_param_tensors(\n",
    "                conv_params, 'bn', 'var')\n",
    "            initializer.extend(\n",
    "                [bn_scale_init, bias_init, bn_mean_init, bn_var_init])\n",
    "            inputs.extend([bn_scale_input, bias_input,\n",
    "                           bn_mean_input, bn_var_input])\n",
    "        else:\n",
    "            # Without BN, the conv layer carries its own bias vector.\n",
    "            bias_init, bias_input = self._create_param_tensors(\n",
    "                conv_params, 'conv', 'bias')\n",
    "            initializer.append(bias_init)\n",
    "            inputs.append(bias_input)\n",
    "        # The convolution kernel weights always follow the bias/BN block.\n",
    "        conv_init, conv_input = self._create_param_tensors(\n",
    "            conv_params, 'conv', 'weights')\n",
    "        initializer.append(conv_init)\n",
    "        inputs.append(conv_input)\n",
    "        return initializer, inputs\n",
    "\n",
    "    def _open_weights_file(self, weights_file_path):\n",
    "        \"\"\"Opens a YOLOv3 DarkNet file stream and skips the header.\n",
    "\n",
    "        Keyword argument:\n",
    "        weights_file_path -- path to the weights file.\n",
    "        \"\"\"\n",
    "        weights_file = open(weights_file_path, 'rb')\n",
    "        # The DarkNet header is five int32 values (20 bytes); the result of\n",
    "        # this read is intentionally discarded.\n",
    "        length_header = 5\n",
    "        np.ndarray(\n",
    "            shape=(length_header,), dtype='int32', buffer=weights_file.read(\n",
    "                length_header * 4))\n",
    "        return weights_file\n",
    "\n",
    "    def _create_param_tensors(self, conv_params, param_category, suffix):\n",
    "        \"\"\"Creates the initializers with weights from the weights file together with\n",
    "        the input tensors.\n",
    "\n",
    "        Keyword arguments:\n",
    "        conv_params -- a ConvParams object\n",
    "        param_category -- the category of parameters to be created ('bn' or 'conv')\n",
    "        suffix -- a string determining the sub-type of above param_category (e.g.,\n",
    "        'weights' or 'bias')\n",
    "        \"\"\"\n",
    "        param_name, param_data, param_data_shape = self._load_one_param_type(\n",
    "            conv_params, param_category, suffix)\n",
    "\n",
    "        initializer_tensor = helper.make_tensor(\n",
    "            param_name, TensorProto.FLOAT, param_data_shape, param_data)\n",
    "        input_tensor = helper.make_tensor_value_info(\n",
    "            param_name, TensorProto.FLOAT, param_data_shape)\n",
    "        return initializer_tensor, input_tensor\n",
    "\n",
    "    def _load_one_param_type(self, conv_params, param_category, suffix):\n",
    "        \"\"\"Deserializes the weights from a file stream in the DarkNet order.\n",
    "\n",
    "        Keyword arguments:\n",
    "        conv_params -- a ConvParams object\n",
    "        param_category -- the category of parameters to be created ('bn' or 'conv')\n",
    "        suffix -- a string determining the sub-type of above param_category (e.g.,\n",
    "        'weights' or 'bias')\n",
    "        \"\"\"\n",
    "        param_name = conv_params.generate_param_name(param_category, suffix)\n",
    "        channels_out, channels_in, filter_h, filter_w = conv_params.conv_weight_dims\n",
    "        if param_category == 'bn':\n",
    "            # All BN parameters are per-output-channel vectors.\n",
    "            param_shape = [channels_out]\n",
    "        elif param_category == 'conv':\n",
    "            if suffix == 'weights':\n",
    "                param_shape = [channels_out, channels_in, filter_h, filter_w]\n",
    "            elif suffix == 'bias':\n",
    "                param_shape = [channels_out]\n",
    "        # np.product was deprecated in NumPy 1.25 and removed in 2.0; np.prod\n",
    "        # accepts the list directly, and int() keeps the read size a plain int.\n",
    "        param_size = int(np.prod(param_shape))\n",
    "        param_data = np.ndarray(\n",
    "            shape=param_shape,\n",
    "            dtype='float32',\n",
    "            buffer=self.weights_file.read(param_size * 4))\n",
    "        param_data = param_data.flatten().astype(float)\n",
    "        return param_name, param_data, param_shape\n",
    "\n",
    "\n",
    "class GraphBuilderONNX(object):\n",
    "    \"\"\"Class for creating an ONNX graph from a previously generated list of layer dictionaries.\"\"\"\n",
    "\n",
    "    def __init__(self, output_tensors):\n",
    "        \"\"\"Initialize with all DarkNet default parameters used creating YOLOv3,\n",
    "        and specify the output tensors as an OrderedDict for their output dimensions\n",
    "        with their names as keys.\n",
    "\n",
    "        Keyword argument:\n",
    "        output_tensors -- the output tensors as an OrderedDict containing the keys'\n",
    "        output dimensions\n",
    "        \"\"\"\n",
    "        self.output_tensors = output_tensors\n",
    "        # ONNX NodeProto objects, appended in topological order:\n",
    "        self._nodes = list()\n",
    "        self.graph_def = None\n",
    "        self.input_tensor = None\n",
    "        # Default hyperparameters for batch normalization and LeakyReLU:\n",
    "        self.epsilon_bn = 1e-5\n",
    "        self.momentum_bn = 0.99\n",
    "        self.alpha_lrelu = 0.1\n",
    "        # Maps layer names to ConvParams objects, consumed later for weight\n",
    "        # loading in build_onnx_graph:\n",
    "        self.param_dict = OrderedDict()\n",
    "        self.major_node_specs = list()\n",
    "        # Overwritten by the 'net' layer's batch value in _make_input_tensor:\n",
    "        self.batch_size = 1\n",
    "\n",
    "    def build_onnx_graph(\n",
    "            self,\n",
    "            layer_configs,\n",
    "            weights_file_path,\n",
    "            verbose=True):\n",
    "        \"\"\"Iterate over all layer configs (parsed from the DarkNet representation\n",
    "        of YOLOv3-608), create an ONNX graph, populate it with weights from the weights\n",
    "        file and return the graph definition.\n",
    "\n",
    "        Keyword arguments:\n",
    "        layer_configs -- an OrderedDict object with all parsed layers' configurations\n",
    "        weights_file_path -- location of the weights file\n",
    "        verbose -- toggles if the graph is printed after creation (default: True)\n",
    "        \"\"\"\n",
    "        for layer_name in layer_configs.keys():\n",
    "            layer_dict = layer_configs[layer_name]\n",
    "            major_node_specs = self._make_onnx_node(layer_name, layer_dict)\n",
    "            if major_node_specs.name is not None:\n",
    "                self.major_node_specs.append(major_node_specs)\n",
    "        # Build value infos for the requested output tensors, prepending the\n",
    "        # batch size picked up from the 'net' layer.\n",
    "        outputs = list()\n",
    "        for tensor_name in self.output_tensors.keys():\n",
    "            output_dims = [self.batch_size, ] + \\\n",
    "                          self.output_tensors[tensor_name]\n",
    "            output_tensor = helper.make_tensor_value_info(\n",
    "                tensor_name, TensorProto.FLOAT, output_dims)\n",
    "            outputs.append(output_tensor)\n",
    "        inputs = [self.input_tensor]\n",
    "        weight_loader = WeightLoader(weights_file_path)\n",
    "        initializer = list()\n",
    "        # param_dict only ever contains convolutional layers; iterating it in\n",
    "        # insertion order matches the serialization order of the weights file.\n",
    "        for layer_name in self.param_dict.keys():\n",
    "            _, layer_type = layer_name.split('_', 1)\n",
    "            conv_params = self.param_dict[layer_name]\n",
    "            assert layer_type == 'convolutional'\n",
    "            initializer_layer, inputs_layer = weight_loader.load_conv_weights(\n",
    "                conv_params)\n",
    "            initializer.extend(initializer_layer)\n",
    "            inputs.extend(inputs_layer)\n",
    "        # Drop the loader so its open weights file handle can be reclaimed.\n",
    "        del weight_loader\n",
    "        self.graph_def = helper.make_graph(\n",
    "            nodes=self._nodes,\n",
    "            name='YOLOv3-608',\n",
    "            inputs=inputs,\n",
    "            outputs=outputs,\n",
    "            initializer=initializer\n",
    "        )\n",
    "        if verbose:\n",
    "            print(helper.printable_graph(self.graph_def))\n",
    "        model_def = helper.make_model(self.graph_def,\n",
    "                                      producer_name='NVIDIA TensorRT sample')\n",
    "        return model_def\n",
    "\n",
    "    def _make_onnx_node(self, layer_name, layer_dict):\n",
    "        \"\"\"Take in a layer parameter dictionary, choose the correct function for\n",
    "        creating an ONNX node and store the information important to graph creation\n",
    "        as a MajorNodeSpec object.\n",
    "\n",
    "        Keyword arguments:\n",
    "        layer_name -- the layer's name (also the corresponding key in layer_configs)\n",
    "        layer_dict -- a layer parameter dictionary (one element of layer_configs)\n",
    "        \"\"\"\n",
    "        layer_type = layer_dict['type']\n",
    "        if self.input_tensor is None:\n",
    "            if layer_type == 'net':\n",
    "                major_node_output_name, major_node_output_channels = self._make_input_tensor(\n",
    "                    layer_name, layer_dict)\n",
    "                major_node_specs = MajorNodeSpecs(major_node_output_name,\n",
    "                                                  major_node_output_channels)\n",
    "            else:\n",
    "                raise ValueError('The first node has to be of type \\\"net\\\".')\n",
    "        else:\n",
    "            # Dispatch table from DarkNet layer type to node-building method.\n",
    "            node_creators = dict()\n",
    "            node_creators['convolutional'] = self._make_conv_node\n",
    "            node_creators['shortcut'] = self._make_shortcut_node\n",
    "            node_creators['route'] = self._make_route_node\n",
    "            node_creators['upsample'] = self._make_upsample_node\n",
    "\n",
    "            if layer_type in node_creators.keys():\n",
    "                major_node_output_name, major_node_output_channels = \\\n",
    "                    node_creators[layer_type](layer_name, layer_dict)\n",
    "                major_node_specs = MajorNodeSpecs(major_node_output_name,\n",
    "                                                  major_node_output_channels)\n",
    "            else:\n",
    "                # Unsupported layers (e.g. 'yolo') are still tracked by name so\n",
    "                # the relative indices used by route/shortcut stay consistent.\n",
    "                print(\n",
    "                    'Layer of type %s not supported, skipping ONNX node generation.' %\n",
    "                    layer_type)\n",
    "                major_node_specs = MajorNodeSpecs(layer_name,\n",
    "                                                  None)\n",
    "        return major_node_specs\n",
    "\n",
    "    def _make_input_tensor(self, layer_name, layer_dict):\n",
    "        \"\"\"Create an ONNX input tensor from a 'net' layer and store the batch size.\n",
    "\n",
    "        Keyword arguments:\n",
    "        layer_name -- the layer's name (also the corresponding key in layer_configs)\n",
    "        layer_dict -- a layer parameter dictionary (one element of layer_configs)\n",
    "        \"\"\"\n",
    "        batch_size = layer_dict['batch']\n",
    "        channels = layer_dict['channels']\n",
    "        height = layer_dict['height']\n",
    "        width = layer_dict['width']\n",
    "        self.batch_size = batch_size\n",
    "        input_tensor = helper.make_tensor_value_info(\n",
    "            str(layer_name), TensorProto.FLOAT, [\n",
    "                batch_size, channels, height, width])\n",
    "        self.input_tensor = input_tensor\n",
    "        return layer_name, channels\n",
    "\n",
    "    def _get_previous_node_specs(self, target_index=-1):\n",
    "        \"\"\"Get a previously generated ONNX node (skip those that were not generated).\n",
    "        Target index can be passed for jumping to a specific index.\n",
    "\n",
    "        Keyword arguments:\n",
    "        target_index -- optional for jumping to a specific index (default: -1 for jumping\n",
    "        to previous element)\n",
    "        \"\"\"\n",
    "        # Walk backwards from target_index, skipping specs for which no ONNX\n",
    "        # node was created (e.g. 'yolo' layers).\n",
    "        previous_node = None\n",
    "        for node in self.major_node_specs[target_index::-1]:\n",
    "            if node.created_onnx_node:\n",
    "                previous_node = node\n",
    "                break\n",
    "        assert previous_node is not None\n",
    "        return previous_node\n",
    "\n",
    "    def _make_conv_node(self, layer_name, layer_dict):\n",
    "        \"\"\"Create an ONNX Conv node with optional batch normalization and\n",
    "        activation nodes.\n",
    "\n",
    "        Keyword arguments:\n",
    "        layer_name -- the layer's name (also the corresponding key in layer_configs)\n",
    "        layer_dict -- a layer parameter dictionary (one element of layer_configs)\n",
    "        \"\"\"\n",
    "        previous_node_specs = self._get_previous_node_specs()\n",
    "        inputs = [previous_node_specs.name]\n",
    "        previous_channels = previous_node_specs.channels\n",
    "        kernel_size = layer_dict['size']\n",
    "        stride = layer_dict['stride']\n",
    "        filters = layer_dict['filters']\n",
    "        batch_normalize = False\n",
    "        if 'batch_normalize' in layer_dict.keys(\n",
    "        ) and layer_dict['batch_normalize'] == 1:\n",
    "            batch_normalize = True\n",
    "\n",
    "        kernel_shape = [kernel_size, kernel_size]\n",
    "        weights_shape = [filters, previous_channels] + kernel_shape\n",
    "        conv_params = ConvParams(layer_name, batch_normalize, weights_shape)\n",
    "\n",
    "        strides = [stride, stride]\n",
    "        dilations = [1, 1]\n",
    "        weights_name = conv_params.generate_param_name('conv', 'weights')\n",
    "        inputs.append(weights_name)\n",
    "        # A separate bias input only exists when there is no batch norm.\n",
    "        if not batch_normalize:\n",
    "            bias_name = conv_params.generate_param_name('conv', 'bias')\n",
    "            inputs.append(bias_name)\n",
    "\n",
    "        conv_node = helper.make_node(\n",
    "            'Conv',\n",
    "            inputs=inputs,\n",
    "            outputs=[layer_name],\n",
    "            kernel_shape=kernel_shape,\n",
    "            strides=strides,\n",
    "            auto_pad='SAME_LOWER',\n",
    "            dilations=dilations,\n",
    "            name=layer_name\n",
    "        )\n",
    "        self._nodes.append(conv_node)\n",
    "        inputs = [layer_name]\n",
    "        layer_name_output = layer_name\n",
    "\n",
    "        if batch_normalize:\n",
    "            layer_name_bn = layer_name + '_bn'\n",
    "            # After the conv output, parameter inputs are appended in the\n",
    "            # order scale, bias, mean, var.\n",
    "            bn_param_suffixes = ['scale', 'bias', 'mean', 'var']\n",
    "            for suffix in bn_param_suffixes:\n",
    "                bn_param_name = conv_params.generate_param_name('bn', suffix)\n",
    "                inputs.append(bn_param_name)\n",
    "            batchnorm_node = helper.make_node(\n",
    "                'BatchNormalization',\n",
    "                inputs=inputs,\n",
    "                outputs=[layer_name_bn],\n",
    "                epsilon=self.epsilon_bn,\n",
    "                momentum=self.momentum_bn,\n",
    "                name=layer_name_bn\n",
    "            )\n",
    "            self._nodes.append(batchnorm_node)\n",
    "            inputs = [layer_name_bn]\n",
    "            layer_name_output = layer_name_bn\n",
    "\n",
    "        if layer_dict['activation'] == 'leaky':\n",
    "            layer_name_lrelu = layer_name + '_lrelu'\n",
    "\n",
    "            lrelu_node = helper.make_node(\n",
    "                'LeakyRelu',\n",
    "                inputs=inputs,\n",
    "                outputs=[layer_name_lrelu],\n",
    "                name=layer_name_lrelu,\n",
    "                alpha=self.alpha_lrelu\n",
    "            )\n",
    "            self._nodes.append(lrelu_node)\n",
    "            inputs = [layer_name_lrelu]\n",
    "            layer_name_output = layer_name_lrelu\n",
    "        elif layer_dict['activation'] == 'linear':\n",
    "            # Linear activation is the identity -- no extra node needed.\n",
    "            pass\n",
    "        else:\n",
    "            print('Activation not supported.')\n",
    "\n",
    "        # Remember the parameter shapes so the weights can be loaded later.\n",
    "        self.param_dict[layer_name] = conv_params\n",
    "        return layer_name_output, filters\n",
    "\n",
    "    def _make_shortcut_node(self, layer_name, layer_dict):\n",
    "        \"\"\"Create an ONNX Add node with the shortcut properties from\n",
    "        the DarkNet-based graph.\n",
    "\n",
    "        Keyword arguments:\n",
    "        layer_name -- the layer's name (also the corresponding key in layer_configs)\n",
    "        layer_dict -- a layer parameter dictionary (one element of layer_configs)\n",
    "        \"\"\"\n",
    "        shortcut_index = layer_dict['from']\n",
    "        activation = layer_dict['activation']\n",
    "        assert activation == 'linear'\n",
    "\n",
    "        # Element-wise addition of the previous output and the layer that the\n",
    "        # (negative) 'from' index points to.\n",
    "        first_node_specs = self._get_previous_node_specs()\n",
    "        second_node_specs = self._get_previous_node_specs(\n",
    "            target_index=shortcut_index)\n",
    "        assert first_node_specs.channels == second_node_specs.channels\n",
    "        channels = first_node_specs.channels\n",
    "        inputs = [first_node_specs.name, second_node_specs.name]\n",
    "        shortcut_node = helper.make_node(\n",
    "            'Add',\n",
    "            inputs=inputs,\n",
    "            outputs=[layer_name],\n",
    "            name=layer_name,\n",
    "        )\n",
    "        self._nodes.append(shortcut_node)\n",
    "        return layer_name, channels\n",
    "\n",
    "    def _make_route_node(self, layer_name, layer_dict):\n",
    "        \"\"\"If the 'layers' parameter from the DarkNet configuration is only one index, continue\n",
    "        node creation at the indicated (negative) index. Otherwise, create an ONNX Concat node\n",
    "        with the route properties from the DarkNet-based graph.\n",
    "\n",
    "        Keyword arguments:\n",
    "        layer_name -- the layer's name (also the corresponding key in layer_configs)\n",
    "        layer_dict -- a layer parameter dictionary (one element of layer_configs)\n",
    "        \"\"\"\n",
    "        route_node_indexes = layer_dict['layers']\n",
    "        if len(route_node_indexes) == 1:\n",
    "            split_index = route_node_indexes[0]\n",
    "            assert split_index < 0\n",
    "            # Increment by one because we skipped the YOLO layer:\n",
    "            split_index += 1\n",
    "            # Truncate the spec list so subsequent layers continue building\n",
    "            # from the indicated (negative) index; no ONNX node is emitted.\n",
    "            self.major_node_specs = self.major_node_specs[:split_index]\n",
    "            layer_name = None\n",
    "            channels = None\n",
    "        else:\n",
    "            inputs = list()\n",
    "            channels = 0\n",
    "            for index in route_node_indexes:\n",
    "                if index > 0:\n",
    "                    # Increment by one because we count the input as a node (DarkNet\n",
    "                    # does not)\n",
    "                    index += 1\n",
    "                route_node_specs = self._get_previous_node_specs(\n",
    "                    target_index=index)\n",
    "                inputs.append(route_node_specs.name)\n",
    "                # Concatenation along the channel axis sums the channel counts.\n",
    "                channels += route_node_specs.channels\n",
    "            assert inputs\n",
    "            assert channels > 0\n",
    "\n",
    "            route_node = helper.make_node(\n",
    "                'Concat',\n",
    "                axis=1,\n",
    "                inputs=inputs,\n",
    "                outputs=[layer_name],\n",
    "                name=layer_name,\n",
    "            )\n",
    "            self._nodes.append(route_node)\n",
    "        return layer_name, channels\n",
    "\n",
    "    def _make_upsample_node(self, layer_name, layer_dict):\n",
    "        \"\"\"Create an ONNX Upsample node with the properties from\n",
    "        the DarkNet-based graph.\n",
    "\n",
    "        Keyword arguments:\n",
    "        layer_name -- the layer's name (also the corresponding key in layer_configs)\n",
    "        layer_dict -- a layer parameter dictionary (one element of layer_configs)\n",
    "        \"\"\"\n",
    "        upsample_factor = float(layer_dict['stride'])\n",
    "        previous_node_specs = self._get_previous_node_specs()\n",
    "        inputs = [previous_node_specs.name]\n",
    "        channels = previous_node_specs.channels\n",
    "        assert channels > 0\n",
    "        upsample_node = helper.make_node(\n",
    "            'Upsample',\n",
    "            mode='nearest',\n",
    "            # For ONNX versions <0.7.0, Upsample nodes accept different parameters than 'scales':\n",
    "            scales=[1.0, 1.0, upsample_factor, upsample_factor],\n",
    "            inputs=inputs,\n",
    "            outputs=[layer_name],\n",
    "            name=layer_name,\n",
    "        )\n",
    "        self._nodes.append(upsample_node)\n",
    "        return layer_name, channels\n",
    "\n",
    "\n",
    "def generate_md5_checksum(local_path):\n",
    "    \"\"\"Returns the MD5 checksum of a local file.\n",
    "\n",
    "    Keyword argument:\n",
    "    local_path -- path of the file whose checksum shall be generated\n",
    "    \"\"\"\n",
    "    with open(local_path) as local_file:\n",
    "        data = local_file.read()\n",
    "        return hashlib.md5(data).hexdigest()\n",
    "\n",
    "\n",
    "def download_file(local_path, link, checksum_reference=None):\n",
    "    \"\"\"Checks if a local file is present and downloads it from the specified path otherwise.\n",
    "    If checksum_reference is specified, the file's md5 checksum is compared against the\n",
    "    expected value.\n",
    "\n",
    "    Keyword arguments:\n",
    "    local_path -- path of the file whose checksum shall be generated\n",
    "    link -- link where the file shall be downloaded from if it is not found locally\n",
    "    checksum_reference -- expected MD5 checksum of the file\n",
    "    \"\"\"\n",
    "    if not os.path.exists(local_path):\n",
    "        print('Downloading from %s, this may take a while...' % link)\n",
    "        wget.download(link, local_path)\n",
    "        print()\n",
    "    if checksum_reference is not None:\n",
    "        checksum = generate_md5_checksum(local_path)\n",
    "        if checksum != checksum_reference:\n",
    "            raise ValueError(\n",
    "                'The MD5 checksum of local file %s differs from %s, please manually remove \\\n",
    "                 the file and try again.' %\n",
    "                (local_path, checksum_reference))\n",
    "    return local_path\n",
    "\n",
    "\n",
    "def main():\n",
    "    \"\"\"Run the DarkNet-to-ONNX conversion for YOLOv3-608.\"\"\"\n",
    "    # Have to use python 2 due to hashlib compatibility\n",
    "    if sys.version_info[0] > 2:\n",
    "        raise Exception(\"This script is only compatible with python2, please re-run this script \\\n",
    "    with python2. The rest of this sample can be run with either version of python\")\n",
    "\n",
    "    # Path of the (local) DarkNet config file of the trained model:\n",
    "    cfg_file_path = 'cfg/face-Copy1.cfg'\n",
    "\n",
    "    # These are the only layers DarkNetParser will extract parameters from. The three layers of\n",
    "    # type 'yolo' are not parsed in detail because they are included in the post-processing later:\n",
    "    supported_layers = ['net', 'convolutional', 'shortcut',\n",
    "                        'route', 'upsample']\n",
    "\n",
    "    # Create a DarkNetParser object, and then use it to generate an OrderedDict with all\n",
    "    # layer's configs from the cfg file:\n",
    "    parser = DarkNetParser(supported_layers)\n",
    "    layer_configs = parser.parse_cfg_file(cfg_file_path)\n",
    "    # We do not need the parser anymore after we got layer_configs:\n",
    "    del parser\n",
    "\n",
    "    # In above layer_config, there are three outputs that we need to know the output\n",
    "    # shape of (in CHW format). The channel count is 3 * (classes + 4 + 1); here\n",
    "    # 18 = 3 * (1 + 4 + 1), i.e. a single-class model. Change these values when\n",
    "    # converting a model trained with a different number of classes:\n",
    "    output_tensor_dims = OrderedDict()\n",
    "    output_tensor_dims['082_convolutional'] = [18, 19, 19]\n",
    "    output_tensor_dims['094_convolutional'] = [18, 38, 38]\n",
    "    output_tensor_dims['106_convolutional'] = [18, 76, 76]\n",
    "\n",
    "    # Create a GraphBuilderONNX object with the known output tensor dimensions:\n",
    "    builder = GraphBuilderONNX(output_tensor_dims)\n",
    "\n",
    "    # Path of the (local) trained DarkNet weights used to populate the network:\n",
    "    weights_file_path = './backup/face_10000.weights'\n",
    "\n",
    "    # Now generate an ONNX graph with weights from the previously parsed layer configurations\n",
    "    # and the weights file:\n",
    "    yolov3_model_def = builder.build_onnx_graph(\n",
    "        layer_configs=layer_configs,\n",
    "        weights_file_path=weights_file_path,\n",
    "        verbose=True)\n",
    "    # Once we have the model definition, we do not need the builder anymore:\n",
    "    del builder\n",
    "\n",
    "    # Perform a sanity check on the ONNX model definition:\n",
    "    onnx.checker.check_model(yolov3_model_def)\n",
    "\n",
    "    # Serialize the generated ONNX graph to this file:\n",
    "    output_file_path = 'yolov3_608.onnx'\n",
    "    onnx.save(yolov3_model_def, output_file_path)\n",
    "\n",
    "\n",
    "if __name__ == '__main__':\n",
    "    main()\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[33mDEPRECATION: Python 2.7 will reach the end of its life on January 1st, 2020. Please upgrade your Python as Python 2.7 won't be maintained after that date. A future version of pip will drop support for Python 2.7. More details about Python 2 support in pip, can be found at https://pip.pypa.io/en/latest/development/release-process/#python-2-support\u001b[0m\n",
      "Looking in indexes: http://mirrors.aliyun.com/pypi/simple/\n",
      "Collecting onnx==1.2.1\n",
      "\u001b[?25l  Downloading http://mirrors.aliyun.com/pypi/packages/24/f3/e42b9a4020b956542029b9ad2e40717de19b96a3af48984fed7b4ba179e2/onnx-1.2.1-cp27-cp27mu-manylinux1_x86_64.whl (3.8MB)\n",
      "\u001b[K     |████████████████████████████████| 3.8MB 1.2MB/s \n",
      "\u001b[?25hRequirement already satisfied: six in /home/zhongls/.conda/envs/zhongls27/lib/python2.7/site-packages (from onnx==1.2.1) (1.12.0)\n",
      "Requirement already satisfied: numpy in /home/zhongls/.conda/envs/zhongls27/lib/python2.7/site-packages (from onnx==1.2.1) (1.16.5)\n",
      "Requirement already satisfied: protobuf in /home/zhongls/.conda/envs/zhongls27/lib/python2.7/site-packages (from onnx==1.2.1) (3.11.3)\n",
      "Requirement already satisfied: typing-extensions>=3.6.2.1 in /home/zhongls/.conda/envs/zhongls27/lib/python2.7/site-packages (from onnx==1.2.1) (3.7.4.2)\n",
      "Requirement already satisfied: typing>=3.6.4 in /home/zhongls/.conda/envs/zhongls27/lib/python2.7/site-packages (from onnx==1.2.1) (3.7.4.1)\n",
      "Requirement already satisfied: setuptools in /home/zhongls/.conda/envs/zhongls27/lib/python2.7/site-packages (from protobuf->onnx==1.2.1) (41.6.0.post20191030)\n",
      "Installing collected packages: onnx\n",
      "  Found existing installation: onnx 1.4.1\n",
      "    Uninstalling onnx-1.4.1:\n",
      "      Successfully uninstalled onnx-1.4.1\n",
      "Successfully installed onnx-1.2.1\n"
     ]
    }
   ],
   "source": [
    "# NOTE(review): downgrades onnx 1.4.1 -> 1.2.1 (see output) — presumably the\n",
    "# conversion script above requires the 1.2.1 API; pin kept as-is, confirm before upgrading.\n",
    "!pip install onnx==1.2.1 -i http://mirrors.aliyun.com/pypi/simple/  --trusted-host mirrors.aliyun.com"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Use the %pip magic so the install targets this kernel's environment\n",
    "# (a bare `pip install` line is not valid Python in a code cell on older IPython).\n",
    "%pip install onnxruntime # CPU build"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python [conda env:.conda-zhongls27]",
   "language": "python",
   "name": "conda-env-.conda-zhongls27-py"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 2
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython2",
   "version": "2.7.17"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
