{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# From PyTorch to TFLite\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-11-19 01:19:12.086204: E external/local_xla/xla/stream_executor/cuda/cuda_dnn.cc:9261] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\n",
      "2024-11-19 01:19:12.086242: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:607] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\n",
      "2024-11-19 01:19:12.087113: E external/local_xla/xla/stream_executor/cuda/cuda_blas.cc:1515] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\n",
      "2024-11-19 01:19:12.092586: I tensorflow/core/platform/cpu_feature_guard.cc:182] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations.\n",
      "To enable the following instructions: AVX2 FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags.\n",
      "2024-11-19 01:19:12.874978: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT\n",
      "/home/chenxin/anaconda3/envs/ocdet/lib/python3.10/site-packages/tensorflow_addons/utils/tfa_eol_msg.py:23: UserWarning: \n",
      "\n",
      "TensorFlow Addons (TFA) has ended development and introduction of new features.\n",
      "TFA has entered a minimal maintenance and release mode until a planned end of life in May 2024.\n",
      "Please modify downstream libraries to take dependencies from other repositories in our TensorFlow community (e.g. Keras, Keras-CV, and Keras-NLP). \n",
      "\n",
      "For more information see: https://github.com/tensorflow/addons/issues/2807 \n",
      "\n",
      "  warnings.warn(\n"
     ]
    }
   ],
   "source": [
    "import os\n",
    "import sys\n",
    "\n",
    "# Add the parent directory of the current dir (i.e. root dir of this repo) to the system path\n",
    "sys.path.append(os.path.dirname(os.getcwd()))\n",
    "# set cuda device\n",
    "os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\n",
    "\n",
    "from torch import nn\n",
    "\n",
    "from conversion.converter import Converter"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## yolo\n",
    "\n",
    "see also ultralytics/customization"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/mnt/ssd2/xxx/repo/ultralytics/ultralytics/nn/modules/head.py:99: TracerWarning: Converting a tensor to a Python boolean might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!\n",
      "  if self.dynamic or self.shape != shape:\n",
      "/mnt/ssd2/xxx/repo/ultralytics/ultralytics/utils/tal.py:308: TracerWarning: Iterating over a tensor might cause the trace to be incorrect. Passing a tensor of different shape won't change the number of iterations executed (and might lead to errors or silently give incorrect results).\n",
      "  for i, stride in enumerate(strides):\n",
      "INFO:absl:Function `__call__` contains input name(s) x, y with unsupported characters which will be renamed to transpose_196_x, add_100_y in the SavedModel.\n",
      "INFO:absl:Found untraced functions such as gen_tensor_dict while saving (showing 1 of 1). These functions will not be directly callable after loading.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/yolov8s_r320/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/yolov8s_r320/assets\n",
      "INFO:absl:Writing fingerprint to saved_model/yolov8s_r320/fingerprint.pb\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Preparing calibration data\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-10-07 13:32:45.378298: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:378] Ignored output_format.\n",
      "2024-10-07 13:32:45.378358: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:381] Ignored drop_control_dependency.\n",
      "2024-10-07 13:32:45.378613: I tensorflow/cc/saved_model/reader.cc:83] Reading SavedModel from: saved_model/yolov8s_r320\n",
      "2024-10-07 13:32:45.399277: I tensorflow/cc/saved_model/reader.cc:51] Reading meta graph with tags { serve }\n",
      "2024-10-07 13:32:45.399314: I tensorflow/cc/saved_model/reader.cc:146] Reading SavedModel debug info (if present) from: saved_model/yolov8s_r320\n",
      "2024-10-07 13:32:45.427564: I tensorflow/cc/saved_model/loader.cc:233] Restoring SavedModel bundle.\n",
      "2024-10-07 13:32:45.536170: I tensorflow/cc/saved_model/loader.cc:217] Running initialization op on SavedModel bundle at path: saved_model/yolov8s_r320\n",
      "2024-10-07 13:32:45.640828: I tensorflow/cc/saved_model/loader.cc:316] SavedModel load for tags { serve }; Status: success: OK. Took 262216 microseconds.\n",
      "Summary on the non-converted ops:\n",
      "---------------------------------\n",
      " * Accepted dialects: tfl, builtin, func\n",
      " * Non-Converted Ops: 160, Total Ops 568, % non-converted = 28.17 %\n",
      " * 160 ARITH ops\n",
      "\n",
      "- arith.constant:  160 occurrences  (f32: 131, i32: 29)\n",
      "\n",
      "\n",
      "\n",
      "  (f32: 8)\n",
      "  (f32: 19)\n",
      "  (f32: 64)\n",
      "  (f32: 58)\n",
      "  (f32: 3)\n",
      "  (f32: 59)\n",
      "  (f32: 34)\n",
      "  (f32: 5)\n",
      "  (f32: 2)\n",
      "  (f32: 1)\n",
      "  (f32: 1)\n",
      "  (f32: 18)\n",
      "  (f32: 2)\n",
      "  (f32: 131)\n",
      "2024-10-07 13:32:46.789725: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 7.605 G  ops, equivalently 3.802 G  MACs\n",
      "fully_quantize: 0, inference_type: 6, input_inference_type: UINT8, output_inference_type: UINT8\n",
      "2024-10-07 13:32:53.892576: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 7.605 G  ops, equivalently 3.802 G  MACs\n",
      "/mnt/ssd2/xxx/repo/ultralytics/ultralytics/nn/modules/head.py:99: TracerWarning: Converting a tensor to a Python boolean might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!\n",
      "  if self.dynamic or self.shape != shape:\n",
      "/mnt/ssd2/xxx/repo/ultralytics/ultralytics/utils/tal.py:308: TracerWarning: Iterating over a tensor might cause the trace to be incorrect. Passing a tensor of different shape won't change the number of iterations executed (and might lead to errors or silently give incorrect results).\n",
      "  for i, stride in enumerate(strides):\n",
      "INFO:absl:Function `__call__` contains input name(s) x, y with unsupported characters which will be renamed to transpose_256_x, add_137_y in the SavedModel.\n",
      "INFO:absl:Found untraced functions such as gen_tensor_dict while saving (showing 1 of 1). These functions will not be directly callable after loading.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/yolov8m_r320/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/yolov8m_r320/assets\n",
      "INFO:absl:Writing fingerprint to saved_model/yolov8m_r320/fingerprint.pb\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Preparing calibration data\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-10-07 13:33:30.191463: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:378] Ignored output_format.\n",
      "2024-10-07 13:33:30.191531: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:381] Ignored drop_control_dependency.\n",
      "2024-10-07 13:33:30.191822: I tensorflow/cc/saved_model/reader.cc:83] Reading SavedModel from: saved_model/yolov8m_r320\n",
      "2024-10-07 13:33:30.521064: I tensorflow/cc/saved_model/reader.cc:51] Reading meta graph with tags { serve }\n",
      "2024-10-07 13:33:30.521117: I tensorflow/cc/saved_model/reader.cc:146] Reading SavedModel debug info (if present) from: saved_model/yolov8m_r320\n",
      "2024-10-07 13:33:30.579197: I tensorflow/cc/saved_model/loader.cc:233] Restoring SavedModel bundle.\n",
      "2024-10-07 13:33:30.855984: I tensorflow/cc/saved_model/loader.cc:217] Running initialization op on SavedModel bundle at path: saved_model/yolov8m_r320\n",
      "2024-10-07 13:33:31.005580: I tensorflow/cc/saved_model/loader.cc:316] SavedModel load for tags { serve }; Status: success: OK. Took 813761 microseconds.\n",
      "Summary on the non-converted ops:\n",
      "---------------------------------\n",
      " * Accepted dialects: tfl, builtin, func\n",
      " * Non-Converted Ops: 200, Total Ops 734, % non-converted = 27.25 %\n",
      " * 200 ARITH ops\n",
      "\n",
      "- arith.constant:  200 occurrences  (f32: 171, i32: 29)\n",
      "\n",
      "\n",
      "\n",
      "  (f32: 14)\n",
      "  (f32: 19)\n",
      "  (f32: 84)\n",
      "  (f32: 78)\n",
      "  (f32: 3)\n",
      "  (f32: 79)\n",
      "  (f32: 54)\n",
      "  (f32: 5)\n",
      "  (f32: 2)\n",
      "  (f32: 1)\n",
      "  (f32: 1)\n",
      "  (f32: 18)\n",
      "  (f32: 2)\n",
      "  (f32: 171)\n",
      "2024-10-07 13:33:32.459528: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 20.535 G  ops, equivalently 10.268 G  MACs\n",
      "fully_quantize: 0, inference_type: 6, input_inference_type: UINT8, output_inference_type: UINT8\n",
      "2024-10-07 13:33:46.856125: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 20.535 G  ops, equivalently 10.268 G  MACs\n",
      "/mnt/ssd2/xxx/repo/ultralytics/ultralytics/nn/modules/head.py:99: TracerWarning: Converting a tensor to a Python boolean might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!\n",
      "  if self.dynamic or self.shape != shape:\n",
      "/mnt/ssd2/xxx/repo/ultralytics/ultralytics/utils/tal.py:308: TracerWarning: Iterating over a tensor might cause the trace to be incorrect. Passing a tensor of different shape won't change the number of iterations executed (and might lead to errors or silently give incorrect results).\n",
      "  for i, stride in enumerate(strides):\n",
      "INFO:absl:Function `__call__` contains input name(s) x, y with unsupported characters which will be renamed to transpose_316_x, add_157_y in the SavedModel.\n",
      "INFO:absl:Found untraced functions such as gen_tensor_dict while saving (showing 1 of 1). These functions will not be directly callable after loading.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/yolov8l_r320/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/yolov8l_r320/assets\n",
      "INFO:absl:Writing fingerprint to saved_model/yolov8l_r320/fingerprint.pb\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Preparing calibration data\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-10-07 13:34:55.099763: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:378] Ignored output_format.\n",
      "2024-10-07 13:34:55.099819: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:381] Ignored drop_control_dependency.\n",
      "2024-10-07 13:34:55.100073: I tensorflow/cc/saved_model/reader.cc:83] Reading SavedModel from: saved_model/yolov8l_r320\n",
      "2024-10-07 13:34:55.157183: I tensorflow/cc/saved_model/reader.cc:51] Reading meta graph with tags { serve }\n",
      "2024-10-07 13:34:55.157225: I tensorflow/cc/saved_model/reader.cc:146] Reading SavedModel debug info (if present) from: saved_model/yolov8l_r320\n",
      "2024-10-07 13:34:55.233333: I tensorflow/cc/saved_model/loader.cc:233] Restoring SavedModel bundle.\n",
      "2024-10-07 13:34:55.512623: I tensorflow/cc/saved_model/loader.cc:217] Running initialization op on SavedModel bundle at path: saved_model/yolov8l_r320\n",
      "2024-10-07 13:34:55.773486: I tensorflow/cc/saved_model/loader.cc:316] SavedModel load for tags { serve }; Status: success: OK. Took 673415 microseconds.\n",
      "Summary on the non-converted ops:\n",
      "---------------------------------\n",
      " * Accepted dialects: tfl, builtin, func\n",
      " * Non-Converted Ops: 239, Total Ops 899, % non-converted = 26.59 %\n",
      " * 239 ARITH ops\n",
      "\n",
      "- arith.constant:  239 occurrences  (f32: 211, i32: 28)\n",
      "\n",
      "\n",
      "\n",
      "  (f32: 20)\n",
      "  (f32: 19)\n",
      "  (f32: 104)\n",
      "  (f32: 98)\n",
      "  (f32: 3)\n",
      "  (f32: 99)\n",
      "  (f32: 74)\n",
      "  (f32: 5)\n",
      "  (f32: 2)\n",
      "  (f32: 1)\n",
      "  (f32: 1)\n",
      "  (f32: 18)\n",
      "  (f32: 2)\n",
      "  (f32: 211)\n",
      "2024-10-07 13:34:58.067738: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 42.521 G  ops, equivalently 21.260 G  MACs\n",
      "fully_quantize: 0, inference_type: 6, input_inference_type: UINT8, output_inference_type: UINT8\n",
      "2024-10-07 13:35:20.934601: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 42.521 G  ops, equivalently 21.260 G  MACs\n",
      "/mnt/ssd2/xxx/repo/ultralytics/ultralytics/nn/modules/head.py:99: TracerWarning: Converting a tensor to a Python boolean might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!\n",
      "  if self.dynamic or self.shape != shape:\n",
      "/mnt/ssd2/xxx/repo/ultralytics/ultralytics/utils/tal.py:308: TracerWarning: Iterating over a tensor might cause the trace to be incorrect. Passing a tensor of different shape won't change the number of iterations executed (and might lead to errors or silently give incorrect results).\n",
      "  for i, stride in enumerate(strides):\n",
      "INFO:absl:Function `__call__` contains input name(s) x, y with unsupported characters which will be renamed to transpose_316_x, add_157_y in the SavedModel.\n",
      "INFO:absl:Found untraced functions such as gen_tensor_dict while saving (showing 1 of 1). These functions will not be directly callable after loading.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/yolov8x_r320/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/yolov8x_r320/assets\n",
      "INFO:absl:Writing fingerprint to saved_model/yolov8x_r320/fingerprint.pb\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Preparing calibration data\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-10-07 13:36:33.384906: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:378] Ignored output_format.\n",
      "2024-10-07 13:36:33.384943: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:381] Ignored drop_control_dependency.\n",
      "2024-10-07 13:36:33.385151: I tensorflow/cc/saved_model/reader.cc:83] Reading SavedModel from: saved_model/yolov8x_r320\n",
      "2024-10-07 13:36:33.458288: I tensorflow/cc/saved_model/reader.cc:51] Reading meta graph with tags { serve }\n",
      "2024-10-07 13:36:33.458329: I tensorflow/cc/saved_model/reader.cc:146] Reading SavedModel debug info (if present) from: saved_model/yolov8x_r320\n",
      "2024-10-07 13:36:33.537227: I tensorflow/cc/saved_model/loader.cc:233] Restoring SavedModel bundle.\n",
      "2024-10-07 13:36:34.074572: I tensorflow/cc/saved_model/loader.cc:217] Running initialization op on SavedModel bundle at path: saved_model/yolov8x_r320\n",
      "2024-10-07 13:36:34.636207: I tensorflow/cc/saved_model/loader.cc:316] SavedModel load for tags { serve }; Status: success: OK. Took 1251055 microseconds.\n",
      "Summary on the non-converted ops:\n",
      "---------------------------------\n",
      " * Accepted dialects: tfl, builtin, func\n",
      " * Non-Converted Ops: 239, Total Ops 899, % non-converted = 26.59 %\n",
      " * 239 ARITH ops\n",
      "\n",
      "- arith.constant:  239 occurrences  (f32: 211, i32: 28)\n",
      "\n",
      "\n",
      "\n",
      "  (f32: 20)\n",
      "  (f32: 19)\n",
      "  (f32: 104)\n",
      "  (f32: 98)\n",
      "  (f32: 3)\n",
      "  (f32: 99)\n",
      "  (f32: 74)\n",
      "  (f32: 5)\n",
      "  (f32: 2)\n",
      "  (f32: 1)\n",
      "  (f32: 1)\n",
      "  (f32: 18)\n",
      "  (f32: 2)\n",
      "  (f32: 211)\n",
      "2024-10-07 13:36:37.998686: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 65.988 G  ops, equivalently 32.994 G  MACs\n",
      "fully_quantize: 0, inference_type: 6, input_inference_type: UINT8, output_inference_type: UINT8\n",
      "2024-10-07 13:37:11.629566: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 65.988 G  ops, equivalently 32.994 G  MACs\n",
      "/mnt/ssd2/xxx/repo/ultralytics/ultralytics/nn/modules/head.py:99: TracerWarning: Converting a tensor to a Python boolean might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!\n",
      "  if self.dynamic or self.shape != shape:\n",
      "/mnt/ssd2/xxx/repo/ultralytics/ultralytics/utils/tal.py:308: TracerWarning: Iterating over a tensor might cause the trace to be incorrect. Passing a tensor of different shape won't change the number of iterations executed (and might lead to errors or silently give incorrect results).\n",
      "  for i, stride in enumerate(strides):\n",
      "INFO:absl:Function `__call__` contains input name(s) x, y with unsupported characters which will be renamed to transpose_268_x, add_141_y in the SavedModel.\n",
      "INFO:absl:Found untraced functions such as gen_tensor_dict while saving (showing 1 of 1). These functions will not be directly callable after loading.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/yolo11s_r320/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/yolo11s_r320/assets\n",
      "INFO:absl:Writing fingerprint to saved_model/yolo11s_r320/fingerprint.pb\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Preparing calibration data\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-10-07 13:37:49.948148: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:378] Ignored output_format.\n",
      "2024-10-07 13:37:49.948201: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:381] Ignored drop_control_dependency.\n",
      "2024-10-07 13:37:49.948441: I tensorflow/cc/saved_model/reader.cc:83] Reading SavedModel from: saved_model/yolo11s_r320\n",
      "2024-10-07 13:37:49.978563: I tensorflow/cc/saved_model/reader.cc:51] Reading meta graph with tags { serve }\n",
      "2024-10-07 13:37:49.978598: I tensorflow/cc/saved_model/reader.cc:146] Reading SavedModel debug info (if present) from: saved_model/yolo11s_r320\n",
      "2024-10-07 13:37:50.010627: I tensorflow/cc/saved_model/loader.cc:233] Restoring SavedModel bundle.\n",
      "2024-10-07 13:37:50.137136: I tensorflow/cc/saved_model/loader.cc:217] Running initialization op on SavedModel bundle at path: saved_model/yolo11s_r320\n",
      "2024-10-07 13:37:50.265642: I tensorflow/cc/saved_model/loader.cc:316] SavedModel load for tags { serve }; Status: success: OK. Took 317203 microseconds.\n",
      "Summary on the non-converted ops:\n",
      "---------------------------------\n",
      " * Accepted dialects: tfl, builtin, func\n",
      " * Non-Converted Ops: 213, Total Ops 753, % non-converted = 28.29 %\n",
      " * 213 ARITH ops\n",
      "\n",
      "- arith.constant:  213 occurrences  (f32: 180, i32: 33)\n",
      "\n",
      "\n",
      "\n",
      "  (f32: 16)\n",
      "  (f32: 2)\n",
      "  (f32: 23)\n",
      "  (f32: 81)\n",
      "  (f32: 7)\n",
      "  (f32: 78)\n",
      "  (f32: 3)\n",
      "  (f32: 80)\n",
      "  (f32: 37)\n",
      "  (f32: 8)\n",
      "  (f32: 2)\n",
      "  (f32: 2)\n",
      "  (f32: 3)\n",
      "  (f32: 18)\n",
      "  (f32: 2)\n",
      "  (f32: 175)\n",
      "2024-10-07 13:37:51.110138: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 5.892 G  ops, equivalently 2.946 G  MACs\n",
      "fully_quantize: 0, inference_type: 6, input_inference_type: UINT8, output_inference_type: UINT8\n",
      "2024-10-07 13:37:57.908133: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 5.892 G  ops, equivalently 2.946 G  MACs\n",
      "/mnt/ssd2/xxx/repo/ultralytics/ultralytics/utils/tal.py:308: TracerWarning: Iterating over a tensor might cause the trace to be incorrect. Passing a tensor of different shape won't change the number of iterations executed (and might lead to errors or silently give incorrect results).\n",
      "  for i, stride in enumerate(strides):\n",
      "INFO:absl:Function `__call__` contains input name(s) x, y with unsupported characters which will be renamed to transpose_343_x, add_169_y in the SavedModel.\n",
      "INFO:absl:Found untraced functions such as gen_tensor_dict while saving (showing 1 of 1). These functions will not be directly callable after loading.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/yolo11m_r320/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/yolo11m_r320/assets\n",
      "INFO:absl:Writing fingerprint to saved_model/yolo11m_r320/fingerprint.pb\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Preparing calibration data\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-10-07 13:38:42.271120: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:378] Ignored output_format.\n",
      "2024-10-07 13:38:42.271159: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:381] Ignored drop_control_dependency.\n",
      "2024-10-07 13:38:42.271439: I tensorflow/cc/saved_model/reader.cc:83] Reading SavedModel from: saved_model/yolo11m_r320\n",
      "2024-10-07 13:38:42.304595: I tensorflow/cc/saved_model/reader.cc:51] Reading meta graph with tags { serve }\n",
      "2024-10-07 13:38:42.304635: I tensorflow/cc/saved_model/reader.cc:146] Reading SavedModel debug info (if present) from: saved_model/yolo11m_r320\n",
      "2024-10-07 13:38:42.352406: I tensorflow/cc/saved_model/loader.cc:233] Restoring SavedModel bundle.\n",
      "2024-10-07 13:38:42.544356: I tensorflow/cc/saved_model/loader.cc:217] Running initialization op on SavedModel bundle at path: saved_model/yolo11m_r320\n",
      "2024-10-07 13:38:42.745759: I tensorflow/cc/saved_model/loader.cc:316] SavedModel load for tags { serve }; Status: success: OK. Took 474321 microseconds.\n",
      "Summary on the non-converted ops:\n",
      "---------------------------------\n",
      " * Accepted dialects: tfl, builtin, func\n",
      " * Non-Converted Ops: 262, Total Ops 942, % non-converted = 27.81 %\n",
      " * 262 ARITH ops\n",
      "\n",
      "- arith.constant:  262 occurrences  (f32: 230, i32: 32)\n",
      "\n",
      "\n",
      "\n",
      "  (f32: 21)\n",
      "  (f32: 2)\n",
      "  (f32: 28)\n",
      "  (f32: 106)\n",
      "  (f32: 7)\n",
      "  (f32: 103)\n",
      "  (f32: 3)\n",
      "  (f32: 105)\n",
      "  (f32: 47)\n",
      "  (f32: 8)\n",
      "  (f32: 2)\n",
      "  (f32: 2)\n",
      "  (f32: 3)\n",
      "  (f32: 18)\n",
      "  (f32: 2)\n",
      "  (f32: 220)\n",
      "2024-10-07 13:38:44.132244: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 18.123 G  ops, equivalently 9.062 G  MACs\n",
      "fully_quantize: 0, inference_type: 6, input_inference_type: UINT8, output_inference_type: UINT8\n",
      "2024-10-07 13:38:58.664670: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 18.123 G  ops, equivalently 9.062 G  MACs\n",
      "/mnt/ssd2/xxx/repo/ultralytics/ultralytics/utils/tal.py:308: TracerWarning: Iterating over a tensor might cause the trace to be incorrect. Passing a tensor of different shape won't change the number of iterations executed (and might lead to errors or silently give incorrect results).\n",
      "  for i, stride in enumerate(strides):\n",
      "INFO:absl:Function `__call__` contains input name(s) x, y with unsupported characters which will be renamed to transpose_526_x, add_233_y in the SavedModel.\n",
      "INFO:absl:Found untraced functions such as gen_tensor_dict while saving (showing 1 of 1). These functions will not be directly callable after loading.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/yolo11l_r320/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/yolo11l_r320/assets\n",
      "INFO:absl:Writing fingerprint to saved_model/yolo11l_r320/fingerprint.pb\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Preparing calibration data\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-10-07 13:40:20.893971: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:378] Ignored output_format.\n",
      "2024-10-07 13:40:20.894028: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:381] Ignored drop_control_dependency.\n",
      "2024-10-07 13:40:20.894323: I tensorflow/cc/saved_model/reader.cc:83] Reading SavedModel from: saved_model/yolo11l_r320\n",
      "2024-10-07 13:40:21.299655: I tensorflow/cc/saved_model/reader.cc:51] Reading meta graph with tags { serve }\n",
      "2024-10-07 13:40:21.299697: I tensorflow/cc/saved_model/reader.cc:146] Reading SavedModel debug info (if present) from: saved_model/yolo11l_r320\n",
      "2024-10-07 13:40:21.347965: I tensorflow/cc/saved_model/loader.cc:233] Restoring SavedModel bundle.\n",
      "2024-10-07 13:40:21.535922: I tensorflow/cc/saved_model/loader.cc:217] Running initialization op on SavedModel bundle at path: saved_model/yolo11l_r320\n",
      "2024-10-07 13:40:21.723919: I tensorflow/cc/saved_model/loader.cc:316] SavedModel load for tags { serve }; Status: success: OK. Took 829599 microseconds.\n",
      "Summary on the non-converted ops:\n",
      "---------------------------------\n",
      " * Accepted dialects: tfl, builtin, func\n",
      " * Non-Converted Ops: 384, Total Ops 1420, % non-converted = 27.04 %\n",
      " * 384 ARITH ops\n",
      "\n",
      "- arith.constant:  384 occurrences  (f32: 352, i32: 32)\n",
      "\n",
      "\n",
      "\n",
      "  (f32: 40)\n",
      "  (f32: 4)\n",
      "  (f32: 36)\n",
      "  (f32: 166)\n",
      "  (f32: 8)\n",
      "  (f32: 160)\n",
      "  (f32: 3)\n",
      "  (f32: 163)\n",
      "  (f32: 80)\n",
      "  (f32: 11)\n",
      "  (f32: 2)\n",
      "  (f32: 3)\n",
      "  (f32: 4)\n",
      "  (f32: 18)\n",
      "  (f32: 2)\n",
      "  (f32: 333)\n",
      "2024-10-07 13:40:23.601768: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 23.174 G  ops, equivalently 11.587 G  MACs\n",
      "fully_quantize: 0, inference_type: 6, input_inference_type: UINT8, output_inference_type: UINT8\n",
      "2024-10-07 13:40:43.238084: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 23.174 G  ops, equivalently 11.587 G  MACs\n",
      "/mnt/ssd2/xxx/repo/ultralytics/ultralytics/utils/tal.py:308: TracerWarning: Iterating over a tensor might cause the trace to be incorrect. Passing a tensor of different shape won't change the number of iterations executed (and might lead to errors or silently give incorrect results).\n",
      "  for i, stride in enumerate(strides):\n",
      "INFO:absl:Function `__call__` contains input name(s) x, y with unsupported characters which will be renamed to transpose_526_x, add_233_y in the SavedModel.\n",
      "INFO:absl:Found untraced functions such as gen_tensor_dict while saving (showing 1 of 1). These functions will not be directly callable after loading.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/yolo11x_r320/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/yolo11x_r320/assets\n",
      "INFO:absl:Writing fingerprint to saved_model/yolo11x_r320/fingerprint.pb\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Preparing calibration data\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-10-07 13:42:27.180867: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:378] Ignored output_format.\n",
      "2024-10-07 13:42:27.180928: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:381] Ignored drop_control_dependency.\n",
      "2024-10-07 13:42:27.181191: I tensorflow/cc/saved_model/reader.cc:83] Reading SavedModel from: saved_model/yolo11x_r320\n",
      "2024-10-07 13:42:27.938672: I tensorflow/cc/saved_model/reader.cc:51] Reading meta graph with tags { serve }\n",
      "2024-10-07 13:42:27.938731: I tensorflow/cc/saved_model/reader.cc:146] Reading SavedModel debug info (if present) from: saved_model/yolo11x_r320\n",
      "2024-10-07 13:42:28.046349: I tensorflow/cc/saved_model/loader.cc:233] Restoring SavedModel bundle.\n",
      "2024-10-07 13:42:28.453514: I tensorflow/cc/saved_model/loader.cc:217] Running initialization op on SavedModel bundle at path: saved_model/yolo11x_r320\n",
      "2024-10-07 13:42:28.712211: I tensorflow/cc/saved_model/loader.cc:316] SavedModel load for tags { serve }; Status: success: OK. Took 1531020 microseconds.\n",
      "Summary on the non-converted ops:\n",
      "---------------------------------\n",
      " * Accepted dialects: tfl, builtin, func\n",
      " * Non-Converted Ops: 384, Total Ops 1420, % non-converted = 27.04 %\n",
      " * 384 ARITH ops\n",
      "\n",
      "- arith.constant:  384 occurrences  (f32: 352, i32: 32)\n",
      "\n",
      "\n",
      "\n",
      "  (f32: 40)\n",
      "  (f32: 4)\n",
      "  (f32: 36)\n",
      "  (f32: 166)\n",
      "  (f32: 8)\n",
      "  (f32: 160)\n",
      "  (f32: 3)\n",
      "  (f32: 163)\n",
      "  (f32: 80)\n",
      "  (f32: 11)\n",
      "  (f32: 2)\n",
      "  (f32: 3)\n",
      "  (f32: 4)\n",
      "  (f32: 18)\n",
      "  (f32: 2)\n",
      "  (f32: 333)\n",
      "2024-10-07 13:42:33.459613: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 50.886 G  ops, equivalently 25.443 G  MACs\n",
      "fully_quantize: 0, inference_type: 6, input_inference_type: UINT8, output_inference_type: UINT8\n",
      "2024-10-07 13:43:08.821406: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 50.886 G  ops, equivalently 25.443 G  MACs\n"
     ]
    }
   ],
   "source": [
    "# Export each YOLO checkpoint to a fully-integer-quantized (uint8) TFLite model\n",
    "# via the ONNX -> TF SavedModel -> TFLite pipeline (Converter is defined earlier in this notebook).\n",
    "from ultralytics import YOLO\n",
    "import os\n",
    "\n",
    "MODEL_FOLDER = \"/mnt/ssd2/xxx/repo/ultralytics/customization\"\n",
    "MODEL_NAMES = [\n",
    "    \"yolov8n\",\n",
    "    \"yolov8s\",\n",
    "    \"yolov8m\",\n",
    "    \"yolov8l\",\n",
    "    \"yolov8x\",\n",
    "    \"yolo11n\",\n",
    "    \"yolo11s\",\n",
    "    \"yolo11m\",\n",
    "    \"yolo11l\",\n",
    "    \"yolo11x\",\n",
    "]\n",
    "for model_name in MODEL_NAMES:\n",
    "    model_path = os.path.join(MODEL_FOLDER, model_name + \".pt\")\n",
    "    model = YOLO(model_path).model\n",
    "\n",
    "    image_size = (320, 320)\n",
    "    # Suffix the export name with the input resolution, e.g. yolo11x_r320.\n",
    "    model_name = f\"{model_name}_r{image_size[0]}\"\n",
    "    torch_model_path = None  # use initialized model\n",
    "    onnx_model_path = f\"saved_model/{model_name}/{model_name}.onnx\"\n",
    "    tf_folder_path = f\"saved_model/{model_name}\"\n",
    "    tflite_model_path = onnx_model_path.replace(\n",
    "        \".onnx\", \"_full_integer_quant_uint8.tflite\"\n",
    "    )\n",
    "    calib_data_path = \"calibdata.npy\"\n",
    "\n",
    "    converter = Converter(\n",
    "        model,\n",
    "        image_size,\n",
    "        torch_model_path,\n",
    "        onnx_model_path,\n",
    "        tf_folder_path,\n",
    "        tflite_model_path,\n",
    "        calib_data_path,\n",
    "        opset_version=11,\n",
    "    )\n",
    "    converter.convert()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Mobilenetv4FPN"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### c1"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### num_outs"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/chenxin/anaconda3/envs/ocd/lib/python3.10/site-packages/albumentations/__init__.py:13: UserWarning: A new version of Albumentations is available: 1.4.18 (you have 1.4.16). Upgrade using: pip install -U albumentations. To disable automatic update checks, set the environment variable NO_ALBUMENTATIONS_UPDATE to 1.\n",
      "  check_for_updates()\n",
      "Unexpected keys (classifier.bias, classifier.weight, conv_head.weight, norm_head.bias, norm_head.num_batches_tracked, norm_head.running_mean, norm_head.running_var, norm_head.weight) found while loading pretrained weights. This may be expected if model is being adapted.\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/decode_heads/decode_head.py:137: UserWarning: threshold is not defined for binary, and defaultsto 0.3\n",
      "  warnings.warn(\"threshold is not defined for binary, and defaults\" \"to 0.3\")\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/losses/cross_entropy_loss.py:250: UserWarning: Default ``avg_non_ignore`` is False, if you would like to ignore the certain label and average loss over non-ignore labels, which is the same with PyTorch official cross_entropy, set ``avg_non_ignore=True``.\n",
      "  warnings.warn(\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/utils/wrappers.py:48: TracerWarning: Converting a tensor to a Python integer might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!\n",
      "  size = [int(t * self.scale_factor) for t in x.shape[-2:]]\n",
      "2024-10-14 00:33:09.966897: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-14 00:33:09.969918: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-14 00:33:09.973504: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-14 00:33:09.979786: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-14 00:33:09.981508: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-14 00:33:09.984134: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-14 00:33:10.426179: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-14 00:33:10.427946: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-14 00:33:10.430195: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-14 00:33:10.431757: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1929] Created device /job:localhost/replica:0/task:0/device:GPU:0 with 44685 MB memory:  -> device: 0, name: NVIDIA A100 80GB PCIe, pci bus id: 0000:81:00.0, compute capability: 8.0\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:From /home/chenxin/anaconda3/envs/ocd/lib/python3.10/site-packages/tensorflow/python/util/dispatch.py:1260: resize_nearest_neighbor (from tensorflow.python.ops.image_ops_impl) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Use `tf.image.resize(...method=ResizeMethod.NEAREST_NEIGHBOR...)` instead.\n",
      "INFO:tensorflow:Assets written to: saved_model/fpn_mobilenetv4_conv_small_o2_oc64_r320/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/fpn_mobilenetv4_conv_small_o2_oc64_r320/assets\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Preparing calibration data\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-10-14 00:33:22.020518: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:378] Ignored output_format.\n",
      "2024-10-14 00:33:22.020555: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:381] Ignored drop_control_dependency.\n",
      "2024-10-14 00:33:22.021185: I tensorflow/cc/saved_model/reader.cc:83] Reading SavedModel from: saved_model/fpn_mobilenetv4_conv_small_o2_oc64_r320\n",
      "2024-10-14 00:33:22.056132: I tensorflow/cc/saved_model/reader.cc:51] Reading meta graph with tags { serve }\n",
      "2024-10-14 00:33:22.056165: I tensorflow/cc/saved_model/reader.cc:146] Reading SavedModel debug info (if present) from: saved_model/fpn_mobilenetv4_conv_small_o2_oc64_r320\n",
      "2024-10-14 00:33:22.070242: I tensorflow/compiler/mlir/mlir_graph_optimization_pass.cc:388] MLIR V1 optimization pass is not enabled\n",
      "2024-10-14 00:33:22.072488: I tensorflow/cc/saved_model/loader.cc:233] Restoring SavedModel bundle.\n",
      "2024-10-14 00:33:22.135441: I tensorflow/cc/saved_model/loader.cc:217] Running initialization op on SavedModel bundle at path: saved_model/fpn_mobilenetv4_conv_small_o2_oc64_r320\n",
      "2024-10-14 00:33:22.172512: I tensorflow/cc/saved_model/loader.cc:316] SavedModel load for tags { serve }; Status: success: OK. Took 151328 microseconds.\n",
      "2024-10-14 00:33:22.229438: I tensorflow/compiler/mlir/tensorflow/utils/dump_mlir_util.cc:269] disabling MLIR crash reproducer, set env var `MLIR_CRASH_REPRODUCER_DIRECTORY` to enable.\n",
      "Summary on the non-converted ops:\n",
      "---------------------------------\n",
      " * Accepted dialects: tfl, builtin, func\n",
      " * Non-Converted Ops: 106, Total Ops 242, % non-converted = 43.80 %\n",
      " * 106 ARITH ops\n",
      "\n",
      "- arith.constant:  106 occurrences  (f32: 100, i32: 6)\n",
      "\n",
      "\n",
      "\n",
      "  (f32: 12)\n",
      "  (f32: 37)\n",
      "  (f32: 15)\n",
      "  (f32: 22)\n",
      "  (f32: 1)\n",
      "  (f32: 2)\n",
      "  (f32: 1)\n",
      "  (f32: 43)\n",
      "2024-10-14 00:33:22.398129: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 825.872 M  ops, equivalently 412.936 M  MACs\n",
      "fully_quantize: 0, inference_type: 6, input_inference_type: UINT8, output_inference_type: UINT8\n",
      "2024-10-14 00:33:23.577065: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 825.872 M  ops, equivalently 412.936 M  MACs\n",
      "WARNING:timm.models._builder:Unexpected keys (classifier.bias, classifier.weight, conv_head.weight, norm_head.bias, norm_head.num_batches_tracked, norm_head.running_mean, norm_head.running_var, norm_head.weight) found while loading pretrained weights. This may be expected if model is being adapted.\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/decode_heads/decode_head.py:137: UserWarning: threshold is not defined for binary, and defaultsto 0.3\n",
      "  warnings.warn(\"threshold is not defined for binary, and defaults\" \"to 0.3\")\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/losses/cross_entropy_loss.py:250: UserWarning: Default ``avg_non_ignore`` is False, if you would like to ignore the certain label and average loss over non-ignore labels, which is the same with PyTorch official cross_entropy, set ``avg_non_ignore=True``.\n",
      "  warnings.warn(\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/utils/wrappers.py:48: TracerWarning: Converting a tensor to a Python integer might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!\n",
      "  size = [int(t * self.scale_factor) for t in x.shape[-2:]]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/fpn_mobilenetv4_conv_medium_o2_oc96_r320/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/fpn_mobilenetv4_conv_medium_o2_oc96_r320/assets\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Preparing calibration data\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-10-14 00:33:33.014329: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:378] Ignored output_format.\n",
      "2024-10-14 00:33:33.014361: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:381] Ignored drop_control_dependency.\n",
      "2024-10-14 00:33:33.014599: I tensorflow/cc/saved_model/reader.cc:83] Reading SavedModel from: saved_model/fpn_mobilenetv4_conv_medium_o2_oc96_r320\n",
      "2024-10-14 00:33:33.175437: I tensorflow/cc/saved_model/reader.cc:51] Reading meta graph with tags { serve }\n",
      "2024-10-14 00:33:33.175469: I tensorflow/cc/saved_model/reader.cc:146] Reading SavedModel debug info (if present) from: saved_model/fpn_mobilenetv4_conv_medium_o2_oc96_r320\n",
      "2024-10-14 00:33:33.188578: I tensorflow/cc/saved_model/loader.cc:233] Restoring SavedModel bundle.\n",
      "2024-10-14 00:33:33.261786: I tensorflow/cc/saved_model/loader.cc:217] Running initialization op on SavedModel bundle at path: saved_model/fpn_mobilenetv4_conv_medium_o2_oc96_r320\n",
      "2024-10-14 00:33:33.317578: I tensorflow/cc/saved_model/loader.cc:316] SavedModel load for tags { serve }; Status: success: OK. Took 302981 microseconds.\n",
      "Summary on the non-converted ops:\n",
      "---------------------------------\n",
      " * Accepted dialects: tfl, builtin, func\n",
      " * Non-Converted Ops: 168, Total Ops 385, % non-converted = 43.64 %\n",
      " * 168 ARITH ops\n",
      "\n",
      "- arith.constant:  168 occurrences  (f32: 162, i32: 6)\n",
      "\n",
      "\n",
      "\n",
      "  (f32: 20)\n",
      "  (f32: 53)\n",
      "  (f32: 30)\n",
      "  (f32: 36)\n",
      "  (f32: 1)\n",
      "  (f32: 2)\n",
      "  (f32: 1)\n",
      "  (f32: 71)\n",
      "2024-10-14 00:33:33.792512: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 3.522 G  ops, equivalently 1.761 G  MACs\n",
      "fully_quantize: 0, inference_type: 6, input_inference_type: UINT8, output_inference_type: UINT8\n",
      "2024-10-14 00:33:37.239128: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 3.522 G  ops, equivalently 1.761 G  MACs\n",
      "WARNING:timm.models._builder:Unexpected keys (classifier.bias, classifier.weight, conv_head.weight, norm_head.bias, norm_head.num_batches_tracked, norm_head.running_mean, norm_head.running_var, norm_head.weight) found while loading pretrained weights. This may be expected if model is being adapted.\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/decode_heads/decode_head.py:137: UserWarning: threshold is not defined for binary, and defaultsto 0.3\n",
      "  warnings.warn(\"threshold is not defined for binary, and defaults\" \"to 0.3\")\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/losses/cross_entropy_loss.py:250: UserWarning: Default ``avg_non_ignore`` is False, if you would like to ignore the certain label and average loss over non-ignore labels, which is the same with PyTorch official cross_entropy, set ``avg_non_ignore=True``.\n",
      "  warnings.warn(\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/utils/wrappers.py:48: TracerWarning: Converting a tensor to a Python integer might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!\n",
      "  size = [int(t * self.scale_factor) for t in x.shape[-2:]]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/fpn_mobilenetv4_conv_large_o2_oc128_r320/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/fpn_mobilenetv4_conv_large_o2_oc128_r320/assets\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Preparing calibration data\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-10-14 00:33:53.943178: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:378] Ignored output_format.\n",
      "2024-10-14 00:33:53.943222: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:381] Ignored drop_control_dependency.\n",
      "2024-10-14 00:33:53.943466: I tensorflow/cc/saved_model/reader.cc:83] Reading SavedModel from: saved_model/fpn_mobilenetv4_conv_large_o2_oc128_r320\n",
      "2024-10-14 00:33:53.975454: I tensorflow/cc/saved_model/reader.cc:51] Reading meta graph with tags { serve }\n",
      "2024-10-14 00:33:53.975498: I tensorflow/cc/saved_model/reader.cc:146] Reading SavedModel debug info (if present) from: saved_model/fpn_mobilenetv4_conv_large_o2_oc128_r320\n",
      "2024-10-14 00:33:54.052455: I tensorflow/cc/saved_model/loader.cc:233] Restoring SavedModel bundle.\n",
      "2024-10-14 00:33:54.259157: I tensorflow/cc/saved_model/loader.cc:217] Running initialization op on SavedModel bundle at path: saved_model/fpn_mobilenetv4_conv_large_o2_oc128_r320\n",
      "2024-10-14 00:33:54.457335: I tensorflow/cc/saved_model/loader.cc:316] SavedModel load for tags { serve }; Status: success: OK. Took 513892 microseconds.\n",
      "Summary on the non-converted ops:\n",
      "---------------------------------\n",
      " * Accepted dialects: tfl, builtin, func\n",
      " * Non-Converted Ops: 218, Total Ops 510, % non-converted = 42.75 %\n",
      " * 218 ARITH ops\n",
      "\n",
      "- arith.constant:  218 occurrences  (f32: 212, i32: 6)\n",
      "\n",
      "\n",
      "\n",
      "  (f32: 25)\n",
      "  (f32: 63)\n",
      "  (f32: 45)\n",
      "  (f32: 51)\n",
      "  (f32: 1)\n",
      "  (f32: 2)\n",
      "  (f32: 1)\n",
      "  (f32: 101)\n",
      "2024-10-14 00:33:55.954061: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 9.092 G  ops, equivalently 4.546 G  MACs\n",
      "fully_quantize: 0, inference_type: 6, input_inference_type: UINT8, output_inference_type: UINT8\n",
      "2024-10-14 00:34:04.352788: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 9.092 G  ops, equivalently 4.546 G  MACs\n"
     ]
    }
   ],
   "source": [
    "# Build MobileNetV4-FPN variants and convert each one to a uint8 TFLite model\n",
    "# using the same Converter pipeline as above.\n",
    "from models.fpn import OCDFPN\n",
    "\n",
    "num_outs_list = [\n",
    "    2,\n",
    "    3,\n",
    "    4,\n",
    "    5,\n",
    "]\n",
    "\n",
    "image_size = (320, 320)\n",
    "backbone_dict = {\n",
    "    \"small\": \"mobilenetv4_conv_small.e2400_r224_in1k\",\n",
    "    \"medium\": \"mobilenetv4_conv_medium.e500_r256_in1k\",\n",
    "    \"large\": \"mobilenetv4_conv_large.e500_r256_in1k\",\n",
    "}\n",
    "fpn_type = \"mm\"\n",
    "\n",
    "list_out_channel = [64, 96, 128]\n",
    "\n",
    "for num_outs in num_outs_list:\n",
    "    # Pair each backbone size with its matching FPN output width (small->64, medium->96, large->128).\n",
    "    for out_channel, (backbone_key, timm_backbone) in zip(\n",
    "        list_out_channel, backbone_dict.items()\n",
    "    ):\n",
    "        model = OCDFPN(\n",
    "            backbone=timm_backbone,\n",
    "            n_classes=1,\n",
    "            num_outs=num_outs,\n",
    "            out_channel=out_channel,\n",
    "            fpn_type=fpn_type,\n",
    "        )\n",
    "\n",
    "        model_name = f\"fpn_mobilenetv4_conv_{backbone_key}_o{num_outs}_oc{out_channel}_r{image_size[0]}\"\n",
    "        torch_model_path = None  # use initialized model\n",
    "        onnx_model_path = f\"saved_model/{model_name}/{model_name}.onnx\"\n",
    "        tf_folder_path = f\"saved_model/{model_name}\"\n",
    "        tflite_model_path = onnx_model_path.replace(\n",
    "            \".onnx\", \"_full_integer_quant_uint8.tflite\"\n",
    "        )\n",
    "        calib_data_path = \"calibdata.npy\"\n",
    "\n",
    "        converter = Converter(\n",
    "            model,\n",
    "            image_size,\n",
    "            torch_model_path,\n",
    "            onnx_model_path,\n",
    "            tf_folder_path,\n",
    "            tflite_model_path,\n",
    "            calib_data_path,\n",
    "            opset_version=11,\n",
    "        )\n",
    "        converter.convert()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### fpn_type: mm"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/chenxin/anaconda3/envs/ocd/lib/python3.10/site-packages/albumentations/__init__.py:13: UserWarning: A new version of Albumentations is available: 1.4.17 (you have 1.4.16). Upgrade using: pip install -U albumentations. To disable automatic update checks, set the environment variable NO_ALBUMENTATIONS_UPDATE to 1.\n",
      "  check_for_updates()\n",
      "Unexpected keys (classifier.bias, classifier.weight, conv_head.weight, norm_head.bias, norm_head.num_batches_tracked, norm_head.running_mean, norm_head.running_var, norm_head.weight) found while loading pretrained weights. This may be expected if model is being adapted.\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/decode_heads/decode_head.py:137: UserWarning: threshold is not defined for binary, and defaultsto 0.3\n",
      "  warnings.warn(\"threshold is not defined for binary, and defaults\" \"to 0.3\")\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/losses/cross_entropy_loss.py:250: UserWarning: Default ``avg_non_ignore`` is False, if you would like to ignore the certain label and average loss over non-ignore labels, which is the same with PyTorch official cross_entropy, set ``avg_non_ignore=True``.\n",
      "  warnings.warn(\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/utils/wrappers.py:48: TracerWarning: Converting a tensor to a Python integer might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!\n",
      "  size = [int(t * self.scale_factor) for t in x.shape[-2:]]\n",
      "2024-10-07 23:12:54.614869: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-07 23:12:54.616654: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-07 23:12:54.773695: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-07 23:12:55.336837: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-07 23:12:55.355133: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-07 23:12:55.356983: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-07 23:12:55.358519: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-07 23:12:55.362231: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-07 23:12:55.363863: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-07 23:12:55.365337: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-07 23:12:55.366710: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-07 23:12:55.368054: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-07 23:12:57.927541: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-07 23:12:57.928635: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-07 23:12:57.930117: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-07 23:12:57.931165: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-07 23:12:57.932420: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-07 23:12:57.933357: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-07 23:12:57.934768: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-07 23:12:57.936200: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-07 23:12:57.937470: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-07 23:12:57.938493: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-07 23:12:57.940033: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-07 23:12:57.940963: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-07 23:12:57.989191: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-07 23:12:57.990856: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-07 23:12:57.992475: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-07 23:12:57.993511: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-07 23:12:57.995158: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-07 23:12:57.998850: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-07 23:12:58.002326: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-07 23:12:58.003354: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-07 23:12:58.005089: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-07 23:12:58.006226: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1929] Created device /job:localhost/replica:0/task:0/device:GPU:0 with 39370 MB memory:  -> device: 0, name: NVIDIA A100 80GB PCIe, pci bus id: 0000:01:00.0, compute capability: 8.0\n",
      "2024-10-07 23:12:58.006576: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-07 23:12:58.009505: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1929] Created device /job:localhost/replica:0/task:0/device:GPU:1 with 25969 MB memory:  -> device: 1, name: NVIDIA A100 80GB PCIe, pci bus id: 0000:41:00.0, compute capability: 8.0\n",
      "2024-10-07 23:12:58.009809: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-07 23:12:58.010781: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1929] Created device /job:localhost/replica:0/task:0/device:GPU:2 with 36682 MB memory:  -> device: 2, name: NVIDIA A100 80GB PCIe, pci bus id: 0000:81:00.0, compute capability: 8.0\n",
      "2024-10-07 23:12:58.011050: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-07 23:12:58.012409: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1929] Created device /job:localhost/replica:0/task:0/device:GPU:3 with 19760 MB memory:  -> device: 3, name: NVIDIA A100 80GB PCIe, pci bus id: 0000:c1:00.0, compute capability: 8.0\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:From /home/chenxin/anaconda3/envs/ocd/lib/python3.10/site-packages/tensorflow/python/util/dispatch.py:1260: resize_nearest_neighbor (from tensorflow.python.ops.image_ops_impl) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Use `tf.image.resize(...method=ResizeMethod.NEAREST_NEIGHBOR...)` instead.\n",
      "INFO:tensorflow:Assets written to: saved_model/fpn_mobilenetv4_conv_small_o4_oc64_r320/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/fpn_mobilenetv4_conv_small_o4_oc64_r320/assets\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Preparing calibration data\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-10-07 23:13:11.868508: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:378] Ignored output_format.\n",
      "2024-10-07 23:13:11.868542: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:381] Ignored drop_control_dependency.\n",
      "2024-10-07 23:13:11.869415: I tensorflow/cc/saved_model/reader.cc:83] Reading SavedModel from: saved_model/fpn_mobilenetv4_conv_small_o4_oc64_r320\n",
      "2024-10-07 23:13:11.874769: I tensorflow/cc/saved_model/reader.cc:51] Reading meta graph with tags { serve }\n",
      "2024-10-07 23:13:11.874788: I tensorflow/cc/saved_model/reader.cc:146] Reading SavedModel debug info (if present) from: saved_model/fpn_mobilenetv4_conv_small_o4_oc64_r320\n",
      "2024-10-07 23:13:11.892493: I tensorflow/compiler/mlir/mlir_graph_optimization_pass.cc:388] MLIR V1 optimization pass is not enabled\n",
      "2024-10-07 23:13:11.895579: I tensorflow/cc/saved_model/loader.cc:233] Restoring SavedModel bundle.\n",
      "2024-10-07 23:13:12.002793: I tensorflow/cc/saved_model/loader.cc:217] Running initialization op on SavedModel bundle at path: saved_model/fpn_mobilenetv4_conv_small_o4_oc64_r320\n",
      "2024-10-07 23:13:12.074609: I tensorflow/cc/saved_model/loader.cc:316] SavedModel load for tags { serve }; Status: success: OK. Took 205196 microseconds.\n",
      "2024-10-07 23:13:12.195860: I tensorflow/compiler/mlir/tensorflow/utils/dump_mlir_util.cc:269] disabling MLIR crash reproducer, set env var `MLIR_CRASH_REPRODUCER_DIRECTORY` to enable.\n",
      "Summary on the non-converted ops:\n",
      "---------------------------------\n",
      " * Accepted dialects: tfl, builtin, func\n",
      " * Non-Converted Ops: 117, Total Ops 296, % non-converted = 39.53 %\n",
      " * 117 ARITH ops\n",
      "\n",
      "- arith.constant:  117 occurrences  (f32: 109, i32: 8)\n",
      "\n",
      "\n",
      "\n",
      "  (f32: 16)\n",
      "  (f32: 46)\n",
      "  (f32: 15)\n",
      "  (f32: 29)\n",
      "  (f32: 1)\n",
      "  (f32: 9)\n",
      "  (f32: 3)\n",
      "  (f32: 57)\n",
      "2024-10-07 23:13:12.389979: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 1.819 G  ops, equivalently 0.910 G  MACs\n",
      "fully_quantize: 0, inference_type: 6, input_inference_type: UINT8, output_inference_type: UINT8\n",
      "2024-10-07 23:13:14.119128: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 1.819 G  ops, equivalently 0.910 G  MACs\n",
      "WARNING:timm.models._builder:Unexpected keys (classifier.bias, classifier.weight, conv_head.weight, norm_head.bias, norm_head.num_batches_tracked, norm_head.running_mean, norm_head.running_var, norm_head.weight) found while loading pretrained weights. This may be expected if model is being adapted.\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/decode_heads/decode_head.py:137: UserWarning: threshold is not defined for binary, and defaultsto 0.3\n",
      "  warnings.warn(\"threshold is not defined for binary, and defaults\" \"to 0.3\")\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/losses/cross_entropy_loss.py:250: UserWarning: Default ``avg_non_ignore`` is False, if you would like to ignore the certain label and average loss over non-ignore labels, which is the same with PyTorch official cross_entropy, set ``avg_non_ignore=True``.\n",
      "  warnings.warn(\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/utils/wrappers.py:48: TracerWarning: Converting a tensor to a Python integer might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!\n",
      "  size = [int(t * self.scale_factor) for t in x.shape[-2:]]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/fpn_mobilenetv4_conv_medium_o4_oc96_r320/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/fpn_mobilenetv4_conv_medium_o4_oc96_r320/assets\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Preparing calibration data\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-10-07 23:13:25.145328: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:378] Ignored output_format.\n",
      "2024-10-07 23:13:25.145365: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:381] Ignored drop_control_dependency.\n",
      "2024-10-07 23:13:25.145566: I tensorflow/cc/saved_model/reader.cc:83] Reading SavedModel from: saved_model/fpn_mobilenetv4_conv_medium_o4_oc96_r320\n",
      "2024-10-07 23:13:25.158339: I tensorflow/cc/saved_model/reader.cc:51] Reading meta graph with tags { serve }\n",
      "2024-10-07 23:13:25.158373: I tensorflow/cc/saved_model/reader.cc:146] Reading SavedModel debug info (if present) from: saved_model/fpn_mobilenetv4_conv_medium_o4_oc96_r320\n",
      "2024-10-07 23:13:25.175796: I tensorflow/cc/saved_model/loader.cc:233] Restoring SavedModel bundle.\n",
      "2024-10-07 23:13:25.280557: I tensorflow/cc/saved_model/loader.cc:217] Running initialization op on SavedModel bundle at path: saved_model/fpn_mobilenetv4_conv_medium_o4_oc96_r320\n",
      "2024-10-07 23:13:25.361692: I tensorflow/cc/saved_model/loader.cc:316] SavedModel load for tags { serve }; Status: success: OK. Took 216126 microseconds.\n",
      "Summary on the non-converted ops:\n",
      "---------------------------------\n",
      " * Accepted dialects: tfl, builtin, func\n",
      " * Non-Converted Ops: 179, Total Ops 439, % non-converted = 40.77 %\n",
      " * 179 ARITH ops\n",
      "\n",
      "- arith.constant:  179 occurrences  (f32: 171, i32: 8)\n",
      "\n",
      "\n",
      "\n",
      "  (f32: 24)\n",
      "  (f32: 62)\n",
      "  (f32: 30)\n",
      "  (f32: 43)\n",
      "  (f32: 1)\n",
      "  (f32: 9)\n",
      "  (f32: 3)\n",
      "  (f32: 85)\n",
      "2024-10-07 23:13:25.927769: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 5.445 G  ops, equivalently 2.723 G  MACs\n",
      "fully_quantize: 0, inference_type: 6, input_inference_type: UINT8, output_inference_type: UINT8\n",
      "2024-10-07 23:13:30.005580: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 5.445 G  ops, equivalently 2.723 G  MACs\n",
      "WARNING:timm.models._builder:Unexpected keys (classifier.bias, classifier.weight, conv_head.weight, norm_head.bias, norm_head.num_batches_tracked, norm_head.running_mean, norm_head.running_var, norm_head.weight) found while loading pretrained weights. This may be expected if model is being adapted.\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/decode_heads/decode_head.py:137: UserWarning: threshold is not defined for binary, and defaultsto 0.3\n",
      "  warnings.warn(\"threshold is not defined for binary, and defaults\" \"to 0.3\")\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/losses/cross_entropy_loss.py:250: UserWarning: Default ``avg_non_ignore`` is False, if you would like to ignore the certain label and average loss over non-ignore labels, which is the same with PyTorch official cross_entropy, set ``avg_non_ignore=True``.\n",
      "  warnings.warn(\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/utils/wrappers.py:48: TracerWarning: Converting a tensor to a Python integer might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!\n",
      "  size = [int(t * self.scale_factor) for t in x.shape[-2:]]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/fpn_mobilenetv4_conv_large_o4_oc128_r320/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/fpn_mobilenetv4_conv_large_o4_oc128_r320/assets\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Preparing calibration data\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-10-07 23:13:48.543634: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:378] Ignored output_format.\n",
      "2024-10-07 23:13:48.543671: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:381] Ignored drop_control_dependency.\n",
      "2024-10-07 23:13:48.543883: I tensorflow/cc/saved_model/reader.cc:83] Reading SavedModel from: saved_model/fpn_mobilenetv4_conv_large_o4_oc128_r320\n",
      "2024-10-07 23:13:48.579576: I tensorflow/cc/saved_model/reader.cc:51] Reading meta graph with tags { serve }\n",
      "2024-10-07 23:13:48.579612: I tensorflow/cc/saved_model/reader.cc:146] Reading SavedModel debug info (if present) from: saved_model/fpn_mobilenetv4_conv_large_o4_oc128_r320\n",
      "2024-10-07 23:13:48.678962: I tensorflow/cc/saved_model/loader.cc:233] Restoring SavedModel bundle.\n",
      "2024-10-07 23:13:48.919250: I tensorflow/cc/saved_model/loader.cc:217] Running initialization op on SavedModel bundle at path: saved_model/fpn_mobilenetv4_conv_large_o4_oc128_r320\n",
      "2024-10-07 23:13:49.159748: I tensorflow/cc/saved_model/loader.cc:316] SavedModel load for tags { serve }; Status: success: OK. Took 615865 microseconds.\n",
      "Summary on the non-converted ops:\n",
      "---------------------------------\n",
      " * Accepted dialects: tfl, builtin, func\n",
      " * Non-Converted Ops: 229, Total Ops 564, % non-converted = 40.60 %\n",
      " * 229 ARITH ops\n",
      "\n",
      "- arith.constant:  229 occurrences  (f32: 221, i32: 8)\n",
      "\n",
      "\n",
      "\n",
      "  (f32: 29)\n",
      "  (f32: 72)\n",
      "  (f32: 45)\n",
      "  (f32: 58)\n",
      "  (f32: 1)\n",
      "  (f32: 9)\n",
      "  (f32: 3)\n",
      "  (f32: 115)\n",
      "2024-10-07 23:13:51.191541: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 12.053 G  ops, equivalently 6.027 G  MACs\n",
      "fully_quantize: 0, inference_type: 6, input_inference_type: UINT8, output_inference_type: UINT8\n",
      "2024-10-07 23:14:01.210104: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 12.053 G  ops, equivalently 6.027 G  MACs\n"
     ]
    }
   ],
   "source": [
    "from models.fpn import OCDFPN\n",
    "\n",
    "num_outs = 4\n",
    "\n",
    "image_size = (320, 320)\n",
    "backbone_dict = {\n",
    "    \"small\": \"mobilenetv4_conv_small.e2400_r224_in1k\",\n",
    "    \"medium\": \"mobilenetv4_conv_medium.e500_r256_in1k\",\n",
    "    \"large\": \"mobilenetv4_conv_large.e500_r256_in1k\",\n",
    "}\n",
    "fpn_type = \"mm\"\n",
    "\n",
    "list_out_channel = [64, 96, 128]\n",
    "\n",
    "for out_channel, backbone_key in zip(list_out_channel, backbone_dict.keys()):\n",
    "    backbone_value = backbone_dict[backbone_key]\n",
    "    model = OCDFPN(\n",
    "        backbone=backbone_value,\n",
    "        n_classes=1,\n",
    "        num_outs=num_outs,\n",
    "        out_channel=out_channel,\n",
    "        fpn_type=fpn_type,\n",
    "    )\n",
    "\n",
    "    model_name = f\"fpn_mobilenetv4_conv_{backbone_key}_o{num_outs}_oc{out_channel}_r{image_size[0]}\"\n",
    "    torch_model_path = None  # use initialized model\n",
    "    onnx_model_path = f\"saved_model/{model_name}/{model_name}.onnx\"\n",
    "    tf_folder_path = f\"saved_model/{model_name}\"\n",
    "    tflite_model_path = onnx_model_path.replace(\n",
    "        \".onnx\", \"_full_integer_quant_uint8.tflite\"\n",
    "    )\n",
    "    calib_data_path = \"calibdata.npy\"\n",
    "\n",
    "    converter = Converter(\n",
    "        model,\n",
    "        image_size,\n",
    "        torch_model_path,\n",
    "        onnx_model_path,\n",
    "        tf_folder_path,\n",
    "        tflite_model_path,\n",
    "        calib_data_path,\n",
    "        opset_version=11,\n",
    "    )\n",
    "    converter.convert()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### fpn_type: extra_dw"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "WARNING:timm.models._builder:Unexpected keys (classifier.bias, classifier.weight, conv_head.weight, norm_head.bias, norm_head.num_batches_tracked, norm_head.running_mean, norm_head.running_var, norm_head.weight) found while loading pretrained weights. This may be expected if model is being adapted.\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/decode_heads/decode_head.py:137: UserWarning: threshold is not defined for binary, and defaultsto 0.3\n",
      "  warnings.warn(\"threshold is not defined for binary, and defaults\" \"to 0.3\")\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/losses/cross_entropy_loss.py:250: UserWarning: Default ``avg_non_ignore`` is False, if you would like to ignore the certain label and average loss over non-ignore labels, which is the same with PyTorch official cross_entropy, set ``avg_non_ignore=True``.\n",
      "  warnings.warn(\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/utils/wrappers.py:48: TracerWarning: Converting a tensor to a Python integer might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!\n",
      "  size = [int(t * self.scale_factor) for t in x.shape[-2:]]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/fpn_mobilenetv4_conv_small_extra_dw_o4_oc64_r320/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/fpn_mobilenetv4_conv_small_extra_dw_o4_oc64_r320/assets\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Preparing calibration data\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-10-07 23:54:25.545094: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:378] Ignored output_format.\n",
      "2024-10-07 23:54:25.545129: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:381] Ignored drop_control_dependency.\n",
      "2024-10-07 23:54:25.545325: I tensorflow/cc/saved_model/reader.cc:83] Reading SavedModel from: saved_model/fpn_mobilenetv4_conv_small_extra_dw_o4_oc64_r320\n",
      "2024-10-07 23:54:25.552890: I tensorflow/cc/saved_model/reader.cc:51] Reading meta graph with tags { serve }\n",
      "2024-10-07 23:54:25.552916: I tensorflow/cc/saved_model/reader.cc:146] Reading SavedModel debug info (if present) from: saved_model/fpn_mobilenetv4_conv_small_extra_dw_o4_oc64_r320\n",
      "2024-10-07 23:54:25.574226: I tensorflow/cc/saved_model/loader.cc:233] Restoring SavedModel bundle.\n",
      "2024-10-07 23:54:25.672666: I tensorflow/cc/saved_model/loader.cc:217] Running initialization op on SavedModel bundle at path: saved_model/fpn_mobilenetv4_conv_small_extra_dw_o4_oc64_r320\n",
      "2024-10-07 23:54:25.771914: I tensorflow/cc/saved_model/loader.cc:316] SavedModel load for tags { serve }; Status: success: OK. Took 226590 microseconds.\n",
      "Summary on the non-converted ops:\n",
      "---------------------------------\n",
      " * Accepted dialects: tfl, builtin, func\n",
      " * Non-Converted Ops: 152, Total Ops 404, % non-converted = 37.62 %\n",
      " * 152 ARITH ops\n",
      "\n",
      "- arith.constant:  152 occurrences  (f32: 144, i32: 8)\n",
      "\n",
      "\n",
      "\n",
      "  (f32: 23)\n",
      "  (f32: 57)\n",
      "  (f32: 37)\n",
      "  (f32: 40)\n",
      "  (f32: 1)\n",
      "  (f32: 9)\n",
      "  (f32: 3)\n",
      "  (f32: 79)\n",
      "2024-10-07 23:54:26.218970: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 1.799 G  ops, equivalently 0.900 G  MACs\n",
      "fully_quantize: 0, inference_type: 6, input_inference_type: UINT8, output_inference_type: UINT8\n",
      "2024-10-07 23:54:29.545193: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 1.799 G  ops, equivalently 0.900 G  MACs\n",
      "WARNING:timm.models._builder:Unexpected keys (classifier.bias, classifier.weight, conv_head.weight, norm_head.bias, norm_head.num_batches_tracked, norm_head.running_mean, norm_head.running_var, norm_head.weight) found while loading pretrained weights. This may be expected if model is being adapted.\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/decode_heads/decode_head.py:137: UserWarning: threshold is not defined for binary, and defaultsto 0.3\n",
      "  warnings.warn(\"threshold is not defined for binary, and defaults\" \"to 0.3\")\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/losses/cross_entropy_loss.py:250: UserWarning: Default ``avg_non_ignore`` is False, if you would like to ignore the certain label and average loss over non-ignore labels, which is the same with PyTorch official cross_entropy, set ``avg_non_ignore=True``.\n",
      "  warnings.warn(\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/utils/wrappers.py:48: TracerWarning: Converting a tensor to a Python integer might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!\n",
      "  size = [int(t * self.scale_factor) for t in x.shape[-2:]]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/fpn_mobilenetv4_conv_medium_extra_dw_o4_oc96_r320/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/fpn_mobilenetv4_conv_medium_extra_dw_o4_oc96_r320/assets\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Preparing calibration data\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-10-07 23:54:43.112689: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:378] Ignored output_format.\n",
      "2024-10-07 23:54:43.112726: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:381] Ignored drop_control_dependency.\n",
      "2024-10-07 23:54:43.112925: I tensorflow/cc/saved_model/reader.cc:83] Reading SavedModel from: saved_model/fpn_mobilenetv4_conv_medium_extra_dw_o4_oc96_r320\n",
      "2024-10-07 23:54:43.128017: I tensorflow/cc/saved_model/reader.cc:51] Reading meta graph with tags { serve }\n",
      "2024-10-07 23:54:43.128057: I tensorflow/cc/saved_model/reader.cc:146] Reading SavedModel debug info (if present) from: saved_model/fpn_mobilenetv4_conv_medium_extra_dw_o4_oc96_r320\n",
      "2024-10-07 23:54:43.153077: I tensorflow/cc/saved_model/loader.cc:233] Restoring SavedModel bundle.\n",
      "2024-10-07 23:54:43.255156: I tensorflow/cc/saved_model/loader.cc:217] Running initialization op on SavedModel bundle at path: saved_model/fpn_mobilenetv4_conv_medium_extra_dw_o4_oc96_r320\n",
      "2024-10-07 23:54:43.357443: I tensorflow/cc/saved_model/loader.cc:316] SavedModel load for tags { serve }; Status: success: OK. Took 244520 microseconds.\n",
      "Summary on the non-converted ops:\n",
      "---------------------------------\n",
      " * Accepted dialects: tfl, builtin, func\n",
      " * Non-Converted Ops: 213, Total Ops 546, % non-converted = 39.01 %\n",
      " * 213 ARITH ops\n",
      "\n",
      "- arith.constant:  213 occurrences  (f32: 205, i32: 8)\n",
      "\n",
      "\n",
      "\n",
      "  (f32: 31)\n",
      "  (f32: 73)\n",
      "  (f32: 52)\n",
      "  (f32: 54)\n",
      "  (f32: 1)\n",
      "  (f32: 9)\n",
      "  (f32: 3)\n",
      "  (f32: 107)\n",
      "2024-10-07 23:54:44.008570: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 5.416 G  ops, equivalently 2.708 G  MACs\n",
      "fully_quantize: 0, inference_type: 6, input_inference_type: UINT8, output_inference_type: UINT8\n",
      "2024-10-07 23:54:50.088051: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 5.416 G  ops, equivalently 2.708 G  MACs\n",
      "WARNING:timm.models._builder:Unexpected keys (classifier.bias, classifier.weight, conv_head.weight, norm_head.bias, norm_head.num_batches_tracked, norm_head.running_mean, norm_head.running_var, norm_head.weight) found while loading pretrained weights. This may be expected if model is being adapted.\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/decode_heads/decode_head.py:137: UserWarning: threshold is not defined for binary, and defaultsto 0.3\n",
      "  warnings.warn(\"threshold is not defined for binary, and defaults\" \"to 0.3\")\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/losses/cross_entropy_loss.py:250: UserWarning: Default ``avg_non_ignore`` is False, if you would like to ignore the certain label and average loss over non-ignore labels, which is the same with PyTorch official cross_entropy, set ``avg_non_ignore=True``.\n",
      "  warnings.warn(\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/utils/wrappers.py:48: TracerWarning: Converting a tensor to a Python integer might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!\n",
      "  size = [int(t * self.scale_factor) for t in x.shape[-2:]]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/fpn_mobilenetv4_conv_large_extra_dw_o4_oc128_r320/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/fpn_mobilenetv4_conv_large_extra_dw_o4_oc128_r320/assets\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Preparing calibration data\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-10-07 23:55:09.643408: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:378] Ignored output_format.\n",
      "2024-10-07 23:55:09.643446: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:381] Ignored drop_control_dependency.\n",
      "2024-10-07 23:55:09.643656: I tensorflow/cc/saved_model/reader.cc:83] Reading SavedModel from: saved_model/fpn_mobilenetv4_conv_large_extra_dw_o4_oc128_r320\n",
      "2024-10-07 23:55:09.690019: I tensorflow/cc/saved_model/reader.cc:51] Reading meta graph with tags { serve }\n",
      "2024-10-07 23:55:09.690063: I tensorflow/cc/saved_model/reader.cc:146] Reading SavedModel debug info (if present) from: saved_model/fpn_mobilenetv4_conv_large_extra_dw_o4_oc128_r320\n",
      "2024-10-07 23:55:09.736325: I tensorflow/cc/saved_model/loader.cc:233] Restoring SavedModel bundle.\n",
      "2024-10-07 23:55:09.877807: I tensorflow/cc/saved_model/loader.cc:217] Running initialization op on SavedModel bundle at path: saved_model/fpn_mobilenetv4_conv_large_extra_dw_o4_oc128_r320\n",
      "2024-10-07 23:55:10.010333: I tensorflow/cc/saved_model/loader.cc:316] SavedModel load for tags { serve }; Status: success: OK. Took 366679 microseconds.\n",
      "Summary on the non-converted ops:\n",
      "---------------------------------\n",
      " * Accepted dialects: tfl, builtin, func\n",
      " * Non-Converted Ops: 264, Total Ops 672, % non-converted = 39.29 %\n",
      " * 264 ARITH ops\n",
      "\n",
      "- arith.constant:  264 occurrences  (f32: 256, i32: 8)\n",
      "\n",
      "\n",
      "\n",
      "  (f32: 36)\n",
      "  (f32: 83)\n",
      "  (f32: 67)\n",
      "  (f32: 69)\n",
      "  (f32: 1)\n",
      "  (f32: 9)\n",
      "  (f32: 3)\n",
      "  (f32: 137)\n",
      "2024-10-07 23:55:11.665715: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 12.125 G  ops, equivalently 6.062 G  MACs\n",
      "fully_quantize: 0, inference_type: 6, input_inference_type: UINT8, output_inference_type: UINT8\n",
      "2024-10-07 23:55:23.726050: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 12.125 G  ops, equivalently 6.062 G  MACs\n"
     ]
    }
   ],
   "source": [
    "from models.fpn import OCDFPN\n",
    "\n",
    "# Convert one FPN model per MobileNetV4 backbone size (small/medium/large),\n",
    "# each paired with its own output-channel width, using the extra_dw FPN variant.\n",
    "num_outs = 4\n",
    "image_size = (320, 320)\n",
    "fpn_type = \"extra_dw\"\n",
    "\n",
    "# timm checkpoint name per backbone size.\n",
    "backbone_dict = {\n",
    "    \"small\": \"mobilenetv4_conv_small.e2400_r224_in1k\",\n",
    "    \"medium\": \"mobilenetv4_conv_medium.e500_r256_in1k\",\n",
    "    \"large\": \"mobilenetv4_conv_large.e500_r256_in1k\",\n",
    "}\n",
    "# FPN output channels, aligned positionally with backbone_dict above.\n",
    "list_out_channel = [64, 96, 128]\n",
    "\n",
    "for (backbone_key, backbone_value), out_channel in zip(\n",
    "    backbone_dict.items(), list_out_channel\n",
    "):\n",
    "    model = OCDFPN(\n",
    "        backbone=backbone_value,\n",
    "        n_classes=1,\n",
    "        num_outs=num_outs,\n",
    "        out_channel=out_channel,\n",
    "        fpn_type=fpn_type,\n",
    "    )\n",
    "\n",
    "    # All artifact paths are derived from a single model name.\n",
    "    model_name = f\"fpn_mobilenetv4_conv_{backbone_key}_{fpn_type}_o{num_outs}_oc{out_channel}_r{image_size[0]}\"\n",
    "    torch_model_path = None  # use initialized model\n",
    "    onnx_model_path = f\"saved_model/{model_name}/{model_name}.onnx\"\n",
    "    tf_folder_path = f\"saved_model/{model_name}\"\n",
    "    tflite_model_path = onnx_model_path.replace(\n",
    "        \".onnx\", \"_full_integer_quant_uint8.tflite\"\n",
    "    )\n",
    "    calib_data_path = \"calibdata.npy\"\n",
    "\n",
    "    # Converter is defined/imported earlier in this notebook; it drives the\n",
    "    # torch -> ONNX -> SavedModel -> quantized TFLite pipeline.\n",
    "    converter = Converter(\n",
    "        model,\n",
    "        image_size,\n",
    "        torch_model_path,\n",
    "        onnx_model_path,\n",
    "        tf_folder_path,\n",
    "        tflite_model_path,\n",
    "        calib_data_path,\n",
    "        opset_version=11,\n",
    "    )\n",
    "    converter.convert()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### fpn_type: ib"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "WARNING:timm.models._builder:Unexpected keys (classifier.bias, classifier.weight, conv_head.weight, norm_head.bias, norm_head.num_batches_tracked, norm_head.running_mean, norm_head.running_var, norm_head.weight) found while loading pretrained weights. This may be expected if model is being adapted.\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/decode_heads/decode_head.py:137: UserWarning: threshold is not defined for binary, and defaultsto 0.3\n",
      "  warnings.warn(\"threshold is not defined for binary, and defaults\" \"to 0.3\")\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/losses/cross_entropy_loss.py:250: UserWarning: Default ``avg_non_ignore`` is False, if you would like to ignore the certain label and average loss over non-ignore labels, which is the same with PyTorch official cross_entropy, set ``avg_non_ignore=True``.\n",
      "  warnings.warn(\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/utils/wrappers.py:48: TracerWarning: Converting a tensor to a Python integer might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!\n",
      "  size = [int(t * self.scale_factor) for t in x.shape[-2:]]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/fpn_mobilenetv4_conv_small_ib_o4_oc64_r320/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/fpn_mobilenetv4_conv_small_ib_o4_oc64_r320/assets\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Preparing calibration data\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-10-07 23:55:33.741224: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:378] Ignored output_format.\n",
      "2024-10-07 23:55:33.741266: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:381] Ignored drop_control_dependency.\n",
      "2024-10-07 23:55:33.741471: I tensorflow/cc/saved_model/reader.cc:83] Reading SavedModel from: saved_model/fpn_mobilenetv4_conv_small_ib_o4_oc64_r320\n",
      "2024-10-07 23:55:33.747605: I tensorflow/cc/saved_model/reader.cc:51] Reading meta graph with tags { serve }\n",
      "2024-10-07 23:55:33.747629: I tensorflow/cc/saved_model/reader.cc:146] Reading SavedModel debug info (if present) from: saved_model/fpn_mobilenetv4_conv_small_ib_o4_oc64_r320\n",
      "2024-10-07 23:55:33.760903: I tensorflow/cc/saved_model/loader.cc:233] Restoring SavedModel bundle.\n",
      "2024-10-07 23:55:33.839003: I tensorflow/cc/saved_model/loader.cc:217] Running initialization op on SavedModel bundle at path: saved_model/fpn_mobilenetv4_conv_small_ib_o4_oc64_r320\n",
      "2024-10-07 23:55:33.926386: I tensorflow/cc/saved_model/loader.cc:316] SavedModel load for tags { serve }; Status: success: OK. Took 184917 microseconds.\n",
      "Summary on the non-converted ops:\n",
      "---------------------------------\n",
      " * Accepted dialects: tfl, builtin, func\n",
      " * Non-Converted Ops: 141, Total Ops 349, % non-converted = 40.40 %\n",
      " * 141 ARITH ops\n",
      "\n",
      "- arith.constant:  141 occurrences  (f32: 133, i32: 8)\n",
      "\n",
      "\n",
      "\n",
      "  (f32: 23)\n",
      "  (f32: 57)\n",
      "  (f32: 26)\n",
      "  (f32: 29)\n",
      "  (f32: 1)\n",
      "  (f32: 9)\n",
      "  (f32: 3)\n",
      "  (f32: 57)\n",
      "2024-10-07 23:55:34.256172: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 1.661 G  ops, equivalently 0.831 G  MACs\n",
      "fully_quantize: 0, inference_type: 6, input_inference_type: UINT8, output_inference_type: UINT8\n",
      "2024-10-07 23:55:36.863611: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 1.661 G  ops, equivalently 0.831 G  MACs\n",
      "WARNING:timm.models._builder:Unexpected keys (classifier.bias, classifier.weight, conv_head.weight, norm_head.bias, norm_head.num_batches_tracked, norm_head.running_mean, norm_head.running_var, norm_head.weight) found while loading pretrained weights. This may be expected if model is being adapted.\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/decode_heads/decode_head.py:137: UserWarning: threshold is not defined for binary, and defaultsto 0.3\n",
      "  warnings.warn(\"threshold is not defined for binary, and defaults\" \"to 0.3\")\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/losses/cross_entropy_loss.py:250: UserWarning: Default ``avg_non_ignore`` is False, if you would like to ignore the certain label and average loss over non-ignore labels, which is the same with PyTorch official cross_entropy, set ``avg_non_ignore=True``.\n",
      "  warnings.warn(\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/utils/wrappers.py:48: TracerWarning: Converting a tensor to a Python integer might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!\n",
      "  size = [int(t * self.scale_factor) for t in x.shape[-2:]]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/fpn_mobilenetv4_conv_medium_ib_o4_oc96_r320/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/fpn_mobilenetv4_conv_medium_ib_o4_oc96_r320/assets\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Preparing calibration data\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-10-07 23:55:48.679599: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:378] Ignored output_format.\n",
      "2024-10-07 23:55:48.679631: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:381] Ignored drop_control_dependency.\n",
      "2024-10-07 23:55:48.679814: I tensorflow/cc/saved_model/reader.cc:83] Reading SavedModel from: saved_model/fpn_mobilenetv4_conv_medium_ib_o4_oc96_r320\n",
      "2024-10-07 23:55:48.694049: I tensorflow/cc/saved_model/reader.cc:51] Reading meta graph with tags { serve }\n",
      "2024-10-07 23:55:48.694089: I tensorflow/cc/saved_model/reader.cc:146] Reading SavedModel debug info (if present) from: saved_model/fpn_mobilenetv4_conv_medium_ib_o4_oc96_r320\n",
      "2024-10-07 23:55:48.716619: I tensorflow/cc/saved_model/loader.cc:233] Restoring SavedModel bundle.\n",
      "2024-10-07 23:55:48.807960: I tensorflow/cc/saved_model/loader.cc:217] Running initialization op on SavedModel bundle at path: saved_model/fpn_mobilenetv4_conv_medium_ib_o4_oc96_r320\n",
      "2024-10-07 23:55:48.897482: I tensorflow/cc/saved_model/loader.cc:316] SavedModel load for tags { serve }; Status: success: OK. Took 217669 microseconds.\n",
      "Summary on the non-converted ops:\n",
      "---------------------------------\n",
      " * Accepted dialects: tfl, builtin, func\n",
      " * Non-Converted Ops: 202, Total Ops 491, % non-converted = 41.14 %\n",
      " * 202 ARITH ops\n",
      "\n",
      "- arith.constant:  202 occurrences  (f32: 194, i32: 8)\n",
      "\n",
      "\n",
      "\n",
      "  (f32: 31)\n",
      "  (f32: 73)\n",
      "  (f32: 41)\n",
      "  (f32: 43)\n",
      "  (f32: 1)\n",
      "  (f32: 9)\n",
      "  (f32: 3)\n",
      "  (f32: 85)\n",
      "2024-10-07 23:55:49.437772: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 5.215 G  ops, equivalently 2.608 G  MACs\n",
      "fully_quantize: 0, inference_type: 6, input_inference_type: UINT8, output_inference_type: UINT8\n",
      "2024-10-07 23:55:54.854354: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 5.215 G  ops, equivalently 2.608 G  MACs\n",
      "WARNING:timm.models._builder:Unexpected keys (classifier.bias, classifier.weight, conv_head.weight, norm_head.bias, norm_head.num_batches_tracked, norm_head.running_mean, norm_head.running_var, norm_head.weight) found while loading pretrained weights. This may be expected if model is being adapted.\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/decode_heads/decode_head.py:137: UserWarning: threshold is not defined for binary, and defaultsto 0.3\n",
      "  warnings.warn(\"threshold is not defined for binary, and defaults\" \"to 0.3\")\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/losses/cross_entropy_loss.py:250: UserWarning: Default ``avg_non_ignore`` is False, if you would like to ignore the certain label and average loss over non-ignore labels, which is the same with PyTorch official cross_entropy, set ``avg_non_ignore=True``.\n",
      "  warnings.warn(\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/utils/wrappers.py:48: TracerWarning: Converting a tensor to a Python integer might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!\n",
      "  size = [int(t * self.scale_factor) for t in x.shape[-2:]]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/fpn_mobilenetv4_conv_large_ib_o4_oc128_r320/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/fpn_mobilenetv4_conv_large_ib_o4_oc128_r320/assets\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Preparing calibration data\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-10-07 23:56:13.566254: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:378] Ignored output_format.\n",
      "2024-10-07 23:56:13.566287: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:381] Ignored drop_control_dependency.\n",
      "2024-10-07 23:56:13.566474: I tensorflow/cc/saved_model/reader.cc:83] Reading SavedModel from: saved_model/fpn_mobilenetv4_conv_large_ib_o4_oc128_r320\n",
      "2024-10-07 23:56:13.600959: I tensorflow/cc/saved_model/reader.cc:51] Reading meta graph with tags { serve }\n",
      "2024-10-07 23:56:13.600993: I tensorflow/cc/saved_model/reader.cc:146] Reading SavedModel debug info (if present) from: saved_model/fpn_mobilenetv4_conv_large_ib_o4_oc128_r320\n",
      "2024-10-07 23:56:13.641170: I tensorflow/cc/saved_model/loader.cc:233] Restoring SavedModel bundle.\n",
      "2024-10-07 23:56:13.822866: I tensorflow/cc/saved_model/loader.cc:217] Running initialization op on SavedModel bundle at path: saved_model/fpn_mobilenetv4_conv_large_ib_o4_oc128_r320\n",
      "2024-10-07 23:56:13.988943: I tensorflow/cc/saved_model/loader.cc:316] SavedModel load for tags { serve }; Status: success: OK. Took 422469 microseconds.\n",
      "Summary on the non-converted ops:\n",
      "---------------------------------\n",
      " * Accepted dialects: tfl, builtin, func\n",
      " * Non-Converted Ops: 253, Total Ops 617, % non-converted = 41.00 %\n",
      " * 253 ARITH ops\n",
      "\n",
      "- arith.constant:  253 occurrences  (f32: 245, i32: 8)\n",
      "\n",
      "\n",
      "\n",
      "  (f32: 36)\n",
      "  (f32: 83)\n",
      "  (f32: 56)\n",
      "  (f32: 58)\n",
      "  (f32: 1)\n",
      "  (f32: 9)\n",
      "  (f32: 3)\n",
      "  (f32: 115)\n",
      "2024-10-07 23:56:15.343938: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 11.865 G  ops, equivalently 5.932 G  MACs\n",
      "fully_quantize: 0, inference_type: 6, input_inference_type: UINT8, output_inference_type: UINT8\n",
      "2024-10-07 23:56:26.468349: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 11.865 G  ops, equivalently 5.932 G  MACs\n"
     ]
    }
   ],
   "source": [
    "from models.fpn import OCDFPN\n",
    "\n",
    "# Convert one FPN model per MobileNetV4 backbone size (small/medium/large),\n",
    "# each paired with its own output-channel width, using the ib FPN variant.\n",
    "num_outs = 4\n",
    "image_size = (320, 320)\n",
    "fpn_type = \"ib\"\n",
    "\n",
    "# timm checkpoint name per backbone size.\n",
    "backbone_dict = {\n",
    "    \"small\": \"mobilenetv4_conv_small.e2400_r224_in1k\",\n",
    "    \"medium\": \"mobilenetv4_conv_medium.e500_r256_in1k\",\n",
    "    \"large\": \"mobilenetv4_conv_large.e500_r256_in1k\",\n",
    "}\n",
    "# FPN output channels, aligned positionally with backbone_dict above.\n",
    "list_out_channel = [64, 96, 128]\n",
    "\n",
    "for (backbone_key, backbone_value), out_channel in zip(\n",
    "    backbone_dict.items(), list_out_channel\n",
    "):\n",
    "    model = OCDFPN(\n",
    "        backbone=backbone_value,\n",
    "        n_classes=1,\n",
    "        num_outs=num_outs,\n",
    "        out_channel=out_channel,\n",
    "        fpn_type=fpn_type,\n",
    "    )\n",
    "\n",
    "    # All artifact paths are derived from a single model name.\n",
    "    model_name = f\"fpn_mobilenetv4_conv_{backbone_key}_{fpn_type}_o{num_outs}_oc{out_channel}_r{image_size[0]}\"\n",
    "    torch_model_path = None  # use initialized model\n",
    "    onnx_model_path = f\"saved_model/{model_name}/{model_name}.onnx\"\n",
    "    tf_folder_path = f\"saved_model/{model_name}\"\n",
    "    tflite_model_path = onnx_model_path.replace(\n",
    "        \".onnx\", \"_full_integer_quant_uint8.tflite\"\n",
    "    )\n",
    "    calib_data_path = \"calibdata.npy\"\n",
    "\n",
    "    # Converter is defined/imported earlier in this notebook; it drives the\n",
    "    # torch -> ONNX -> SavedModel -> quantized TFLite pipeline.\n",
    "    converter = Converter(\n",
    "        model,\n",
    "        image_size,\n",
    "        torch_model_path,\n",
    "        onnx_model_path,\n",
    "        tf_folder_path,\n",
    "        tflite_model_path,\n",
    "        calib_data_path,\n",
    "        opset_version=11,\n",
    "    )\n",
    "    converter.convert()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/chenxin/anaconda3/envs/ocd/lib/python3.10/site-packages/albumentations/__init__.py:13: UserWarning: A new version of Albumentations is available: 1.4.18 (you have 1.4.16). Upgrade using: pip install -U albumentations. To disable automatic update checks, set the environment variable NO_ALBUMENTATIONS_UPDATE to 1.\n",
      "  check_for_updates()\n",
      "Unexpected keys (classifier.bias, classifier.weight, conv_head.weight, norm_head.bias, norm_head.num_batches_tracked, norm_head.running_mean, norm_head.running_var, norm_head.weight) found while loading pretrained weights. This may be expected if model is being adapted.\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/decode_heads/decode_head.py:137: UserWarning: threshold is not defined for binary, and defaultsto 0.3\n",
      "  warnings.warn(\"threshold is not defined for binary, and defaults\" \"to 0.3\")\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/losses/cross_entropy_loss.py:250: UserWarning: Default ``avg_non_ignore`` is False, if you would like to ignore the certain label and average loss over non-ignore labels, which is the same with PyTorch official cross_entropy, set ``avg_non_ignore=True``.\n",
      "  warnings.warn(\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/utils/wrappers.py:48: TracerWarning: Converting a tensor to a Python integer might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!\n",
      "  size = [int(t * self.scale_factor) for t in x.shape[-2:]]\n",
      "2024-10-11 14:28:35.403329: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-11 14:28:35.404816: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-11 14:28:35.406397: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-11 14:28:35.407771: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-11 14:28:35.414894: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-11 14:28:35.416502: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-11 14:28:35.418356: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-11 14:28:35.419538: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-11 14:28:35.420547: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-11 14:28:35.421963: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-11 14:28:35.423448: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-11 14:28:35.424650: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-11 14:28:36.676181: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-11 14:28:36.677549: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-11 14:28:36.680667: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-11 14:28:36.681943: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-11 14:28:36.682955: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-11 14:28:36.684156: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-11 14:28:36.685523: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-11 14:28:36.686708: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-11 14:28:36.687647: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-11 14:28:36.689740: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-11 14:28:36.691406: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-11 14:28:36.693795: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-11 14:28:36.755310: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-11 14:28:36.756751: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-11 14:28:36.760001: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-11 14:28:36.761410: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-11 14:28:36.762438: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-11 14:28:36.763693: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-11 14:28:36.765571: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-11 14:28:36.766843: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-11 14:28:36.767786: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-11 14:28:36.769092: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1929] Created device /job:localhost/replica:0/task:0/device:GPU:0 with 11464 MB memory:  -> device: 0, name: NVIDIA A100 80GB PCIe, pci bus id: 0000:01:00.0, compute capability: 8.0\n",
      "2024-10-11 14:28:36.769435: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-11 14:28:36.770980: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1929] Created device /job:localhost/replica:0/task:0/device:GPU:1 with 23144 MB memory:  -> device: 1, name: NVIDIA A100 80GB PCIe, pci bus id: 0000:41:00.0, compute capability: 8.0\n",
      "2024-10-11 14:28:36.771247: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-11 14:28:36.773346: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1929] Created device /job:localhost/replica:0/task:0/device:GPU:2 with 17016 MB memory:  -> device: 2, name: NVIDIA A100 80GB PCIe, pci bus id: 0000:81:00.0, compute capability: 8.0\n",
      "2024-10-11 14:28:36.773559: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-11 14:28:36.774492: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1929] Created device /job:localhost/replica:0/task:0/device:GPU:3 with 807 MB memory:  -> device: 3, name: NVIDIA A100 80GB PCIe, pci bus id: 0000:c1:00.0, compute capability: 8.0\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:From /home/chenxin/anaconda3/envs/ocd/lib/python3.10/site-packages/tensorflow/python/util/dispatch.py:1260: resize_nearest_neighbor (from tensorflow.python.ops.image_ops_impl) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Use `tf.image.resize(...method=ResizeMethod.NEAREST_NEIGHBOR...)` instead.\n",
      "INFO:tensorflow:Assets written to: saved_model/fpn_mobilenetv4_conv_small_convnext_o4_oc64_r320/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/fpn_mobilenetv4_conv_small_convnext_o4_oc64_r320/assets\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Preparing calibration data\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-10-11 14:28:55.583380: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:378] Ignored output_format.\n",
      "2024-10-11 14:28:55.583426: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:381] Ignored drop_control_dependency.\n",
      "2024-10-11 14:28:55.584919: I tensorflow/cc/saved_model/reader.cc:83] Reading SavedModel from: saved_model/fpn_mobilenetv4_conv_small_convnext_o4_oc64_r320\n",
      "2024-10-11 14:28:55.594771: I tensorflow/cc/saved_model/reader.cc:51] Reading meta graph with tags { serve }\n",
      "2024-10-11 14:28:55.594835: I tensorflow/cc/saved_model/reader.cc:146] Reading SavedModel debug info (if present) from: saved_model/fpn_mobilenetv4_conv_small_convnext_o4_oc64_r320\n",
      "2024-10-11 14:28:55.732877: I tensorflow/compiler/mlir/mlir_graph_optimization_pass.cc:388] MLIR V1 optimization pass is not enabled\n",
      "2024-10-11 14:28:55.738673: I tensorflow/cc/saved_model/loader.cc:233] Restoring SavedModel bundle.\n",
      "2024-10-11 14:28:55.979216: I tensorflow/cc/saved_model/loader.cc:217] Running initialization op on SavedModel bundle at path: saved_model/fpn_mobilenetv4_conv_small_convnext_o4_oc64_r320\n",
      "2024-10-11 14:28:56.152315: I tensorflow/cc/saved_model/loader.cc:316] SavedModel load for tags { serve }; Status: success: OK. Took 567400 microseconds.\n",
      "2024-10-11 14:28:56.433966: I tensorflow/compiler/mlir/tensorflow/utils/dump_mlir_util.cc:269] disabling MLIR crash reproducer, set env var `MLIR_CRASH_REPRODUCER_DIRECTORY` to enable.\n",
      "Summary on the non-converted ops:\n",
      "---------------------------------\n",
      " * Accepted dialects: tfl, builtin, func\n",
      " * Non-Converted Ops: 132, Total Ops 328, % non-converted = 40.24 %\n",
      " * 132 ARITH ops\n",
      "\n",
      "- arith.constant:  132 occurrences  (f32: 124, i32: 8)\n",
      "\n",
      "\n",
      "\n",
      "  (f32: 19)\n",
      "  (f32: 53)\n",
      "  (f32: 22)\n",
      "  (f32: 29)\n",
      "  (f32: 1)\n",
      "  (f32: 9)\n",
      "  (f32: 3)\n",
      "  (f32: 57)\n",
      "2024-10-11 14:28:56.832554: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 1.692 G  ops, equivalently 0.846 G  MACs\n",
      "fully_quantize: 0, inference_type: 6, input_inference_type: UINT8, output_inference_type: UINT8\n",
      "2024-10-11 14:28:58.903054: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 1.692 G  ops, equivalently 0.846 G  MACs\n",
      "WARNING:timm.models._builder:Unexpected keys (classifier.bias, classifier.weight, conv_head.weight, norm_head.bias, norm_head.num_batches_tracked, norm_head.running_mean, norm_head.running_var, norm_head.weight) found while loading pretrained weights. This may be expected if model is being adapted.\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/decode_heads/decode_head.py:137: UserWarning: threshold is not defined for binary, and defaultsto 0.3\n",
      "  warnings.warn(\"threshold is not defined for binary, and defaults\" \"to 0.3\")\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/losses/cross_entropy_loss.py:250: UserWarning: Default ``avg_non_ignore`` is False, if you would like to ignore the certain label and average loss over non-ignore labels, which is the same with PyTorch official cross_entropy, set ``avg_non_ignore=True``.\n",
      "  warnings.warn(\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/utils/wrappers.py:48: TracerWarning: Converting a tensor to a Python integer might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!\n",
      "  size = [int(t * self.scale_factor) for t in x.shape[-2:]]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/fpn_mobilenetv4_conv_medium_convnext_o4_oc96_r320/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/fpn_mobilenetv4_conv_medium_convnext_o4_oc96_r320/assets\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Preparing calibration data\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-10-11 14:29:25.787155: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:378] Ignored output_format.\n",
      "2024-10-11 14:29:25.787191: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:381] Ignored drop_control_dependency.\n",
      "2024-10-11 14:29:25.787422: I tensorflow/cc/saved_model/reader.cc:83] Reading SavedModel from: saved_model/fpn_mobilenetv4_conv_medium_convnext_o4_oc96_r320\n",
      "2024-10-11 14:29:25.965029: I tensorflow/cc/saved_model/reader.cc:51] Reading meta graph with tags { serve }\n",
      "2024-10-11 14:29:25.965071: I tensorflow/cc/saved_model/reader.cc:146] Reading SavedModel debug info (if present) from: saved_model/fpn_mobilenetv4_conv_medium_convnext_o4_oc96_r320\n",
      "2024-10-11 14:29:25.985615: I tensorflow/cc/saved_model/loader.cc:233] Restoring SavedModel bundle.\n",
      "2024-10-11 14:29:26.105896: I tensorflow/cc/saved_model/loader.cc:217] Running initialization op on SavedModel bundle at path: saved_model/fpn_mobilenetv4_conv_medium_convnext_o4_oc96_r320\n",
      "2024-10-11 14:29:26.204523: I tensorflow/cc/saved_model/loader.cc:316] SavedModel load for tags { serve }; Status: success: OK. Took 417102 microseconds.\n",
      "Summary on the non-converted ops:\n",
      "---------------------------------\n",
      " * Accepted dialects: tfl, builtin, func\n",
      " * Non-Converted Ops: 195, Total Ops 472, % non-converted = 41.31 %\n",
      " * 195 ARITH ops\n",
      "\n",
      "- arith.constant:  195 occurrences  (f32: 187, i32: 8)\n",
      "\n",
      "\n",
      "\n",
      "  (f32: 27)\n",
      "  (f32: 69)\n",
      "  (f32: 37)\n",
      "  (f32: 43)\n",
      "  (f32: 1)\n",
      "  (f32: 9)\n",
      "  (f32: 3)\n",
      "  (f32: 85)\n",
      "2024-10-11 14:29:26.892485: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 5.376 G  ops, equivalently 2.688 G  MACs\n",
      "fully_quantize: 0, inference_type: 6, input_inference_type: UINT8, output_inference_type: UINT8\n",
      "2024-10-11 14:29:32.103761: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 5.376 G  ops, equivalently 2.688 G  MACs\n",
      "WARNING:timm.models._builder:Unexpected keys (classifier.bias, classifier.weight, conv_head.weight, norm_head.bias, norm_head.num_batches_tracked, norm_head.running_mean, norm_head.running_var, norm_head.weight) found while loading pretrained weights. This may be expected if model is being adapted.\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/decode_heads/decode_head.py:137: UserWarning: threshold is not defined for binary, and defaultsto 0.3\n",
      "  warnings.warn(\"threshold is not defined for binary, and defaults\" \"to 0.3\")\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/losses/cross_entropy_loss.py:250: UserWarning: Default ``avg_non_ignore`` is False, if you would like to ignore the certain label and average loss over non-ignore labels, which is the same with PyTorch official cross_entropy, set ``avg_non_ignore=True``.\n",
      "  warnings.warn(\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/utils/wrappers.py:48: TracerWarning: Converting a tensor to a Python integer might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!\n",
      "  size = [int(t * self.scale_factor) for t in x.shape[-2:]]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/fpn_mobilenetv4_conv_large_convnext_o4_oc128_r320/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/fpn_mobilenetv4_conv_large_convnext_o4_oc128_r320/assets\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Preparing calibration data\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-10-11 14:30:11.077207: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:378] Ignored output_format.\n",
      "2024-10-11 14:30:11.077274: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:381] Ignored drop_control_dependency.\n",
      "2024-10-11 14:30:11.077600: I tensorflow/cc/saved_model/reader.cc:83] Reading SavedModel from: saved_model/fpn_mobilenetv4_conv_large_convnext_o4_oc128_r320\n",
      "2024-10-11 14:30:11.274288: I tensorflow/cc/saved_model/reader.cc:51] Reading meta graph with tags { serve }\n",
      "2024-10-11 14:30:11.274335: I tensorflow/cc/saved_model/reader.cc:146] Reading SavedModel debug info (if present) from: saved_model/fpn_mobilenetv4_conv_large_convnext_o4_oc128_r320\n",
      "2024-10-11 14:30:11.402037: I tensorflow/cc/saved_model/loader.cc:233] Restoring SavedModel bundle.\n",
      "2024-10-11 14:30:11.719636: I tensorflow/cc/saved_model/loader.cc:217] Running initialization op on SavedModel bundle at path: saved_model/fpn_mobilenetv4_conv_large_convnext_o4_oc128_r320\n",
      "2024-10-11 14:30:12.033914: I tensorflow/cc/saved_model/loader.cc:316] SavedModel load for tags { serve }; Status: success: OK. Took 956316 microseconds.\n",
      "Summary on the non-converted ops:\n",
      "---------------------------------\n",
      " * Accepted dialects: tfl, builtin, func\n",
      " * Non-Converted Ops: 245, Total Ops 597, % non-converted = 41.04 %\n",
      " * 245 ARITH ops\n",
      "\n",
      "- arith.constant:  245 occurrences  (f32: 237, i32: 8)\n",
      "\n",
      "\n",
      "\n",
      "  (f32: 32)\n",
      "  (f32: 79)\n",
      "  (f32: 52)\n",
      "  (f32: 58)\n",
      "  (f32: 1)\n",
      "  (f32: 9)\n",
      "  (f32: 3)\n",
      "  (f32: 115)\n",
      "2024-10-11 14:30:13.904487: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 12.249 G  ops, equivalently 6.124 G  MACs\n",
      "fully_quantize: 0, inference_type: 6, input_inference_type: UINT8, output_inference_type: UINT8\n",
      "2024-10-11 14:30:28.171273: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 12.249 G  ops, equivalently 6.124 G  MACs\n"
     ]
    }
   ],
   "source": [
    "from models.fpn import OCDFPN\n",
    "\n",
    "# Export configuration: one MobileNetV4 backbone size per FPN output-channel width.\n",
    "num_outs = 4\n",
    "image_size = (320, 320)\n",
    "fpn_type = \"convnext\"\n",
    "backbone_dict = {\n",
    "    \"small\": \"mobilenetv4_conv_small.e2400_r224_in1k\",\n",
    "    \"medium\": \"mobilenetv4_conv_medium.e500_r256_in1k\",\n",
    "    \"large\": \"mobilenetv4_conv_large.e500_r256_in1k\",\n",
    "}\n",
    "list_out_channel = [64, 96, 128]\n",
    "\n",
    "# Pair each channel width with its (key, timm-model-name) entry directly.\n",
    "for out_channel, (backbone_key, backbone_value) in zip(\n",
    "    list_out_channel, backbone_dict.items()\n",
    "):\n",
    "    model = OCDFPN(\n",
    "        backbone=backbone_value,\n",
    "        n_classes=1,\n",
    "        num_outs=num_outs,\n",
    "        out_channel=out_channel,\n",
    "        fpn_type=fpn_type,\n",
    "    )\n",
    "\n",
    "    # All export artifact paths are derived from a single model name.\n",
    "    model_name = f\"fpn_mobilenetv4_conv_{backbone_key}_{fpn_type}_o{num_outs}_oc{out_channel}_r{image_size[0]}\"\n",
    "    torch_model_path = None  # use initialized model\n",
    "    onnx_model_path = f\"saved_model/{model_name}/{model_name}.onnx\"\n",
    "    tf_folder_path = f\"saved_model/{model_name}\"\n",
    "    tflite_model_path = onnx_model_path.replace(\n",
    "        \".onnx\", \"_full_integer_quant_uint8.tflite\"\n",
    "    )\n",
    "    calib_data_path = \"calibdata.npy\"\n",
    "\n",
    "    # Converter is defined in an earlier cell; drives torch -> ONNX -> TF -> TFLite.\n",
    "    converter = Converter(\n",
    "        model,\n",
    "        image_size,\n",
    "        torch_model_path,\n",
    "        onnx_model_path,\n",
    "        tf_folder_path,\n",
    "        tflite_model_path,\n",
    "        calib_data_path,\n",
    "        opset_version=11,\n",
    "    )\n",
    "    converter.convert()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### c80"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### num_outs"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "WARNING:timm.models._builder:Unexpected keys (classifier.bias, classifier.weight, conv_head.weight, norm_head.bias, norm_head.num_batches_tracked, norm_head.running_mean, norm_head.running_var, norm_head.weight) found while loading pretrained weights. This may be expected if model is being adapted.\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/losses/cross_entropy_loss.py:250: UserWarning: Default ``avg_non_ignore`` is False, if you would like to ignore the certain label and average loss over non-ignore labels, which is the same with PyTorch official cross_entropy, set ``avg_non_ignore=True``.\n",
      "  warnings.warn(\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/utils/wrappers.py:48: TracerWarning: Converting a tensor to a Python integer might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!\n",
      "  size = [int(t * self.scale_factor) for t in x.shape[-2:]]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/fpn_mobilenetv4_conv_small_o3_oc64_r320_coco/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/fpn_mobilenetv4_conv_small_o3_oc64_r320_coco/assets\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Preparing calibration data\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-10-14 00:12:11.502095: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:378] Ignored output_format.\n",
      "2024-10-14 00:12:11.502130: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:381] Ignored drop_control_dependency.\n",
      "2024-10-14 00:12:11.502337: I tensorflow/cc/saved_model/reader.cc:83] Reading SavedModel from: saved_model/fpn_mobilenetv4_conv_small_o3_oc64_r320_coco\n",
      "2024-10-14 00:12:11.507660: I tensorflow/cc/saved_model/reader.cc:51] Reading meta graph with tags { serve }\n",
      "2024-10-14 00:12:11.507679: I tensorflow/cc/saved_model/reader.cc:146] Reading SavedModel debug info (if present) from: saved_model/fpn_mobilenetv4_conv_small_o3_oc64_r320_coco\n",
      "2024-10-14 00:12:11.517941: I tensorflow/cc/saved_model/loader.cc:233] Restoring SavedModel bundle.\n",
      "2024-10-14 00:12:11.572520: I tensorflow/cc/saved_model/loader.cc:217] Running initialization op on SavedModel bundle at path: saved_model/fpn_mobilenetv4_conv_small_o3_oc64_r320_coco\n",
      "2024-10-14 00:12:11.627013: I tensorflow/cc/saved_model/loader.cc:316] SavedModel load for tags { serve }; Status: success: OK. Took 124676 microseconds.\n",
      "Summary on the non-converted ops:\n",
      "---------------------------------\n",
      " * Accepted dialects: tfl, builtin, func\n",
      " * Non-Converted Ops: 110, Total Ops 265, % non-converted = 41.51 %\n",
      " * 110 ARITH ops\n",
      "\n",
      "- arith.constant:  110 occurrences  (f32: 104, i32: 6)\n",
      "\n",
      "\n",
      "\n",
      "  (f32: 14)\n",
      "  (f32: 41)\n",
      "  (f32: 15)\n",
      "  (f32: 25)\n",
      "  (f32: 5)\n",
      "  (f32: 2)\n",
      "  (f32: 50)\n",
      "2024-10-14 00:12:11.906371: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 1.032 G  ops, equivalently 0.516 G  MACs\n",
      "fully_quantize: 0, inference_type: 6, input_inference_type: UINT8, output_inference_type: UINT8\n",
      "2024-10-14 00:12:13.125478: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 1.032 G  ops, equivalently 0.516 G  MACs\n",
      "WARNING:timm.models._builder:Unexpected keys (classifier.bias, classifier.weight, conv_head.weight, norm_head.bias, norm_head.num_batches_tracked, norm_head.running_mean, norm_head.running_var, norm_head.weight) found while loading pretrained weights. This may be expected if model is being adapted.\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/losses/cross_entropy_loss.py:250: UserWarning: Default ``avg_non_ignore`` is False, if you would like to ignore the certain label and average loss over non-ignore labels, which is the same with PyTorch official cross_entropy, set ``avg_non_ignore=True``.\n",
      "  warnings.warn(\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/utils/wrappers.py:48: TracerWarning: Converting a tensor to a Python integer might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!\n",
      "  size = [int(t * self.scale_factor) for t in x.shape[-2:]]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/fpn_mobilenetv4_conv_medium_o3_oc96_r320_coco/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/fpn_mobilenetv4_conv_medium_o3_oc96_r320_coco/assets\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Preparing calibration data\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-10-14 00:12:24.872136: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:378] Ignored output_format.\n",
      "2024-10-14 00:12:24.872179: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:381] Ignored drop_control_dependency.\n",
      "2024-10-14 00:12:24.872430: I tensorflow/cc/saved_model/reader.cc:83] Reading SavedModel from: saved_model/fpn_mobilenetv4_conv_medium_o3_oc96_r320_coco\n",
      "2024-10-14 00:12:25.043845: I tensorflow/cc/saved_model/reader.cc:51] Reading meta graph with tags { serve }\n",
      "2024-10-14 00:12:25.043881: I tensorflow/cc/saved_model/reader.cc:146] Reading SavedModel debug info (if present) from: saved_model/fpn_mobilenetv4_conv_medium_o3_oc96_r320_coco\n",
      "2024-10-14 00:12:25.062361: I tensorflow/cc/saved_model/loader.cc:233] Restoring SavedModel bundle.\n",
      "2024-10-14 00:12:25.135436: I tensorflow/cc/saved_model/loader.cc:217] Running initialization op on SavedModel bundle at path: saved_model/fpn_mobilenetv4_conv_medium_o3_oc96_r320_coco\n",
      "2024-10-14 00:12:25.205911: I tensorflow/cc/saved_model/loader.cc:316] SavedModel load for tags { serve }; Status: success: OK. Took 333481 microseconds.\n",
      "Summary on the non-converted ops:\n",
      "---------------------------------\n",
      " * Accepted dialects: tfl, builtin, func\n",
      " * Non-Converted Ops: 171, Total Ops 407, % non-converted = 42.01 %\n",
      " * 171 ARITH ops\n",
      "\n",
      "- arith.constant:  171 occurrences  (f32: 165, i32: 6)\n",
      "\n",
      "\n",
      "\n",
      "  (f32: 22)\n",
      "  (f32: 57)\n",
      "  (f32: 30)\n",
      "  (f32: 39)\n",
      "  (f32: 5)\n",
      "  (f32: 2)\n",
      "  (f32: 78)\n",
      "2024-10-14 00:12:25.706317: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 4.225 G  ops, equivalently 2.112 G  MACs\n",
      "fully_quantize: 0, inference_type: 6, input_inference_type: UINT8, output_inference_type: UINT8\n",
      "2024-10-14 00:12:29.477888: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 4.225 G  ops, equivalently 2.112 G  MACs\n",
      "WARNING:timm.models._builder:Unexpected keys (classifier.bias, classifier.weight, conv_head.weight, norm_head.bias, norm_head.num_batches_tracked, norm_head.running_mean, norm_head.running_var, norm_head.weight) found while loading pretrained weights. This may be expected if model is being adapted.\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/losses/cross_entropy_loss.py:250: UserWarning: Default ``avg_non_ignore`` is False, if you would like to ignore the certain label and average loss over non-ignore labels, which is the same with PyTorch official cross_entropy, set ``avg_non_ignore=True``.\n",
      "  warnings.warn(\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/utils/wrappers.py:48: TracerWarning: Converting a tensor to a Python integer might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!\n",
      "  size = [int(t * self.scale_factor) for t in x.shape[-2:]]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/fpn_mobilenetv4_conv_large_o3_oc128_r320_coco/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/fpn_mobilenetv4_conv_large_o3_oc128_r320_coco/assets\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Preparing calibration data\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-10-14 00:12:50.352374: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:378] Ignored output_format.\n",
      "2024-10-14 00:12:50.352414: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:381] Ignored drop_control_dependency.\n",
      "2024-10-14 00:12:50.352650: I tensorflow/cc/saved_model/reader.cc:83] Reading SavedModel from: saved_model/fpn_mobilenetv4_conv_large_o3_oc128_r320_coco\n",
      "2024-10-14 00:12:51.104136: I tensorflow/cc/saved_model/reader.cc:51] Reading meta graph with tags { serve }\n",
      "2024-10-14 00:12:51.104179: I tensorflow/cc/saved_model/reader.cc:146] Reading SavedModel debug info (if present) from: saved_model/fpn_mobilenetv4_conv_large_o3_oc128_r320_coco\n",
      "2024-10-14 00:12:51.134984: I tensorflow/cc/saved_model/loader.cc:233] Restoring SavedModel bundle.\n",
      "2024-10-14 00:12:51.231219: I tensorflow/cc/saved_model/loader.cc:217] Running initialization op on SavedModel bundle at path: saved_model/fpn_mobilenetv4_conv_large_o3_oc128_r320_coco\n",
      "2024-10-14 00:12:51.326798: I tensorflow/cc/saved_model/loader.cc:316] SavedModel load for tags { serve }; Status: success: OK. Took 974150 microseconds.\n",
      "Summary on the non-converted ops:\n",
      "---------------------------------\n",
      " * Accepted dialects: tfl, builtin, func\n",
      " * Non-Converted Ops: 221, Total Ops 532, % non-converted = 41.54 %\n",
      " * 221 ARITH ops\n",
      "\n",
      "- arith.constant:  221 occurrences  (f32: 215, i32: 6)\n",
      "\n",
      "\n",
      "\n",
      "  (f32: 27)\n",
      "  (f32: 67)\n",
      "  (f32: 45)\n",
      "  (f32: 54)\n",
      "  (f32: 5)\n",
      "  (f32: 2)\n",
      "  (f32: 108)\n",
      "2024-10-14 00:12:52.656488: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 10.347 G  ops, equivalently 5.174 G  MACs\n",
      "fully_quantize: 0, inference_type: 6, input_inference_type: UINT8, output_inference_type: UINT8\n",
      "2024-10-14 00:13:01.287469: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 10.347 G  ops, equivalently 5.174 G  MACs\n",
      "WARNING:timm.models._builder:Unexpected keys (classifier.bias, classifier.weight, conv_head.weight, norm_head.bias, norm_head.num_batches_tracked, norm_head.running_mean, norm_head.running_var, norm_head.weight) found while loading pretrained weights. This may be expected if model is being adapted.\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/losses/cross_entropy_loss.py:250: UserWarning: Default ``avg_non_ignore`` is False, if you would like to ignore the certain label and average loss over non-ignore labels, which is the same with PyTorch official cross_entropy, set ``avg_non_ignore=True``.\n",
      "  warnings.warn(\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/utils/wrappers.py:48: TracerWarning: Converting a tensor to a Python integer might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!\n",
      "  size = [int(t * self.scale_factor) for t in x.shape[-2:]]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/fpn_mobilenetv4_conv_small_o5_oc64_r320_coco/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/fpn_mobilenetv4_conv_small_o5_oc64_r320_coco/assets\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Preparing calibration data\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-10-14 00:13:13.604358: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:378] Ignored output_format.\n",
      "2024-10-14 00:13:13.604395: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:381] Ignored drop_control_dependency.\n",
      "2024-10-14 00:13:13.604695: I tensorflow/cc/saved_model/reader.cc:83] Reading SavedModel from: saved_model/fpn_mobilenetv4_conv_small_o5_oc64_r320_coco\n",
      "2024-10-14 00:13:13.647091: I tensorflow/cc/saved_model/reader.cc:51] Reading meta graph with tags { serve }\n",
      "2024-10-14 00:13:13.647125: I tensorflow/cc/saved_model/reader.cc:146] Reading SavedModel debug info (if present) from: saved_model/fpn_mobilenetv4_conv_small_o5_oc64_r320_coco\n",
      "2024-10-14 00:13:13.665271: I tensorflow/cc/saved_model/loader.cc:233] Restoring SavedModel bundle.\n",
      "2024-10-14 00:13:13.762727: I tensorflow/cc/saved_model/loader.cc:217] Running initialization op on SavedModel bundle at path: saved_model/fpn_mobilenetv4_conv_small_o5_oc64_r320_coco\n",
      "2024-10-14 00:13:13.863232: I tensorflow/cc/saved_model/loader.cc:316] SavedModel load for tags { serve }; Status: success: OK. Took 258595 microseconds.\n",
      "Summary on the non-converted ops:\n",
      "---------------------------------\n",
      " * Accepted dialects: tfl, builtin, func\n",
      " * Non-Converted Ops: 123, Total Ops 331, % non-converted = 37.16 %\n",
      " * 123 ARITH ops\n",
      "\n",
      "- arith.constant:  123 occurrences  (f32: 115, i32: 8)\n",
      "\n",
      "\n",
      "\n",
      "  (f32: 18)\n",
      "  (f32: 52)\n",
      "  (f32: 15)\n",
      "  (f32: 34)\n",
      "  (f32: 14)\n",
      "  (f32: 4)\n",
      "  (f32: 68)\n",
      "2024-10-14 00:13:14.274420: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 5.251 G  ops, equivalently 2.625 G  MACs\n",
      "fully_quantize: 0, inference_type: 6, input_inference_type: UINT8, output_inference_type: UINT8\n",
      "2024-10-14 00:13:18.107251: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 5.251 G  ops, equivalently 2.625 G  MACs\n",
      "WARNING:timm.models._builder:Unexpected keys (classifier.bias, classifier.weight, conv_head.weight, norm_head.bias, norm_head.num_batches_tracked, norm_head.running_mean, norm_head.running_var, norm_head.weight) found while loading pretrained weights. This may be expected if model is being adapted.\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/losses/cross_entropy_loss.py:250: UserWarning: Default ``avg_non_ignore`` is False, if you would like to ignore the certain label and average loss over non-ignore labels, which is the same with PyTorch official cross_entropy, set ``avg_non_ignore=True``.\n",
      "  warnings.warn(\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/utils/wrappers.py:48: TracerWarning: Converting a tensor to a Python integer might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!\n",
      "  size = [int(t * self.scale_factor) for t in x.shape[-2:]]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/fpn_mobilenetv4_conv_medium_o5_oc96_r320_coco/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/fpn_mobilenetv4_conv_medium_o5_oc96_r320_coco/assets\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Preparing calibration data\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-10-14 00:13:33.620580: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:378] Ignored output_format.\n",
      "2024-10-14 00:13:33.620611: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:381] Ignored drop_control_dependency.\n",
      "2024-10-14 00:13:33.620852: I tensorflow/cc/saved_model/reader.cc:83] Reading SavedModel from: saved_model/fpn_mobilenetv4_conv_medium_o5_oc96_r320_coco\n",
      "2024-10-14 00:13:33.636245: I tensorflow/cc/saved_model/reader.cc:51] Reading meta graph with tags { serve }\n",
      "2024-10-14 00:13:33.636274: I tensorflow/cc/saved_model/reader.cc:146] Reading SavedModel debug info (if present) from: saved_model/fpn_mobilenetv4_conv_medium_o5_oc96_r320_coco\n",
      "2024-10-14 00:13:33.659352: I tensorflow/cc/saved_model/loader.cc:233] Restoring SavedModel bundle.\n",
      "2024-10-14 00:13:33.800581: I tensorflow/cc/saved_model/loader.cc:217] Running initialization op on SavedModel bundle at path: saved_model/fpn_mobilenetv4_conv_medium_o5_oc96_r320_coco\n",
      "2024-10-14 00:13:33.968402: I tensorflow/cc/saved_model/loader.cc:316] SavedModel load for tags { serve }; Status: success: OK. Took 347550 microseconds.\n",
      "Summary on the non-converted ops:\n",
      "---------------------------------\n",
      " * Accepted dialects: tfl, builtin, func\n",
      " * Non-Converted Ops: 184, Total Ops 473, % non-converted = 38.90 %\n",
      " * 184 ARITH ops\n",
      "\n",
      "- arith.constant:  184 occurrences  (f32: 176, i32: 8)\n",
      "\n",
      "\n",
      "\n",
      "  (f32: 26)\n",
      "  (f32: 68)\n",
      "  (f32: 30)\n",
      "  (f32: 48)\n",
      "  (f32: 14)\n",
      "  (f32: 4)\n",
      "  (f32: 96)\n",
      "2024-10-14 00:13:34.587312: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 19.168 G  ops, equivalently 9.584 G  MACs\n",
      "fully_quantize: 0, inference_type: 6, input_inference_type: UINT8, output_inference_type: UINT8\n",
      "2024-10-14 00:13:43.461289: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 19.168 G  ops, equivalently 9.584 G  MACs\n",
      "WARNING:timm.models._builder:Unexpected keys (classifier.bias, classifier.weight, conv_head.weight, norm_head.bias, norm_head.num_batches_tracked, norm_head.running_mean, norm_head.running_var, norm_head.weight) found while loading pretrained weights. This may be expected if model is being adapted.\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/losses/cross_entropy_loss.py:250: UserWarning: Default ``avg_non_ignore`` is False, if you would like to ignore the certain label and average loss over non-ignore labels, which is the same with PyTorch official cross_entropy, set ``avg_non_ignore=True``.\n",
      "  warnings.warn(\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/utils/wrappers.py:48: TracerWarning: Converting a tensor to a Python integer might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!\n",
      "  size = [int(t * self.scale_factor) for t in x.shape[-2:]]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/fpn_mobilenetv4_conv_large_o5_oc128_r320_coco/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/fpn_mobilenetv4_conv_large_o5_oc128_r320_coco/assets\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Preparing calibration data\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-10-14 00:14:06.887356: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:378] Ignored output_format.\n",
      "2024-10-14 00:14:06.887405: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:381] Ignored drop_control_dependency.\n",
      "2024-10-14 00:14:06.887749: I tensorflow/cc/saved_model/reader.cc:83] Reading SavedModel from: saved_model/fpn_mobilenetv4_conv_large_o5_oc128_r320_coco\n",
      "2024-10-14 00:14:07.578697: I tensorflow/cc/saved_model/reader.cc:51] Reading meta graph with tags { serve }\n",
      "2024-10-14 00:14:07.578761: I tensorflow/cc/saved_model/reader.cc:146] Reading SavedModel debug info (if present) from: saved_model/fpn_mobilenetv4_conv_large_o5_oc128_r320_coco\n",
      "2024-10-14 00:14:07.648176: I tensorflow/cc/saved_model/loader.cc:233] Restoring SavedModel bundle.\n",
      "2024-10-14 00:14:07.926617: I tensorflow/cc/saved_model/loader.cc:217] Running initialization op on SavedModel bundle at path: saved_model/fpn_mobilenetv4_conv_large_o5_oc128_r320_coco\n",
      "2024-10-14 00:14:08.108664: I tensorflow/cc/saved_model/loader.cc:316] SavedModel load for tags { serve }; Status: success: OK. Took 1220921 microseconds.\n",
      "Summary on the non-converted ops:\n",
      "---------------------------------\n",
      " * Accepted dialects: tfl, builtin, func\n",
      " * Non-Converted Ops: 234, Total Ops 598, % non-converted = 39.13 %\n",
      " * 234 ARITH ops\n",
      "\n",
      "- arith.constant:  234 occurrences  (f32: 226, i32: 8)\n",
      "\n",
      "\n",
      "\n",
      "  (f32: 31)\n",
      "  (f32: 78)\n",
      "  (f32: 45)\n",
      "  (f32: 63)\n",
      "  (f32: 14)\n",
      "  (f32: 4)\n",
      "  (f32: 126)\n",
      "2024-10-14 00:14:09.535318: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 36.589 G  ops, equivalently 18.295 G  MACs\n",
      "fully_quantize: 0, inference_type: 6, input_inference_type: UINT8, output_inference_type: UINT8\n",
      "2024-10-14 00:14:25.923340: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 36.589 G  ops, equivalently 18.295 G  MACs\n"
     ]
    }
   ],
   "source": [
    "from models.fpn import OCDFPN\n",
    "\n",
    "num_outs_list = [\n",
    "    3,\n",
    "    #  4,\n",
    "    5,\n",
    "]\n",
    "\n",
    "image_size = (320, 320)\n",
    "backbone_dict = {\n",
    "    \"small\": \"mobilenetv4_conv_small.e2400_r224_in1k\",\n",
    "    \"medium\": \"mobilenetv4_conv_medium.e500_r256_in1k\",\n",
    "    \"large\": \"mobilenetv4_conv_large.e500_r256_in1k\",\n",
    "}\n",
    "fpn_type = \"mm\"\n",
    "\n",
    "list_out_channel = [64, 96, 128]\n",
    "\n",
    "for num_outs in num_outs_list:\n",
    "    for out_channel, backbone_key in zip(list_out_channel, backbone_dict.keys()):\n",
    "        backbone_value = backbone_dict[backbone_key]\n",
    "        model = OCDFPN(\n",
    "            backbone=backbone_value,\n",
    "            n_classes=80,\n",
    "            num_outs=num_outs,\n",
    "            out_channel=out_channel,\n",
    "            fpn_type=fpn_type,\n",
    "        )\n",
    "\n",
    "        model_name = f\"fpn_mobilenetv4_conv_{backbone_key}_o{num_outs}_oc{out_channel}_r{image_size[0]}_coco\"\n",
    "        torch_model_path = None  # use initialized model\n",
    "        onnx_model_path = f\"saved_model/{model_name}/{model_name}.onnx\"\n",
    "        tf_folder_path = f\"saved_model/{model_name}\"\n",
    "        tflite_model_path = onnx_model_path.replace(\n",
    "            \".onnx\", \"_full_integer_quant_uint8.tflite\"\n",
    "        )\n",
    "        calib_data_path = \"calibdata.npy\"\n",
    "\n",
    "        converter = Converter(\n",
    "            model,\n",
    "            image_size,\n",
    "            torch_model_path,\n",
    "            onnx_model_path,\n",
    "            tf_folder_path,\n",
    "            tflite_model_path,\n",
    "            calib_data_path,\n",
    "            opset_version=11,\n",
    "        )\n",
    "        converter.convert()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### fpn_type: mm"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/chenxin/anaconda3/envs/ocd/lib/python3.10/site-packages/albumentations/__init__.py:13: UserWarning: A new version of Albumentations is available: 1.4.18 (you have 1.4.16). Upgrade using: pip install -U albumentations. To disable automatic update checks, set the environment variable NO_ALBUMENTATIONS_UPDATE to 1.\n",
      "  check_for_updates()\n",
      "WARNING:timm.models._builder:Unexpected keys (classifier.bias, classifier.weight, conv_head.weight, norm_head.bias, norm_head.num_batches_tracked, norm_head.running_mean, norm_head.running_var, norm_head.weight) found while loading pretrained weights. This may be expected if model is being adapted.\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/losses/cross_entropy_loss.py:250: UserWarning: Default ``avg_non_ignore`` is False, if you would like to ignore the certain label and average loss over non-ignore labels, which is the same with PyTorch official cross_entropy, set ``avg_non_ignore=True``.\n",
      "  warnings.warn(\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/utils/wrappers.py:48: TracerWarning: Converting a tensor to a Python integer might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!\n",
      "  size = [int(t * self.scale_factor) for t in x.shape[-2:]]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:From /home/chenxin/anaconda3/envs/ocd/lib/python3.10/site-packages/tensorflow/python/util/dispatch.py:1260: resize_nearest_neighbor (from tensorflow.python.ops.image_ops_impl) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Use `tf.image.resize(...method=ResizeMethod.NEAREST_NEIGHBOR...)` instead.\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:From /home/chenxin/anaconda3/envs/ocd/lib/python3.10/site-packages/tensorflow/python/util/dispatch.py:1260: resize_nearest_neighbor (from tensorflow.python.ops.image_ops_impl) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Use `tf.image.resize(...method=ResizeMethod.NEAREST_NEIGHBOR...)` instead.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/fpn_mobilenetv4_conv_small_o4_oc64_r320_coco/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/fpn_mobilenetv4_conv_small_o4_oc64_r320_coco/assets\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Preparing calibration data\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-10-11 14:59:20.708342: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:378] Ignored output_format.\n",
      "2024-10-11 14:59:20.708408: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:381] Ignored drop_control_dependency.\n",
      "2024-10-11 14:59:20.708780: I tensorflow/cc/saved_model/reader.cc:83] Reading SavedModel from: saved_model/fpn_mobilenetv4_conv_small_o4_oc64_r320_coco\n",
      "2024-10-11 14:59:20.740735: I tensorflow/cc/saved_model/reader.cc:51] Reading meta graph with tags { serve }\n",
      "2024-10-11 14:59:20.740784: I tensorflow/cc/saved_model/reader.cc:146] Reading SavedModel debug info (if present) from: saved_model/fpn_mobilenetv4_conv_small_o4_oc64_r320_coco\n",
      "2024-10-11 14:59:20.782741: I tensorflow/cc/saved_model/loader.cc:233] Restoring SavedModel bundle.\n",
      "2024-10-11 14:59:20.921163: I tensorflow/cc/saved_model/loader.cc:217] Running initialization op on SavedModel bundle at path: saved_model/fpn_mobilenetv4_conv_small_o4_oc64_r320_coco\n",
      "2024-10-11 14:59:21.074426: I tensorflow/cc/saved_model/loader.cc:316] SavedModel load for tags { serve }; Status: success: OK. Took 365648 microseconds.\n",
      "Summary on the non-converted ops:\n",
      "---------------------------------\n",
      " * Accepted dialects: tfl, builtin, func\n",
      " * Non-Converted Ops: 116, Total Ops 295, % non-converted = 39.32 %\n",
      " * 116 ARITH ops\n",
      "\n",
      "- arith.constant:  116 occurrences  (f32: 109, i32: 7)\n",
      "\n",
      "\n",
      "\n",
      "  (f32: 16)\n",
      "  (f32: 46)\n",
      "  (f32: 15)\n",
      "  (f32: 29)\n",
      "  (f32: 9)\n",
      "  (f32: 3)\n",
      "  (f32: 58)\n",
      "2024-10-11 14:59:21.679853: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 1.852 G  ops, equivalently 0.926 G  MACs\n",
      "fully_quantize: 0, inference_type: 6, input_inference_type: UINT8, output_inference_type: UINT8\n",
      "2024-10-11 14:59:23.781435: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 1.852 G  ops, equivalently 0.926 G  MACs\n",
      "WARNING:timm.models._builder:Unexpected keys (classifier.bias, classifier.weight, conv_head.weight, norm_head.bias, norm_head.num_batches_tracked, norm_head.running_mean, norm_head.running_var, norm_head.weight) found while loading pretrained weights. This may be expected if model is being adapted.\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/losses/cross_entropy_loss.py:250: UserWarning: Default ``avg_non_ignore`` is False, if you would like to ignore the certain label and average loss over non-ignore labels, which is the same with PyTorch official cross_entropy, set ``avg_non_ignore=True``.\n",
      "  warnings.warn(\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/utils/wrappers.py:48: TracerWarning: Converting a tensor to a Python integer might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!\n",
      "  size = [int(t * self.scale_factor) for t in x.shape[-2:]]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/fpn_mobilenetv4_conv_medium_o4_oc96_r320_coco/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/fpn_mobilenetv4_conv_medium_o4_oc96_r320_coco/assets\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Preparing calibration data\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-10-11 14:59:50.599873: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:378] Ignored output_format.\n",
      "2024-10-11 14:59:50.599946: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:381] Ignored drop_control_dependency.\n",
      "2024-10-11 14:59:50.600268: I tensorflow/cc/saved_model/reader.cc:83] Reading SavedModel from: saved_model/fpn_mobilenetv4_conv_medium_o4_oc96_r320_coco\n",
      "2024-10-11 14:59:50.620839: I tensorflow/cc/saved_model/reader.cc:51] Reading meta graph with tags { serve }\n",
      "2024-10-11 14:59:50.620887: I tensorflow/cc/saved_model/reader.cc:146] Reading SavedModel debug info (if present) from: saved_model/fpn_mobilenetv4_conv_medium_o4_oc96_r320_coco\n",
      "2024-10-11 14:59:50.661074: I tensorflow/cc/saved_model/loader.cc:233] Restoring SavedModel bundle.\n",
      "2024-10-11 14:59:50.828574: I tensorflow/cc/saved_model/loader.cc:217] Running initialization op on SavedModel bundle at path: saved_model/fpn_mobilenetv4_conv_medium_o4_oc96_r320_coco\n",
      "2024-10-11 14:59:50.977481: I tensorflow/cc/saved_model/loader.cc:316] SavedModel load for tags { serve }; Status: success: OK. Took 377213 microseconds.\n",
      "Summary on the non-converted ops:\n",
      "---------------------------------\n",
      " * Accepted dialects: tfl, builtin, func\n",
      " * Non-Converted Ops: 177, Total Ops 437, % non-converted = 40.50 %\n",
      " * 177 ARITH ops\n",
      "\n",
      "- arith.constant:  177 occurrences  (f32: 170, i32: 7)\n",
      "\n",
      "\n",
      "\n",
      "  (f32: 24)\n",
      "  (f32: 62)\n",
      "  (f32: 30)\n",
      "  (f32: 43)\n",
      "  (f32: 9)\n",
      "  (f32: 3)\n",
      "  (f32: 86)\n",
      "2024-10-11 14:59:52.269822: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 7.016 G  ops, equivalently 3.508 G  MACs\n",
      "fully_quantize: 0, inference_type: 6, input_inference_type: UINT8, output_inference_type: UINT8\n",
      "2024-10-11 14:59:57.598304: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 7.016 G  ops, equivalently 3.508 G  MACs\n",
      "WARNING:timm.models._builder:Unexpected keys (classifier.bias, classifier.weight, conv_head.weight, norm_head.bias, norm_head.num_batches_tracked, norm_head.running_mean, norm_head.running_var, norm_head.weight) found while loading pretrained weights. This may be expected if model is being adapted.\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/losses/cross_entropy_loss.py:250: UserWarning: Default ``avg_non_ignore`` is False, if you would like to ignore the certain label and average loss over non-ignore labels, which is the same with PyTorch official cross_entropy, set ``avg_non_ignore=True``.\n",
      "  warnings.warn(\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/utils/wrappers.py:48: TracerWarning: Converting a tensor to a Python integer might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!\n",
      "  size = [int(t * self.scale_factor) for t in x.shape[-2:]]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/fpn_mobilenetv4_conv_large_o4_oc128_r320_coco/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/fpn_mobilenetv4_conv_large_o4_oc128_r320_coco/assets\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Preparing calibration data\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-10-11 15:00:46.686188: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:378] Ignored output_format.\n",
      "2024-10-11 15:00:46.686238: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:381] Ignored drop_control_dependency.\n",
      "2024-10-11 15:00:46.686511: I tensorflow/cc/saved_model/reader.cc:83] Reading SavedModel from: saved_model/fpn_mobilenetv4_conv_large_o4_oc128_r320_coco\n",
      "2024-10-11 15:00:47.394186: I tensorflow/cc/saved_model/reader.cc:51] Reading meta graph with tags { serve }\n",
      "2024-10-11 15:00:47.394247: I tensorflow/cc/saved_model/reader.cc:146] Reading SavedModel debug info (if present) from: saved_model/fpn_mobilenetv4_conv_large_o4_oc128_r320_coco\n",
      "2024-10-11 15:00:47.440300: I tensorflow/cc/saved_model/loader.cc:233] Restoring SavedModel bundle.\n",
      "2024-10-11 15:00:47.569664: I tensorflow/cc/saved_model/loader.cc:217] Running initialization op on SavedModel bundle at path: saved_model/fpn_mobilenetv4_conv_large_o4_oc128_r320_coco\n",
      "2024-10-11 15:00:47.697275: I tensorflow/cc/saved_model/loader.cc:316] SavedModel load for tags { serve }; Status: success: OK. Took 1010766 microseconds.\n",
      "Summary on the non-converted ops:\n",
      "---------------------------------\n",
      " * Accepted dialects: tfl, builtin, func\n",
      " * Non-Converted Ops: 227, Total Ops 562, % non-converted = 40.39 %\n",
      " * 227 ARITH ops\n",
      "\n",
      "- arith.constant:  227 occurrences  (f32: 220, i32: 7)\n",
      "\n",
      "\n",
      "\n",
      "  (f32: 29)\n",
      "  (f32: 72)\n",
      "  (f32: 45)\n",
      "  (f32: 58)\n",
      "  (f32: 9)\n",
      "  (f32: 3)\n",
      "  (f32: 116)\n",
      "2024-10-11 15:00:49.176084: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 15.249 G  ops, equivalently 7.625 G  MACs\n",
      "fully_quantize: 0, inference_type: 6, input_inference_type: UINT8, output_inference_type: UINT8\n",
      "2024-10-11 15:01:00.405550: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 15.249 G  ops, equivalently 7.625 G  MACs\n"
     ]
    }
   ],
   "source": [
    "from models.fpn import OCDFPN\n",
    "\n",
    "num_outs = 4\n",
    "\n",
    "image_size = (320, 320)\n",
    "backbone_dict = {\n",
    "    \"small\": \"mobilenetv4_conv_small.e2400_r224_in1k\",\n",
    "    \"medium\": \"mobilenetv4_conv_medium.e500_r256_in1k\",\n",
    "    \"large\": \"mobilenetv4_conv_large.e500_r256_in1k\",\n",
    "}\n",
    "fpn_type = \"mm\"\n",
    "\n",
    "list_out_channel = [64, 96, 128]\n",
    "\n",
    "for out_channel, backbone_key in zip(list_out_channel, backbone_dict.keys()):\n",
    "    backbone_value = backbone_dict[backbone_key]\n",
    "    model = OCDFPN(\n",
    "        backbone=backbone_value,\n",
    "        n_classes=80,\n",
    "        num_outs=num_outs,\n",
    "        out_channel=out_channel,\n",
    "        fpn_type=fpn_type,\n",
    "    )\n",
    "\n",
    "    model_name = f\"fpn_mobilenetv4_conv_{backbone_key}_o{num_outs}_oc{out_channel}_r{image_size[0]}_coco\"\n",
    "    torch_model_path = None  # use initialized model\n",
    "    onnx_model_path = f\"saved_model/{model_name}/{model_name}.onnx\"\n",
    "    tf_folder_path = f\"saved_model/{model_name}\"\n",
    "    tflite_model_path = onnx_model_path.replace(\n",
    "        \".onnx\", \"_full_integer_quant_uint8.tflite\"\n",
    "    )\n",
    "    calib_data_path = \"calibdata.npy\"\n",
    "\n",
    "    converter = Converter(\n",
    "        model,\n",
    "        image_size,\n",
    "        torch_model_path,\n",
    "        onnx_model_path,\n",
    "        tf_folder_path,\n",
    "        tflite_model_path,\n",
    "        calib_data_path,\n",
    "        opset_version=11,\n",
    "    )\n",
    "    converter.convert()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### fpn_type: extra_dw"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "WARNING:timm.models._builder:Unexpected keys (classifier.bias, classifier.weight, conv_head.weight, norm_head.bias, norm_head.num_batches_tracked, norm_head.running_mean, norm_head.running_var, norm_head.weight) found while loading pretrained weights. This may be expected if model is being adapted.\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/losses/cross_entropy_loss.py:250: UserWarning: Default ``avg_non_ignore`` is False, if you would like to ignore the certain label and average loss over non-ignore labels, which is the same with PyTorch official cross_entropy, set ``avg_non_ignore=True``.\n",
      "  warnings.warn(\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/utils/wrappers.py:48: TracerWarning: Converting a tensor to a Python integer might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!\n",
      "  size = [int(t * self.scale_factor) for t in x.shape[-2:]]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/fpn_mobilenetv4_conv_small_extra_dw_o4_oc64_r320_coco/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/fpn_mobilenetv4_conv_small_extra_dw_o4_oc64_r320_coco/assets\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Preparing calibration data\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-10-11 15:01:25.190933: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:378] Ignored output_format.\n",
      "2024-10-11 15:01:25.190970: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:381] Ignored drop_control_dependency.\n",
      "2024-10-11 15:01:25.191312: I tensorflow/cc/saved_model/reader.cc:83] Reading SavedModel from: saved_model/fpn_mobilenetv4_conv_small_extra_dw_o4_oc64_r320_coco\n",
      "2024-10-11 15:01:25.235994: I tensorflow/cc/saved_model/reader.cc:51] Reading meta graph with tags { serve }\n",
      "2024-10-11 15:01:25.236036: I tensorflow/cc/saved_model/reader.cc:146] Reading SavedModel debug info (if present) from: saved_model/fpn_mobilenetv4_conv_small_extra_dw_o4_oc64_r320_coco\n",
      "2024-10-11 15:01:25.254538: I tensorflow/cc/saved_model/loader.cc:233] Restoring SavedModel bundle.\n",
      "2024-10-11 15:01:25.334730: I tensorflow/cc/saved_model/loader.cc:217] Running initialization op on SavedModel bundle at path: saved_model/fpn_mobilenetv4_conv_small_extra_dw_o4_oc64_r320_coco\n",
      "2024-10-11 15:01:25.415344: I tensorflow/cc/saved_model/loader.cc:316] SavedModel load for tags { serve }; Status: success: OK. Took 224033 microseconds.\n",
      "Summary on the non-converted ops:\n",
      "---------------------------------\n",
      " * Accepted dialects: tfl, builtin, func\n",
      " * Non-Converted Ops: 150, Total Ops 402, % non-converted = 37.31 %\n",
      " * 150 ARITH ops\n",
      "\n",
      "- arith.constant:  150 occurrences  (f32: 143, i32: 7)\n",
      "\n",
      "\n",
      "\n",
      "  (f32: 23)\n",
      "  (f32: 57)\n",
      "  (f32: 37)\n",
      "  (f32: 40)\n",
      "  (f32: 9)\n",
      "  (f32: 3)\n",
      "  (f32: 80)\n",
      "2024-10-11 15:01:25.793185: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 1.434 G  ops, equivalently 0.717 G  MACs\n",
      "fully_quantize: 0, inference_type: 6, input_inference_type: UINT8, output_inference_type: UINT8\n",
      "2024-10-11 15:01:28.606122: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 1.434 G  ops, equivalently 0.717 G  MACs\n",
      "WARNING:timm.models._builder:Unexpected keys (classifier.bias, classifier.weight, conv_head.weight, norm_head.bias, norm_head.num_batches_tracked, norm_head.running_mean, norm_head.running_var, norm_head.weight) found while loading pretrained weights. This may be expected if model is being adapted.\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/losses/cross_entropy_loss.py:250: UserWarning: Default ``avg_non_ignore`` is False, if you would like to ignore the certain label and average loss over non-ignore labels, which is the same with PyTorch official cross_entropy, set ``avg_non_ignore=True``.\n",
      "  warnings.warn(\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/utils/wrappers.py:48: TracerWarning: Converting a tensor to a Python integer might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!\n",
      "  size = [int(t * self.scale_factor) for t in x.shape[-2:]]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/fpn_mobilenetv4_conv_medium_extra_dw_o4_oc96_r320_coco/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/fpn_mobilenetv4_conv_medium_extra_dw_o4_oc96_r320_coco/assets\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Preparing calibration data\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-10-11 15:02:14.358221: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:378] Ignored output_format.\n",
      "2024-10-11 15:02:14.358267: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:381] Ignored drop_control_dependency.\n",
      "2024-10-11 15:02:14.358555: I tensorflow/cc/saved_model/reader.cc:83] Reading SavedModel from: saved_model/fpn_mobilenetv4_conv_medium_extra_dw_o4_oc96_r320_coco\n",
      "2024-10-11 15:02:14.514102: I tensorflow/cc/saved_model/reader.cc:51] Reading meta graph with tags { serve }\n",
      "2024-10-11 15:02:14.514137: I tensorflow/cc/saved_model/reader.cc:146] Reading SavedModel debug info (if present) from: saved_model/fpn_mobilenetv4_conv_medium_extra_dw_o4_oc96_r320_coco\n",
      "2024-10-11 15:02:14.546282: I tensorflow/cc/saved_model/loader.cc:233] Restoring SavedModel bundle.\n",
      "2024-10-11 15:02:14.711251: I tensorflow/cc/saved_model/loader.cc:217] Running initialization op on SavedModel bundle at path: saved_model/fpn_mobilenetv4_conv_medium_extra_dw_o4_oc96_r320_coco\n",
      "2024-10-11 15:02:14.888700: I tensorflow/cc/saved_model/loader.cc:316] SavedModel load for tags { serve }; Status: success: OK. Took 530146 microseconds.\n",
      "Summary on the non-converted ops:\n",
      "---------------------------------\n",
      " * Accepted dialects: tfl, builtin, func\n",
      " * Non-Converted Ops: 211, Total Ops 548, % non-converted = 38.50 %\n",
      " * 211 ARITH ops\n",
      "\n",
      "- arith.constant:  211 occurrences  (f32: 204, i32: 7)\n",
      "\n",
      "\n",
      "\n",
      "  (f32: 35)\n",
      "  (f32: 73)\n",
      "  (f32: 52)\n",
      "  (f32: 54)\n",
      "  (f32: 9)\n",
      "  (f32: 3)\n",
      "  (f32: 108)\n",
      "2024-10-11 15:02:15.551198: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 5.237 G  ops, equivalently 2.618 G  MACs\n",
      "fully_quantize: 0, inference_type: 6, input_inference_type: UINT8, output_inference_type: UINT8\n",
      "2024-10-11 15:02:21.840164: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 5.237 G  ops, equivalently 2.618 G  MACs\n",
      "WARNING:timm.models._builder:Unexpected keys (classifier.bias, classifier.weight, conv_head.weight, norm_head.bias, norm_head.num_batches_tracked, norm_head.running_mean, norm_head.running_var, norm_head.weight) found while loading pretrained weights. This may be expected if model is being adapted.\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/losses/cross_entropy_loss.py:250: UserWarning: Default ``avg_non_ignore`` is False, if you would like to ignore the certain label and average loss over non-ignore labels, which is the same with PyTorch official cross_entropy, set ``avg_non_ignore=True``.\n",
      "  warnings.warn(\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/utils/wrappers.py:48: TracerWarning: Converting a tensor to a Python integer might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!\n",
      "  size = [int(t * self.scale_factor) for t in x.shape[-2:]]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/fpn_mobilenetv4_conv_large_extra_dw_o4_oc128_r320_coco/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/fpn_mobilenetv4_conv_large_extra_dw_o4_oc128_r320_coco/assets\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Preparing calibration data\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-10-11 15:02:49.581277: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:378] Ignored output_format.\n",
      "2024-10-11 15:02:49.581311: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:381] Ignored drop_control_dependency.\n",
      "2024-10-11 15:02:49.581501: I tensorflow/cc/saved_model/reader.cc:83] Reading SavedModel from: saved_model/fpn_mobilenetv4_conv_large_extra_dw_o4_oc128_r320_coco\n",
      "2024-10-11 15:02:49.617877: I tensorflow/cc/saved_model/reader.cc:51] Reading meta graph with tags { serve }\n",
      "2024-10-11 15:02:49.617916: I tensorflow/cc/saved_model/reader.cc:146] Reading SavedModel debug info (if present) from: saved_model/fpn_mobilenetv4_conv_large_extra_dw_o4_oc128_r320_coco\n",
      "2024-10-11 15:02:49.668656: I tensorflow/cc/saved_model/loader.cc:233] Restoring SavedModel bundle.\n",
      "2024-10-11 15:02:49.825195: I tensorflow/cc/saved_model/loader.cc:217] Running initialization op on SavedModel bundle at path: saved_model/fpn_mobilenetv4_conv_large_extra_dw_o4_oc128_r320_coco\n",
      "2024-10-11 15:02:49.996665: I tensorflow/cc/saved_model/loader.cc:316] SavedModel load for tags { serve }; Status: success: OK. Took 415164 microseconds.\n",
      "Summary on the non-converted ops:\n",
      "---------------------------------\n",
      " * Accepted dialects: tfl, builtin, func\n",
      " * Non-Converted Ops: 261, Total Ops 673, % non-converted = 38.78 %\n",
      " * 261 ARITH ops\n",
      "\n",
      "- arith.constant:  261 occurrences  (f32: 254, i32: 7)\n",
      "\n",
      "\n",
      "\n",
      "  (f32: 40)\n",
      "  (f32: 83)\n",
      "  (f32: 67)\n",
      "  (f32: 69)\n",
      "  (f32: 9)\n",
      "  (f32: 3)\n",
      "  (f32: 138)\n",
      "2024-10-11 15:02:52.834466: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 12.032 G  ops, equivalently 6.016 G  MACs\n",
      "fully_quantize: 0, inference_type: 6, input_inference_type: UINT8, output_inference_type: UINT8\n",
      "2024-10-11 15:03:10.219592: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 12.032 G  ops, equivalently 6.016 G  MACs\n"
     ]
    }
   ],
   "source": [
    "from models.fpn import OCDFPN\n",
    "\n",
    "# Export the extra_dw-FPN OCDFPN for each MobileNetV4 backbone size as a\n",
    "# fully-integer-quantized uint8 TFLite model.\n",
    "num_outs = 4\n",
    "image_size = (320, 320)\n",
    "fpn_type = \"extra_dw\"\n",
    "\n",
    "backbone_dict = {\n",
    "    \"small\": \"mobilenetv4_conv_small.e2400_r224_in1k\",\n",
    "    \"medium\": \"mobilenetv4_conv_medium.e500_r256_in1k\",\n",
    "    \"large\": \"mobilenetv4_conv_large.e500_r256_in1k\",\n",
    "}\n",
    "list_out_channel = [64, 96, 128]\n",
    "\n",
    "# Pair each backbone size with its FPN output-channel width.\n",
    "for out_channel, (backbone_key, backbone_value) in zip(\n",
    "    list_out_channel, backbone_dict.items()\n",
    "):\n",
    "    model = OCDFPN(\n",
    "        backbone=backbone_value,\n",
    "        n_classes=80,\n",
    "        num_outs=num_outs,\n",
    "        out_channel=out_channel,\n",
    "        fpn_type=fpn_type,\n",
    "    )\n",
    "\n",
    "    # All exported artifacts share a single model-name stem.\n",
    "    model_name = (\n",
    "        f\"fpn_mobilenetv4_conv_{backbone_key}_{fpn_type}\"\n",
    "        f\"_o{num_outs}_oc{out_channel}_r{image_size[0]}_coco\"\n",
    "    )\n",
    "    torch_model_path = None  # no checkpoint: convert the freshly initialized model\n",
    "    onnx_model_path = f\"saved_model/{model_name}/{model_name}.onnx\"\n",
    "    tf_folder_path = f\"saved_model/{model_name}\"\n",
    "    tflite_model_path = onnx_model_path.replace(\n",
    "        \".onnx\", \"_full_integer_quant_uint8.tflite\"\n",
    "    )\n",
    "    calib_data_path = \"calibdata.npy\"\n",
    "\n",
    "    converter = Converter(\n",
    "        model,\n",
    "        image_size,\n",
    "        torch_model_path,\n",
    "        onnx_model_path,\n",
    "        tf_folder_path,\n",
    "        tflite_model_path,\n",
    "        calib_data_path,\n",
    "        opset_version=11,\n",
    "    )\n",
    "    converter.convert()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### fpn_type: ib"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "WARNING:timm.models._builder:Unexpected keys (classifier.bias, classifier.weight, conv_head.weight, norm_head.bias, norm_head.num_batches_tracked, norm_head.running_mean, norm_head.running_var, norm_head.weight) found while loading pretrained weights. This may be expected if model is being adapted.\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/losses/cross_entropy_loss.py:250: UserWarning: Default ``avg_non_ignore`` is False, if you would like to ignore the certain label and average loss over non-ignore labels, which is the same with PyTorch official cross_entropy, set ``avg_non_ignore=True``.\n",
      "  warnings.warn(\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/utils/wrappers.py:48: TracerWarning: Converting a tensor to a Python integer might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!\n",
      "  size = [int(t * self.scale_factor) for t in x.shape[-2:]]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/fpn_mobilenetv4_conv_small_ib_o4_oc64_r320_coco/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/fpn_mobilenetv4_conv_small_ib_o4_oc64_r320_coco/assets\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Preparing calibration data\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-10-11 15:03:33.512436: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:378] Ignored output_format.\n",
      "2024-10-11 15:03:33.512489: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:381] Ignored drop_control_dependency.\n",
      "2024-10-11 15:03:33.512731: I tensorflow/cc/saved_model/reader.cc:83] Reading SavedModel from: saved_model/fpn_mobilenetv4_conv_small_ib_o4_oc64_r320_coco\n",
      "2024-10-11 15:03:33.556725: I tensorflow/cc/saved_model/reader.cc:51] Reading meta graph with tags { serve }\n",
      "2024-10-11 15:03:33.556758: I tensorflow/cc/saved_model/reader.cc:146] Reading SavedModel debug info (if present) from: saved_model/fpn_mobilenetv4_conv_small_ib_o4_oc64_r320_coco\n",
      "2024-10-11 15:03:33.573319: I tensorflow/cc/saved_model/loader.cc:233] Restoring SavedModel bundle.\n",
      "2024-10-11 15:03:33.657909: I tensorflow/cc/saved_model/loader.cc:217] Running initialization op on SavedModel bundle at path: saved_model/fpn_mobilenetv4_conv_small_ib_o4_oc64_r320_coco\n",
      "2024-10-11 15:03:33.755952: I tensorflow/cc/saved_model/loader.cc:316] SavedModel load for tags { serve }; Status: success: OK. Took 243223 microseconds.\n",
      "Summary on the non-converted ops:\n",
      "---------------------------------\n",
      " * Accepted dialects: tfl, builtin, func\n",
      " * Non-Converted Ops: 139, Total Ops 347, % non-converted = 40.06 %\n",
      " * 139 ARITH ops\n",
      "\n",
      "- arith.constant:  139 occurrences  (f32: 132, i32: 7)\n",
      "\n",
      "\n",
      "\n",
      "  (f32: 23)\n",
      "  (f32: 57)\n",
      "  (f32: 26)\n",
      "  (f32: 29)\n",
      "  (f32: 9)\n",
      "  (f32: 3)\n",
      "  (f32: 58)\n",
      "2024-10-11 15:03:34.177258: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 1.411 G  ops, equivalently 0.706 G  MACs\n",
      "fully_quantize: 0, inference_type: 6, input_inference_type: UINT8, output_inference_type: UINT8\n",
      "2024-10-11 15:03:36.551191: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 1.411 G  ops, equivalently 0.706 G  MACs\n",
      "WARNING:timm.models._builder:Unexpected keys (classifier.bias, classifier.weight, conv_head.weight, norm_head.bias, norm_head.num_batches_tracked, norm_head.running_mean, norm_head.running_var, norm_head.weight) found while loading pretrained weights. This may be expected if model is being adapted.\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/losses/cross_entropy_loss.py:250: UserWarning: Default ``avg_non_ignore`` is False, if you would like to ignore the certain label and average loss over non-ignore labels, which is the same with PyTorch official cross_entropy, set ``avg_non_ignore=True``.\n",
      "  warnings.warn(\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/utils/wrappers.py:48: TracerWarning: Converting a tensor to a Python integer might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!\n",
      "  size = [int(t * self.scale_factor) for t in x.shape[-2:]]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/fpn_mobilenetv4_conv_medium_ib_o4_oc96_r320_coco/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/fpn_mobilenetv4_conv_medium_ib_o4_oc96_r320_coco/assets\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Preparing calibration data\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-10-11 15:03:56.846439: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:378] Ignored output_format.\n",
      "2024-10-11 15:03:56.846496: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:381] Ignored drop_control_dependency.\n",
      "2024-10-11 15:03:56.846797: I tensorflow/cc/saved_model/reader.cc:83] Reading SavedModel from: saved_model/fpn_mobilenetv4_conv_medium_ib_o4_oc96_r320_coco\n",
      "2024-10-11 15:03:56.970887: I tensorflow/cc/saved_model/reader.cc:51] Reading meta graph with tags { serve }\n",
      "2024-10-11 15:03:56.970932: I tensorflow/cc/saved_model/reader.cc:146] Reading SavedModel debug info (if present) from: saved_model/fpn_mobilenetv4_conv_medium_ib_o4_oc96_r320_coco\n",
      "2024-10-11 15:03:57.003953: I tensorflow/cc/saved_model/loader.cc:233] Restoring SavedModel bundle.\n",
      "2024-10-11 15:03:57.169781: I tensorflow/cc/saved_model/loader.cc:217] Running initialization op on SavedModel bundle at path: saved_model/fpn_mobilenetv4_conv_medium_ib_o4_oc96_r320_coco\n",
      "2024-10-11 15:03:57.328040: I tensorflow/cc/saved_model/loader.cc:316] SavedModel load for tags { serve }; Status: success: OK. Took 481244 microseconds.\n",
      "Summary on the non-converted ops:\n",
      "---------------------------------\n",
      " * Accepted dialects: tfl, builtin, func\n",
      " * Non-Converted Ops: 200, Total Ops 493, % non-converted = 40.57 %\n",
      " * 200 ARITH ops\n",
      "\n",
      "- arith.constant:  200 occurrences  (f32: 193, i32: 7)\n",
      "\n",
      "\n",
      "\n",
      "  (f32: 35)\n",
      "  (f32: 73)\n",
      "  (f32: 41)\n",
      "  (f32: 43)\n",
      "  (f32: 9)\n",
      "  (f32: 3)\n",
      "  (f32: 86)\n",
      "2024-10-11 15:03:58.557324: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 5.199 G  ops, equivalently 2.600 G  MACs\n",
      "fully_quantize: 0, inference_type: 6, input_inference_type: UINT8, output_inference_type: UINT8\n",
      "2024-10-11 15:04:04.788464: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 5.199 G  ops, equivalently 2.600 G  MACs\n",
      "WARNING:timm.models._builder:Unexpected keys (classifier.bias, classifier.weight, conv_head.weight, norm_head.bias, norm_head.num_batches_tracked, norm_head.running_mean, norm_head.running_var, norm_head.weight) found while loading pretrained weights. This may be expected if model is being adapted.\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/losses/cross_entropy_loss.py:250: UserWarning: Default ``avg_non_ignore`` is False, if you would like to ignore the certain label and average loss over non-ignore labels, which is the same with PyTorch official cross_entropy, set ``avg_non_ignore=True``.\n",
      "  warnings.warn(\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/utils/wrappers.py:48: TracerWarning: Converting a tensor to a Python integer might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!\n",
      "  size = [int(t * self.scale_factor) for t in x.shape[-2:]]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/fpn_mobilenetv4_conv_large_ib_o4_oc128_r320_coco/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/fpn_mobilenetv4_conv_large_ib_o4_oc128_r320_coco/assets\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Preparing calibration data\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-10-11 15:04:48.116907: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:378] Ignored output_format.\n",
      "2024-10-11 15:04:48.116946: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:381] Ignored drop_control_dependency.\n",
      "2024-10-11 15:04:48.117188: I tensorflow/cc/saved_model/reader.cc:83] Reading SavedModel from: saved_model/fpn_mobilenetv4_conv_large_ib_o4_oc128_r320_coco\n",
      "2024-10-11 15:04:48.158373: I tensorflow/cc/saved_model/reader.cc:51] Reading meta graph with tags { serve }\n",
      "2024-10-11 15:04:48.158417: I tensorflow/cc/saved_model/reader.cc:146] Reading SavedModel debug info (if present) from: saved_model/fpn_mobilenetv4_conv_large_ib_o4_oc128_r320_coco\n",
      "2024-10-11 15:04:48.204857: I tensorflow/cc/saved_model/loader.cc:233] Restoring SavedModel bundle.\n",
      "2024-10-11 15:04:48.361068: I tensorflow/cc/saved_model/loader.cc:217] Running initialization op on SavedModel bundle at path: saved_model/fpn_mobilenetv4_conv_large_ib_o4_oc128_r320_coco\n",
      "2024-10-11 15:04:48.515727: I tensorflow/cc/saved_model/loader.cc:316] SavedModel load for tags { serve }; Status: success: OK. Took 398541 microseconds.\n",
      "Summary on the non-converted ops:\n",
      "---------------------------------\n",
      " * Accepted dialects: tfl, builtin, func\n",
      " * Non-Converted Ops: 250, Total Ops 618, % non-converted = 40.45 %\n",
      " * 250 ARITH ops\n",
      "\n",
      "- arith.constant:  250 occurrences  (f32: 243, i32: 7)\n",
      "\n",
      "\n",
      "\n",
      "  (f32: 40)\n",
      "  (f32: 83)\n",
      "  (f32: 56)\n",
      "  (f32: 58)\n",
      "  (f32: 9)\n",
      "  (f32: 3)\n",
      "  (f32: 116)\n",
      "2024-10-11 15:04:50.144886: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 11.982 G  ops, equivalently 5.991 G  MACs\n",
      "fully_quantize: 0, inference_type: 6, input_inference_type: UINT8, output_inference_type: UINT8\n",
      "2024-10-11 15:05:03.921848: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 11.982 G  ops, equivalently 5.991 G  MACs\n"
     ]
    }
   ],
   "source": [
    "from models.fpn import OCDFPN\n",
    "\n",
    "# Export the ib-FPN OCDFPN for each MobileNetV4 backbone size as a\n",
    "# fully-integer-quantized uint8 TFLite model.\n",
    "num_outs = 4\n",
    "image_size = (320, 320)\n",
    "fpn_type = \"ib\"\n",
    "\n",
    "backbone_dict = {\n",
    "    \"small\": \"mobilenetv4_conv_small.e2400_r224_in1k\",\n",
    "    \"medium\": \"mobilenetv4_conv_medium.e500_r256_in1k\",\n",
    "    \"large\": \"mobilenetv4_conv_large.e500_r256_in1k\",\n",
    "}\n",
    "list_out_channel = [64, 96, 128]\n",
    "\n",
    "# Pair each backbone size with its FPN output-channel width.\n",
    "for out_channel, (backbone_key, backbone_value) in zip(\n",
    "    list_out_channel, backbone_dict.items()\n",
    "):\n",
    "    model = OCDFPN(\n",
    "        backbone=backbone_value,\n",
    "        n_classes=80,\n",
    "        num_outs=num_outs,\n",
    "        out_channel=out_channel,\n",
    "        fpn_type=fpn_type,\n",
    "    )\n",
    "\n",
    "    # All exported artifacts share a single model-name stem.\n",
    "    model_name = (\n",
    "        f\"fpn_mobilenetv4_conv_{backbone_key}_{fpn_type}\"\n",
    "        f\"_o{num_outs}_oc{out_channel}_r{image_size[0]}_coco\"\n",
    "    )\n",
    "    torch_model_path = None  # no checkpoint: convert the freshly initialized model\n",
    "    onnx_model_path = f\"saved_model/{model_name}/{model_name}.onnx\"\n",
    "    tf_folder_path = f\"saved_model/{model_name}\"\n",
    "    tflite_model_path = onnx_model_path.replace(\n",
    "        \".onnx\", \"_full_integer_quant_uint8.tflite\"\n",
    "    )\n",
    "    calib_data_path = \"calibdata.npy\"\n",
    "\n",
    "    converter = Converter(\n",
    "        model,\n",
    "        image_size,\n",
    "        torch_model_path,\n",
    "        onnx_model_path,\n",
    "        tf_folder_path,\n",
    "        tflite_model_path,\n",
    "        calib_data_path,\n",
    "        opset_version=11,\n",
    "    )\n",
    "    converter.convert()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "WARNING:timm.models._builder:Unexpected keys (classifier.bias, classifier.weight, conv_head.weight, norm_head.bias, norm_head.num_batches_tracked, norm_head.running_mean, norm_head.running_var, norm_head.weight) found while loading pretrained weights. This may be expected if model is being adapted.\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/losses/cross_entropy_loss.py:250: UserWarning: Default ``avg_non_ignore`` is False, if you would like to ignore the certain label and average loss over non-ignore labels, which is the same with PyTorch official cross_entropy, set ``avg_non_ignore=True``.\n",
      "  warnings.warn(\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/utils/wrappers.py:48: TracerWarning: Converting a tensor to a Python integer might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!\n",
      "  size = [int(t * self.scale_factor) for t in x.shape[-2:]]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/fpn_mobilenetv4_conv_small_convnext_o4_oc64_r320_coco/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/fpn_mobilenetv4_conv_small_convnext_o4_oc64_r320_coco/assets\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Preparing calibration data\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-10-11 15:05:15.754573: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:378] Ignored output_format.\n",
      "2024-10-11 15:05:15.754628: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:381] Ignored drop_control_dependency.\n",
      "2024-10-11 15:05:15.754879: I tensorflow/cc/saved_model/reader.cc:83] Reading SavedModel from: saved_model/fpn_mobilenetv4_conv_small_convnext_o4_oc64_r320_coco\n",
      "2024-10-11 15:05:15.762121: I tensorflow/cc/saved_model/reader.cc:51] Reading meta graph with tags { serve }\n",
      "2024-10-11 15:05:15.762142: I tensorflow/cc/saved_model/reader.cc:146] Reading SavedModel debug info (if present) from: saved_model/fpn_mobilenetv4_conv_small_convnext_o4_oc64_r320_coco\n",
      "2024-10-11 15:05:15.774050: I tensorflow/cc/saved_model/loader.cc:233] Restoring SavedModel bundle.\n",
      "2024-10-11 15:05:15.854100: I tensorflow/cc/saved_model/loader.cc:217] Running initialization op on SavedModel bundle at path: saved_model/fpn_mobilenetv4_conv_small_convnext_o4_oc64_r320_coco\n",
      "2024-10-11 15:05:15.937802: I tensorflow/cc/saved_model/loader.cc:316] SavedModel load for tags { serve }; Status: success: OK. Took 182926 microseconds.\n",
      "Summary on the non-converted ops:\n",
      "---------------------------------\n",
      " * Accepted dialects: tfl, builtin, func\n",
      " * Non-Converted Ops: 131, Total Ops 327, % non-converted = 40.06 %\n",
      " * 131 ARITH ops\n",
      "\n",
      "- arith.constant:  131 occurrences  (f32: 124, i32: 7)\n",
      "\n",
      "\n",
      "\n",
      "  (f32: 19)\n",
      "  (f32: 53)\n",
      "  (f32: 22)\n",
      "  (f32: 29)\n",
      "  (f32: 9)\n",
      "  (f32: 3)\n",
      "  (f32: 58)\n",
      "2024-10-11 15:05:16.322551: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 1.725 G  ops, equivalently 0.862 G  MACs\n",
      "fully_quantize: 0, inference_type: 6, input_inference_type: UINT8, output_inference_type: UINT8\n",
      "2024-10-11 15:05:18.243081: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 1.725 G  ops, equivalently 0.862 G  MACs\n",
      "WARNING:timm.models._builder:Unexpected keys (classifier.bias, classifier.weight, conv_head.weight, norm_head.bias, norm_head.num_batches_tracked, norm_head.running_mean, norm_head.running_var, norm_head.weight) found while loading pretrained weights. This may be expected if model is being adapted.\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/losses/cross_entropy_loss.py:250: UserWarning: Default ``avg_non_ignore`` is False, if you would like to ignore the certain label and average loss over non-ignore labels, which is the same with PyTorch official cross_entropy, set ``avg_non_ignore=True``.\n",
      "  warnings.warn(\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/utils/wrappers.py:48: TracerWarning: Converting a tensor to a Python integer might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!\n",
      "  size = [int(t * self.scale_factor) for t in x.shape[-2:]]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/fpn_mobilenetv4_conv_medium_convnext_o4_oc96_r320_coco/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/fpn_mobilenetv4_conv_medium_convnext_o4_oc96_r320_coco/assets\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Preparing calibration data\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-10-11 15:05:45.234976: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:378] Ignored output_format.\n",
      "2024-10-11 15:05:45.235042: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:381] Ignored drop_control_dependency.\n",
      "2024-10-11 15:05:45.235355: I tensorflow/cc/saved_model/reader.cc:83] Reading SavedModel from: saved_model/fpn_mobilenetv4_conv_medium_convnext_o4_oc96_r320_coco\n",
      "2024-10-11 15:05:45.255684: I tensorflow/cc/saved_model/reader.cc:51] Reading meta graph with tags { serve }\n",
      "2024-10-11 15:05:45.255732: I tensorflow/cc/saved_model/reader.cc:146] Reading SavedModel debug info (if present) from: saved_model/fpn_mobilenetv4_conv_medium_convnext_o4_oc96_r320_coco\n",
      "2024-10-11 15:05:45.287705: I tensorflow/cc/saved_model/loader.cc:233] Restoring SavedModel bundle.\n",
      "2024-10-11 15:05:45.382411: I tensorflow/cc/saved_model/loader.cc:217] Running initialization op on SavedModel bundle at path: saved_model/fpn_mobilenetv4_conv_medium_convnext_o4_oc96_r320_coco\n",
      "2024-10-11 15:05:45.479196: I tensorflow/cc/saved_model/loader.cc:316] SavedModel load for tags { serve }; Status: success: OK. Took 243843 microseconds.\n",
      "Summary on the non-converted ops:\n",
      "---------------------------------\n",
      " * Accepted dialects: tfl, builtin, func\n",
      " * Non-Converted Ops: 192, Total Ops 473, % non-converted = 40.59 %\n",
      " * 192 ARITH ops\n",
      "\n",
      "- arith.constant:  192 occurrences  (f32: 185, i32: 7)\n",
      "\n",
      "\n",
      "\n",
      "  (f32: 31)\n",
      "  (f32: 69)\n",
      "  (f32: 37)\n",
      "  (f32: 43)\n",
      "  (f32: 9)\n",
      "  (f32: 3)\n",
      "  (f32: 86)\n",
      "2024-10-11 15:05:46.262778: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 5.927 G  ops, equivalently 2.963 G  MACs\n",
      "fully_quantize: 0, inference_type: 6, input_inference_type: UINT8, output_inference_type: UINT8\n",
      "2024-10-11 15:05:51.823086: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 5.927 G  ops, equivalently 2.963 G  MACs\n",
      "WARNING:timm.models._builder:Unexpected keys (classifier.bias, classifier.weight, conv_head.weight, norm_head.bias, norm_head.num_batches_tracked, norm_head.running_mean, norm_head.running_var, norm_head.weight) found while loading pretrained weights. This may be expected if model is being adapted.\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/losses/cross_entropy_loss.py:250: UserWarning: Default ``avg_non_ignore`` is False, if you would like to ignore the certain label and average loss over non-ignore labels, which is the same with PyTorch official cross_entropy, set ``avg_non_ignore=True``.\n",
      "  warnings.warn(\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/utils/wrappers.py:48: TracerWarning: Converting a tensor to a Python integer might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!\n",
      "  size = [int(t * self.scale_factor) for t in x.shape[-2:]]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/fpn_mobilenetv4_conv_large_convnext_o4_oc128_r320_coco/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/fpn_mobilenetv4_conv_large_convnext_o4_oc128_r320_coco/assets\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Preparing calibration data\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-10-11 15:06:28.317601: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:378] Ignored output_format.\n",
      "2024-10-11 15:06:28.317654: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:381] Ignored drop_control_dependency.\n",
      "2024-10-11 15:06:28.317871: I tensorflow/cc/saved_model/reader.cc:83] Reading SavedModel from: saved_model/fpn_mobilenetv4_conv_large_convnext_o4_oc128_r320_coco\n",
      "2024-10-11 15:06:28.598057: I tensorflow/cc/saved_model/reader.cc:51] Reading meta graph with tags { serve }\n",
      "2024-10-11 15:06:28.598096: I tensorflow/cc/saved_model/reader.cc:146] Reading SavedModel debug info (if present) from: saved_model/fpn_mobilenetv4_conv_large_convnext_o4_oc128_r320_coco\n",
      "2024-10-11 15:06:28.632620: I tensorflow/cc/saved_model/loader.cc:233] Restoring SavedModel bundle.\n",
      "2024-10-11 15:06:28.785790: I tensorflow/cc/saved_model/loader.cc:217] Running initialization op on SavedModel bundle at path: saved_model/fpn_mobilenetv4_conv_large_convnext_o4_oc128_r320_coco\n",
      "2024-10-11 15:06:28.977701: I tensorflow/cc/saved_model/loader.cc:316] SavedModel load for tags { serve }; Status: success: OK. Took 659832 microseconds.\n",
      "Summary on the non-converted ops:\n",
      "---------------------------------\n",
      " * Accepted dialects: tfl, builtin, func\n",
      " * Non-Converted Ops: 242, Total Ops 598, % non-converted = 40.47 %\n",
      " * 242 ARITH ops\n",
      "\n",
      "- arith.constant:  242 occurrences  (f32: 235, i32: 7)\n",
      "\n",
      "\n",
      "\n",
      "  (f32: 36)\n",
      "  (f32: 79)\n",
      "  (f32: 52)\n",
      "  (f32: 58)\n",
      "  (f32: 9)\n",
      "  (f32: 3)\n",
      "  (f32: 116)\n",
      "2024-10-11 15:06:30.741668: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 13.301 G  ops, equivalently 6.650 G  MACs\n",
      "fully_quantize: 0, inference_type: 6, input_inference_type: UINT8, output_inference_type: UINT8\n",
      "2024-10-11 15:06:45.912350: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 13.301 G  ops, equivalently 6.650 G  MACs\n"
     ]
    }
   ],
   "source": [
    "from models.fpn import OCDFPN\n",
    "\n",
    "# Export the convnext-FPN OCDFPN for each MobileNetV4 backbone size as a\n",
    "# fully-integer-quantized uint8 TFLite model.\n",
    "num_outs = 4\n",
    "image_size = (320, 320)\n",
    "fpn_type = \"convnext\"\n",
    "\n",
    "backbone_dict = {\n",
    "    \"small\": \"mobilenetv4_conv_small.e2400_r224_in1k\",\n",
    "    \"medium\": \"mobilenetv4_conv_medium.e500_r256_in1k\",\n",
    "    \"large\": \"mobilenetv4_conv_large.e500_r256_in1k\",\n",
    "}\n",
    "list_out_channel = [64, 96, 128]\n",
    "\n",
    "# Pair each backbone size with its FPN output-channel width.\n",
    "for out_channel, (backbone_key, backbone_value) in zip(\n",
    "    list_out_channel, backbone_dict.items()\n",
    "):\n",
    "    model = OCDFPN(\n",
    "        backbone=backbone_value,\n",
    "        n_classes=80,\n",
    "        num_outs=num_outs,\n",
    "        out_channel=out_channel,\n",
    "        fpn_type=fpn_type,\n",
    "    )\n",
    "\n",
    "    # All exported artifacts share a single model-name stem.\n",
    "    model_name = (\n",
    "        f\"fpn_mobilenetv4_conv_{backbone_key}_{fpn_type}\"\n",
    "        f\"_o{num_outs}_oc{out_channel}_r{image_size[0]}_coco\"\n",
    "    )\n",
    "    torch_model_path = None  # no checkpoint: convert the freshly initialized model\n",
    "    onnx_model_path = f\"saved_model/{model_name}/{model_name}.onnx\"\n",
    "    tf_folder_path = f\"saved_model/{model_name}\"\n",
    "    tflite_model_path = onnx_model_path.replace(\n",
    "        \".onnx\", \"_full_integer_quant_uint8.tflite\"\n",
    "    )\n",
    "    calib_data_path = \"calibdata.npy\"\n",
    "\n",
    "    converter = Converter(\n",
    "        model,\n",
    "        image_size,\n",
    "        torch_model_path,\n",
    "        onnx_model_path,\n",
    "        tf_folder_path,\n",
    "        tflite_model_path,\n",
    "        calib_data_path,\n",
    "        opset_version=11,\n",
    "    )\n",
    "    converter.convert()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Mobilenetv4PAFPN"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### c1\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### num_outs"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "WARNING:timm.models._builder:Unexpected keys (classifier.bias, classifier.weight, conv_head.weight, norm_head.bias, norm_head.num_batches_tracked, norm_head.running_mean, norm_head.running_var, norm_head.weight) found while loading pretrained weights. This may be expected if model is being adapted.\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/decode_heads/decode_head.py:137: UserWarning: threshold is not defined for binary, and defaultsto 0.3\n",
      "  warnings.warn(\"threshold is not defined for binary, and defaults\" \"to 0.3\")\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/losses/cross_entropy_loss.py:250: UserWarning: Default ``avg_non_ignore`` is False, if you would like to ignore the certain label and average loss over non-ignore labels, which is the same with PyTorch official cross_entropy, set ``avg_non_ignore=True``.\n",
      "  warnings.warn(\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/utils/wrappers.py:48: TracerWarning: Converting a tensor to a Python integer might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!\n",
      "  size = [int(t * self.scale_factor) for t in x.shape[-2:]]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/pafpn_mobilenetv4_conv_small_o3_oc64_r320/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/pafpn_mobilenetv4_conv_small_o3_oc64_r320/assets\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Preparing calibration data\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-10-14 00:14:33.601753: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:378] Ignored output_format.\n",
      "2024-10-14 00:14:33.601791: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:381] Ignored drop_control_dependency.\n",
      "2024-10-14 00:14:33.602055: I tensorflow/cc/saved_model/reader.cc:83] Reading SavedModel from: saved_model/pafpn_mobilenetv4_conv_small_o3_oc64_r320\n",
      "2024-10-14 00:14:33.626581: I tensorflow/cc/saved_model/reader.cc:51] Reading meta graph with tags { serve }\n",
      "2024-10-14 00:14:33.626613: I tensorflow/cc/saved_model/reader.cc:146] Reading SavedModel debug info (if present) from: saved_model/pafpn_mobilenetv4_conv_small_o3_oc64_r320\n",
      "2024-10-14 00:14:33.640431: I tensorflow/cc/saved_model/loader.cc:233] Restoring SavedModel bundle.\n",
      "2024-10-14 00:14:33.696363: I tensorflow/cc/saved_model/loader.cc:217] Running initialization op on SavedModel bundle at path: saved_model/pafpn_mobilenetv4_conv_small_o3_oc64_r320\n",
      "2024-10-14 00:14:33.749006: I tensorflow/cc/saved_model/loader.cc:316] SavedModel load for tags { serve }; Status: success: OK. Took 146966 microseconds.\n",
      "Summary on the non-converted ops:\n",
      "---------------------------------\n",
      " * Accepted dialects: tfl, builtin, func\n",
      " * Non-Converted Ops: 115, Total Ops 282, % non-converted = 40.78 %\n",
      " * 115 ARITH ops\n",
      "\n",
      "- arith.constant:  115 occurrences  (f32: 108, i32: 7)\n",
      "\n",
      "\n",
      "\n",
      "  (f32: 16)\n",
      "  (f32: 45)\n",
      "  (f32: 15)\n",
      "  (f32: 27)\n",
      "  (f32: 1)\n",
      "  (f32: 5)\n",
      "  (f32: 2)\n",
      "  (f32: 53)\n",
      "2024-10-14 00:14:34.044060: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 1.098 G  ops, equivalently 0.549 G  MACs\n",
      "fully_quantize: 0, inference_type: 6, input_inference_type: UINT8, output_inference_type: UINT8\n",
      "2024-10-14 00:14:35.313873: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 1.098 G  ops, equivalently 0.549 G  MACs\n",
      "WARNING:timm.models._builder:Unexpected keys (classifier.bias, classifier.weight, conv_head.weight, norm_head.bias, norm_head.num_batches_tracked, norm_head.running_mean, norm_head.running_var, norm_head.weight) found while loading pretrained weights. This may be expected if model is being adapted.\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/decode_heads/decode_head.py:137: UserWarning: threshold is not defined for binary, and defaultsto 0.3\n",
      "  warnings.warn(\"threshold is not defined for binary, and defaults\" \"to 0.3\")\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/losses/cross_entropy_loss.py:250: UserWarning: Default ``avg_non_ignore`` is False, if you would like to ignore the certain label and average loss over non-ignore labels, which is the same with PyTorch official cross_entropy, set ``avg_non_ignore=True``.\n",
      "  warnings.warn(\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/utils/wrappers.py:48: TracerWarning: Converting a tensor to a Python integer might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!\n",
      "  size = [int(t * self.scale_factor) for t in x.shape[-2:]]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/pafpn_mobilenetv4_conv_medium_o3_oc96_r320/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/pafpn_mobilenetv4_conv_medium_o3_oc96_r320/assets\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Preparing calibration data\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-10-14 00:14:46.537919: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:378] Ignored output_format.\n",
      "2024-10-14 00:14:46.537957: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:381] Ignored drop_control_dependency.\n",
      "2024-10-14 00:14:46.538237: I tensorflow/cc/saved_model/reader.cc:83] Reading SavedModel from: saved_model/pafpn_mobilenetv4_conv_medium_o3_oc96_r320\n",
      "2024-10-14 00:14:46.687540: I tensorflow/cc/saved_model/reader.cc:51] Reading meta graph with tags { serve }\n",
      "2024-10-14 00:14:46.687572: I tensorflow/cc/saved_model/reader.cc:146] Reading SavedModel debug info (if present) from: saved_model/pafpn_mobilenetv4_conv_medium_o3_oc96_r320\n",
      "2024-10-14 00:14:46.705929: I tensorflow/cc/saved_model/loader.cc:233] Restoring SavedModel bundle.\n",
      "2024-10-14 00:14:46.795932: I tensorflow/cc/saved_model/loader.cc:217] Running initialization op on SavedModel bundle at path: saved_model/pafpn_mobilenetv4_conv_medium_o3_oc96_r320\n",
      "2024-10-14 00:14:46.873309: I tensorflow/cc/saved_model/loader.cc:316] SavedModel load for tags { serve }; Status: success: OK. Took 335074 microseconds.\n",
      "Summary on the non-converted ops:\n",
      "---------------------------------\n",
      " * Accepted dialects: tfl, builtin, func\n",
      " * Non-Converted Ops: 177, Total Ops 425, % non-converted = 41.65 %\n",
      " * 177 ARITH ops\n",
      "\n",
      "- arith.constant:  177 occurrences  (f32: 170, i32: 7)\n",
      "\n",
      "\n",
      "\n",
      "  (f32: 24)\n",
      "  (f32: 61)\n",
      "  (f32: 30)\n",
      "  (f32: 41)\n",
      "  (f32: 1)\n",
      "  (f32: 5)\n",
      "  (f32: 2)\n",
      "  (f32: 81)\n",
      "2024-10-14 00:14:47.424425: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 4.075 G  ops, equivalently 2.037 G  MACs\n",
      "fully_quantize: 0, inference_type: 6, input_inference_type: UINT8, output_inference_type: UINT8\n",
      "2024-10-14 00:14:50.986362: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 4.075 G  ops, equivalently 2.037 G  MACs\n",
      "WARNING:timm.models._builder:Unexpected keys (classifier.bias, classifier.weight, conv_head.weight, norm_head.bias, norm_head.num_batches_tracked, norm_head.running_mean, norm_head.running_var, norm_head.weight) found while loading pretrained weights. This may be expected if model is being adapted.\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/decode_heads/decode_head.py:137: UserWarning: threshold is not defined for binary, and defaultsto 0.3\n",
      "  warnings.warn(\"threshold is not defined for binary, and defaults\" \"to 0.3\")\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/losses/cross_entropy_loss.py:250: UserWarning: Default ``avg_non_ignore`` is False, if you would like to ignore the certain label and average loss over non-ignore labels, which is the same with PyTorch official cross_entropy, set ``avg_non_ignore=True``.\n",
      "  warnings.warn(\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/utils/wrappers.py:48: TracerWarning: Converting a tensor to a Python integer might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!\n",
      "  size = [int(t * self.scale_factor) for t in x.shape[-2:]]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/pafpn_mobilenetv4_conv_large_o3_oc128_r320/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/pafpn_mobilenetv4_conv_large_o3_oc128_r320/assets\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Preparing calibration data\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-10-14 00:15:13.456329: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:378] Ignored output_format.\n",
      "2024-10-14 00:15:13.456366: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:381] Ignored drop_control_dependency.\n",
      "2024-10-14 00:15:13.456587: I tensorflow/cc/saved_model/reader.cc:83] Reading SavedModel from: saved_model/pafpn_mobilenetv4_conv_large_o3_oc128_r320\n",
      "2024-10-14 00:15:13.493847: I tensorflow/cc/saved_model/reader.cc:51] Reading meta graph with tags { serve }\n",
      "2024-10-14 00:15:13.493887: I tensorflow/cc/saved_model/reader.cc:146] Reading SavedModel debug info (if present) from: saved_model/pafpn_mobilenetv4_conv_large_o3_oc128_r320\n",
      "2024-10-14 00:15:13.585903: I tensorflow/cc/saved_model/loader.cc:233] Restoring SavedModel bundle.\n",
      "2024-10-14 00:15:13.793387: I tensorflow/cc/saved_model/loader.cc:217] Running initialization op on SavedModel bundle at path: saved_model/pafpn_mobilenetv4_conv_large_o3_oc128_r320\n",
      "2024-10-14 00:15:13.912324: I tensorflow/cc/saved_model/loader.cc:316] SavedModel load for tags { serve }; Status: success: OK. Took 455740 microseconds.\n",
      "Summary on the non-converted ops:\n",
      "---------------------------------\n",
      " * Accepted dialects: tfl, builtin, func\n",
      " * Non-Converted Ops: 227, Total Ops 550, % non-converted = 41.27 %\n",
      " * 227 ARITH ops\n",
      "\n",
      "- arith.constant:  227 occurrences  (f32: 220, i32: 7)\n",
      "\n",
      "\n",
      "\n",
      "  (f32: 29)\n",
      "  (f32: 71)\n",
      "  (f32: 45)\n",
      "  (f32: 56)\n",
      "  (f32: 1)\n",
      "  (f32: 5)\n",
      "  (f32: 2)\n",
      "  (f32: 111)\n",
      "2024-10-14 00:15:15.192803: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 9.992 G  ops, equivalently 4.996 G  MACs\n",
      "fully_quantize: 0, inference_type: 6, input_inference_type: UINT8, output_inference_type: UINT8\n",
      "2024-10-14 00:15:24.646599: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 9.992 G  ops, equivalently 4.996 G  MACs\n",
      "WARNING:timm.models._builder:Unexpected keys (classifier.bias, classifier.weight, conv_head.weight, norm_head.bias, norm_head.num_batches_tracked, norm_head.running_mean, norm_head.running_var, norm_head.weight) found while loading pretrained weights. This may be expected if model is being adapted.\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/decode_heads/decode_head.py:137: UserWarning: threshold is not defined for binary, and defaultsto 0.3\n",
      "  warnings.warn(\"threshold is not defined for binary, and defaults\" \"to 0.3\")\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/losses/cross_entropy_loss.py:250: UserWarning: Default ``avg_non_ignore`` is False, if you would like to ignore the certain label and average loss over non-ignore labels, which is the same with PyTorch official cross_entropy, set ``avg_non_ignore=True``.\n",
      "  warnings.warn(\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/utils/wrappers.py:48: TracerWarning: Converting a tensor to a Python integer might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!\n",
      "  size = [int(t * self.scale_factor) for t in x.shape[-2:]]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/pafpn_mobilenetv4_conv_small_o5_oc64_r320/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/pafpn_mobilenetv4_conv_small_o5_oc64_r320/assets\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Preparing calibration data\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-10-14 00:15:38.261534: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:378] Ignored output_format.\n",
      "2024-10-14 00:15:38.261587: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:381] Ignored drop_control_dependency.\n",
      "2024-10-14 00:15:38.262013: I tensorflow/cc/saved_model/reader.cc:83] Reading SavedModel from: saved_model/pafpn_mobilenetv4_conv_small_o5_oc64_r320\n",
      "2024-10-14 00:15:38.310060: I tensorflow/cc/saved_model/reader.cc:51] Reading meta graph with tags { serve }\n",
      "2024-10-14 00:15:38.310107: I tensorflow/cc/saved_model/reader.cc:146] Reading SavedModel debug info (if present) from: saved_model/pafpn_mobilenetv4_conv_small_o5_oc64_r320\n",
      "2024-10-14 00:15:38.339232: I tensorflow/cc/saved_model/loader.cc:233] Restoring SavedModel bundle.\n",
      "2024-10-14 00:15:38.477853: I tensorflow/cc/saved_model/loader.cc:217] Running initialization op on SavedModel bundle at path: saved_model/pafpn_mobilenetv4_conv_small_o5_oc64_r320\n",
      "2024-10-14 00:15:38.611208: I tensorflow/cc/saved_model/loader.cc:316] SavedModel load for tags { serve }; Status: success: OK. Took 349320 microseconds.\n",
      "Summary on the non-converted ops:\n",
      "---------------------------------\n",
      " * Accepted dialects: tfl, builtin, func\n",
      " * Non-Converted Ops: 132, Total Ops 364, % non-converted = 36.26 %\n",
      " * 132 ARITH ops\n",
      "\n",
      "- arith.constant:  132 occurrences  (f32: 123, i32: 9)\n",
      "\n",
      "\n",
      "\n",
      "  (f32: 22)\n",
      "  (f32: 60)\n",
      "  (f32: 15)\n",
      "  (f32: 38)\n",
      "  (f32: 1)\n",
      "  (f32: 14)\n",
      "  (f32: 4)\n",
      "  (f32: 75)\n",
      "2024-10-14 00:15:39.066535: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 6.374 G  ops, equivalently 3.187 G  MACs\n",
      "fully_quantize: 0, inference_type: 6, input_inference_type: UINT8, output_inference_type: UINT8\n",
      "2024-10-14 00:15:43.212655: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 6.374 G  ops, equivalently 3.187 G  MACs\n",
      "WARNING:timm.models._builder:Unexpected keys (classifier.bias, classifier.weight, conv_head.weight, norm_head.bias, norm_head.num_batches_tracked, norm_head.running_mean, norm_head.running_var, norm_head.weight) found while loading pretrained weights. This may be expected if model is being adapted.\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/decode_heads/decode_head.py:137: UserWarning: threshold is not defined for binary, and defaultsto 0.3\n",
      "  warnings.warn(\"threshold is not defined for binary, and defaults\" \"to 0.3\")\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/losses/cross_entropy_loss.py:250: UserWarning: Default ``avg_non_ignore`` is False, if you would like to ignore the certain label and average loss over non-ignore labels, which is the same with PyTorch official cross_entropy, set ``avg_non_ignore=True``.\n",
      "  warnings.warn(\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/utils/wrappers.py:48: TracerWarning: Converting a tensor to a Python integer might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!\n",
      "  size = [int(t * self.scale_factor) for t in x.shape[-2:]]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/pafpn_mobilenetv4_conv_medium_o5_oc96_r320/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/pafpn_mobilenetv4_conv_medium_o5_oc96_r320/assets\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Preparing calibration data\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-10-14 00:15:57.204775: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:378] Ignored output_format.\n",
      "2024-10-14 00:15:57.204817: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:381] Ignored drop_control_dependency.\n",
      "2024-10-14 00:15:57.205085: I tensorflow/cc/saved_model/reader.cc:83] Reading SavedModel from: saved_model/pafpn_mobilenetv4_conv_medium_o5_oc96_r320\n",
      "2024-10-14 00:15:57.381566: I tensorflow/cc/saved_model/reader.cc:51] Reading meta graph with tags { serve }\n",
      "2024-10-14 00:15:57.381601: I tensorflow/cc/saved_model/reader.cc:146] Reading SavedModel debug info (if present) from: saved_model/pafpn_mobilenetv4_conv_medium_o5_oc96_r320\n",
      "2024-10-14 00:15:57.402779: I tensorflow/cc/saved_model/loader.cc:233] Restoring SavedModel bundle.\n",
      "2024-10-14 00:15:57.505220: I tensorflow/cc/saved_model/loader.cc:217] Running initialization op on SavedModel bundle at path: saved_model/pafpn_mobilenetv4_conv_medium_o5_oc96_r320\n",
      "2024-10-14 00:15:57.605315: I tensorflow/cc/saved_model/loader.cc:316] SavedModel load for tags { serve }; Status: success: OK. Took 400282 microseconds.\n",
      "Summary on the non-converted ops:\n",
      "---------------------------------\n",
      " * Accepted dialects: tfl, builtin, func\n",
      " * Non-Converted Ops: 194, Total Ops 507, % non-converted = 38.26 %\n",
      " * 194 ARITH ops\n",
      "\n",
      "- arith.constant:  194 occurrences  (f32: 185, i32: 9)\n",
      "\n",
      "\n",
      "\n",
      "  (f32: 30)\n",
      "  (f32: 76)\n",
      "  (f32: 30)\n",
      "  (f32: 52)\n",
      "  (f32: 1)\n",
      "  (f32: 14)\n",
      "  (f32: 4)\n",
      "  (f32: 103)\n",
      "2024-10-14 00:15:58.201439: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 14.454 G  ops, equivalently 7.227 G  MACs\n",
      "fully_quantize: 0, inference_type: 6, input_inference_type: UINT8, output_inference_type: UINT8\n",
      "2024-10-14 00:16:05.730490: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 14.454 G  ops, equivalently 7.227 G  MACs\n",
      "WARNING:timm.models._builder:Unexpected keys (classifier.bias, classifier.weight, conv_head.weight, norm_head.bias, norm_head.num_batches_tracked, norm_head.running_mean, norm_head.running_var, norm_head.weight) found while loading pretrained weights. This may be expected if model is being adapted.\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/decode_heads/decode_head.py:137: UserWarning: threshold is not defined for binary, and defaultsto 0.3\n",
      "  warnings.warn(\"threshold is not defined for binary, and defaults\" \"to 0.3\")\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/losses/cross_entropy_loss.py:250: UserWarning: Default ``avg_non_ignore`` is False, if you would like to ignore the certain label and average loss over non-ignore labels, which is the same with PyTorch official cross_entropy, set ``avg_non_ignore=True``.\n",
      "  warnings.warn(\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/utils/wrappers.py:48: TracerWarning: Converting a tensor to a Python integer might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!\n",
      "  size = [int(t * self.scale_factor) for t in x.shape[-2:]]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/pafpn_mobilenetv4_conv_large_o5_oc128_r320/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/pafpn_mobilenetv4_conv_large_o5_oc128_r320/assets\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Preparing calibration data\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-10-14 00:16:26.925138: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:378] Ignored output_format.\n",
      "2024-10-14 00:16:26.925181: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:381] Ignored drop_control_dependency.\n",
      "2024-10-14 00:16:26.925405: I tensorflow/cc/saved_model/reader.cc:83] Reading SavedModel from: saved_model/pafpn_mobilenetv4_conv_large_o5_oc128_r320\n",
      "2024-10-14 00:16:26.967716: I tensorflow/cc/saved_model/reader.cc:51] Reading meta graph with tags { serve }\n",
      "2024-10-14 00:16:26.967741: I tensorflow/cc/saved_model/reader.cc:146] Reading SavedModel debug info (if present) from: saved_model/pafpn_mobilenetv4_conv_large_o5_oc128_r320\n",
      "2024-10-14 00:16:27.019646: I tensorflow/cc/saved_model/loader.cc:233] Restoring SavedModel bundle.\n",
      "2024-10-14 00:16:27.181505: I tensorflow/cc/saved_model/loader.cc:217] Running initialization op on SavedModel bundle at path: saved_model/pafpn_mobilenetv4_conv_large_o5_oc128_r320\n",
      "2024-10-14 00:16:27.349006: I tensorflow/cc/saved_model/loader.cc:316] SavedModel load for tags { serve }; Status: success: OK. Took 423603 microseconds.\n",
      "Summary on the non-converted ops:\n",
      "---------------------------------\n",
      " * Accepted dialects: tfl, builtin, func\n",
      " * Non-Converted Ops: 244, Total Ops 632, % non-converted = 38.61 %\n",
      " * 244 ARITH ops\n",
      "\n",
      "- arith.constant:  244 occurrences  (f32: 235, i32: 9)\n",
      "\n",
      "\n",
      "\n",
      "  (f32: 35)\n",
      "  (f32: 86)\n",
      "  (f32: 45)\n",
      "  (f32: 67)\n",
      "  (f32: 1)\n",
      "  (f32: 14)\n",
      "  (f32: 4)\n",
      "  (f32: 133)\n",
      "2024-10-14 00:16:28.746309: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 26.406 G  ops, equivalently 13.203 G  MACs\n",
      "fully_quantize: 0, inference_type: 6, input_inference_type: UINT8, output_inference_type: UINT8\n",
      "2024-10-14 00:16:41.882770: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 26.406 G  ops, equivalently 13.203 G  MACs\n"
     ]
    }
   ],
   "source": [
    "from models.fpn import OCDPAFPN\n",
    "\n",
    "num_outs_list = [\n",
    "    3,\n",
    "    4,\n",
    "]\n",
    "\n",
    "image_size = (320, 320)\n",
    "backbone_dict = {\n",
    "    \"small\": \"mobilenetv4_conv_small.e2400_r224_in1k\",\n",
    "    \"medium\": \"mobilenetv4_conv_medium.e500_r256_in1k\",\n",
    "    \"large\": \"mobilenetv4_conv_large.e500_r256_in1k\",\n",
    "}\n",
    "fpn_type = \"mm\"\n",
    "\n",
    "list_out_channel = [64, 96, 128]\n",
    "\n",
    "for num_outs in num_outs_list:\n",
    "    for out_channel, backbone_key in zip(list_out_channel, backbone_dict.keys()):\n",
    "        backbone_value = backbone_dict[backbone_key]\n",
    "        model = OCDPAFPN(\n",
    "            backbone=backbone_value,\n",
    "            n_classes=1,\n",
    "            num_outs=num_outs,\n",
    "            out_channel=out_channel,\n",
    "            fpn_type=fpn_type,\n",
    "        )\n",
    "\n",
    "        model_name = f\"pafpn_mobilenetv4_conv_{backbone_key}_o{num_outs}_oc{out_channel}_r{image_size[0]}\"\n",
    "        torch_model_path = None  # use initialized model\n",
    "        onnx_model_path = f\"saved_model/{model_name}/{model_name}.onnx\"\n",
    "        tf_folder_path = f\"saved_model/{model_name}\"\n",
    "        tflite_model_path = onnx_model_path.replace(\n",
    "            \".onnx\", \"_full_integer_quant_uint8.tflite\"\n",
    "        )\n",
    "        calib_data_path = \"calibdata.npy\"\n",
    "\n",
    "        converter = Converter(\n",
    "            model,\n",
    "            image_size,\n",
    "            torch_model_path,\n",
    "            onnx_model_path,\n",
    "            tf_folder_path,\n",
    "            tflite_model_path,\n",
    "            calib_data_path,\n",
    "            opset_version=11,\n",
    "        )\n",
    "        converter.convert()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### fpn_type: mm"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "WARNING:timm.models._builder:Unexpected keys (classifier.bias, classifier.weight, conv_head.weight, norm_head.bias, norm_head.num_batches_tracked, norm_head.running_mean, norm_head.running_var, norm_head.weight) found while loading pretrained weights. This may be expected if model is being adapted.\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/decode_heads/decode_head.py:137: UserWarning: threshold is not defined for binary, and defaultsto 0.3\n",
      "  warnings.warn(\"threshold is not defined for binary, and defaults\" \"to 0.3\")\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/losses/cross_entropy_loss.py:250: UserWarning: Default ``avg_non_ignore`` is False, if you would like to ignore the certain label and average loss over non-ignore labels, which is the same with PyTorch official cross_entropy, set ``avg_non_ignore=True``.\n",
      "  warnings.warn(\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/utils/wrappers.py:48: TracerWarning: Converting a tensor to a Python integer might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!\n",
      "  size = [int(t * self.scale_factor) for t in x.shape[-2:]]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/pafpn_mobilenetv4_conv_small_o4_oc64_r320/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/pafpn_mobilenetv4_conv_small_o4_oc64_r320/assets\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Preparing calibration data\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-10-07 23:56:34.771598: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:378] Ignored output_format.\n",
      "2024-10-07 23:56:34.771635: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:381] Ignored drop_control_dependency.\n",
      "2024-10-07 23:56:34.771839: I tensorflow/cc/saved_model/reader.cc:83] Reading SavedModel from: saved_model/pafpn_mobilenetv4_conv_small_o4_oc64_r320\n",
      "2024-10-07 23:56:34.777767: I tensorflow/cc/saved_model/reader.cc:51] Reading meta graph with tags { serve }\n",
      "2024-10-07 23:56:34.777801: I tensorflow/cc/saved_model/reader.cc:146] Reading SavedModel debug info (if present) from: saved_model/pafpn_mobilenetv4_conv_small_o4_oc64_r320\n",
      "2024-10-07 23:56:34.788754: I tensorflow/cc/saved_model/loader.cc:233] Restoring SavedModel bundle.\n",
      "2024-10-07 23:56:34.852714: I tensorflow/cc/saved_model/loader.cc:217] Running initialization op on SavedModel bundle at path: saved_model/pafpn_mobilenetv4_conv_small_o4_oc64_r320\n",
      "2024-10-07 23:56:34.916655: I tensorflow/cc/saved_model/loader.cc:316] SavedModel load for tags { serve }; Status: success: OK. Took 144816 microseconds.\n",
      "Summary on the non-converted ops:\n",
      "---------------------------------\n",
      " * Accepted dialects: tfl, builtin, func\n",
      " * Non-Converted Ops: 123, Total Ops 320, % non-converted = 38.44 %\n",
      " * 123 ARITH ops\n",
      "\n",
      "- arith.constant:  123 occurrences  (f32: 115, i32: 8)\n",
      "\n",
      "\n",
      "\n",
      "  (f32: 19)\n",
      "  (f32: 52)\n",
      "  (f32: 15)\n",
      "  (f32: 32)\n",
      "  (f32: 1)\n",
      "  (f32: 9)\n",
      "  (f32: 3)\n",
      "  (f32: 63)\n",
      "2024-10-07 23:56:35.235971: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 2.129 G  ops, equivalently 1.065 G  MACs\n",
      "fully_quantize: 0, inference_type: 6, input_inference_type: UINT8, output_inference_type: UINT8\n",
      "2024-10-07 23:56:37.087270: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 2.129 G  ops, equivalently 1.065 G  MACs\n",
      "WARNING:timm.models._builder:Unexpected keys (classifier.bias, classifier.weight, conv_head.weight, norm_head.bias, norm_head.num_batches_tracked, norm_head.running_mean, norm_head.running_var, norm_head.weight) found while loading pretrained weights. This may be expected if model is being adapted.\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/decode_heads/decode_head.py:137: UserWarning: threshold is not defined for binary, and defaultsto 0.3\n",
      "  warnings.warn(\"threshold is not defined for binary, and defaults\" \"to 0.3\")\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/losses/cross_entropy_loss.py:250: UserWarning: Default ``avg_non_ignore`` is False, if you would like to ignore the certain label and average loss over non-ignore labels, which is the same with PyTorch official cross_entropy, set ``avg_non_ignore=True``.\n",
      "  warnings.warn(\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/utils/wrappers.py:48: TracerWarning: Converting a tensor to a Python integer might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!\n",
      "  size = [int(t * self.scale_factor) for t in x.shape[-2:]]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/pafpn_mobilenetv4_conv_medium_o4_oc96_r320/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/pafpn_mobilenetv4_conv_medium_o4_oc96_r320/assets\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Preparing calibration data\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-10-07 23:56:48.801691: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:378] Ignored output_format.\n",
      "2024-10-07 23:56:48.801740: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:381] Ignored drop_control_dependency.\n",
      "2024-10-07 23:56:48.801966: I tensorflow/cc/saved_model/reader.cc:83] Reading SavedModel from: saved_model/pafpn_mobilenetv4_conv_medium_o4_oc96_r320\n",
      "2024-10-07 23:56:48.820256: I tensorflow/cc/saved_model/reader.cc:51] Reading meta graph with tags { serve }\n",
      "2024-10-07 23:56:48.820295: I tensorflow/cc/saved_model/reader.cc:146] Reading SavedModel debug info (if present) from: saved_model/pafpn_mobilenetv4_conv_medium_o4_oc96_r320\n",
      "2024-10-07 23:56:48.844323: I tensorflow/cc/saved_model/loader.cc:233] Restoring SavedModel bundle.\n",
      "2024-10-07 23:56:48.959540: I tensorflow/cc/saved_model/loader.cc:217] Running initialization op on SavedModel bundle at path: saved_model/pafpn_mobilenetv4_conv_medium_o4_oc96_r320\n",
      "2024-10-07 23:56:49.059790: I tensorflow/cc/saved_model/loader.cc:316] SavedModel load for tags { serve }; Status: success: OK. Took 257825 microseconds.\n",
      "Summary on the non-converted ops:\n",
      "---------------------------------\n",
      " * Accepted dialects: tfl, builtin, func\n",
      " * Non-Converted Ops: 185, Total Ops 463, % non-converted = 39.96 %\n",
      " * 185 ARITH ops\n",
      "\n",
      "- arith.constant:  185 occurrences  (f32: 177, i32: 8)\n",
      "\n",
      "\n",
      "\n",
      "  (f32: 27)\n",
      "  (f32: 68)\n",
      "  (f32: 30)\n",
      "  (f32: 46)\n",
      "  (f32: 1)\n",
      "  (f32: 9)\n",
      "  (f32: 3)\n",
      "  (f32: 91)\n",
      "2024-10-07 23:56:49.717354: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 6.142 G  ops, equivalently 3.071 G  MACs\n",
      "fully_quantize: 0, inference_type: 6, input_inference_type: UINT8, output_inference_type: UINT8\n",
      "2024-10-07 23:56:53.993938: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 6.142 G  ops, equivalently 3.071 G  MACs\n",
      "WARNING:timm.models._builder:Unexpected keys (classifier.bias, classifier.weight, conv_head.weight, norm_head.bias, norm_head.num_batches_tracked, norm_head.running_mean, norm_head.running_var, norm_head.weight) found while loading pretrained weights. This may be expected if model is being adapted.\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/decode_heads/decode_head.py:137: UserWarning: threshold is not defined for binary, and defaultsto 0.3\n",
      "  warnings.warn(\"threshold is not defined for binary, and defaults\" \"to 0.3\")\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/losses/cross_entropy_loss.py:250: UserWarning: Default ``avg_non_ignore`` is False, if you would like to ignore the certain label and average loss over non-ignore labels, which is the same with PyTorch official cross_entropy, set ``avg_non_ignore=True``.\n",
      "  warnings.warn(\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/utils/wrappers.py:48: TracerWarning: Converting a tensor to a Python integer might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!\n",
      "  size = [int(t * self.scale_factor) for t in x.shape[-2:]]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/pafpn_mobilenetv4_conv_large_o4_oc128_r320/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/pafpn_mobilenetv4_conv_large_o4_oc128_r320/assets\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Preparing calibration data\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-10-07 23:57:09.509277: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:378] Ignored output_format.\n",
      "2024-10-07 23:57:09.509310: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:381] Ignored drop_control_dependency.\n",
      "2024-10-07 23:57:09.509496: I tensorflow/cc/saved_model/reader.cc:83] Reading SavedModel from: saved_model/pafpn_mobilenetv4_conv_large_o4_oc128_r320\n",
      "2024-10-07 23:57:09.543057: I tensorflow/cc/saved_model/reader.cc:51] Reading meta graph with tags { serve }\n",
      "2024-10-07 23:57:09.543095: I tensorflow/cc/saved_model/reader.cc:146] Reading SavedModel debug info (if present) from: saved_model/pafpn_mobilenetv4_conv_large_o4_oc128_r320\n",
      "2024-10-07 23:57:09.577611: I tensorflow/cc/saved_model/loader.cc:233] Restoring SavedModel bundle.\n",
      "2024-10-07 23:57:09.711547: I tensorflow/cc/saved_model/loader.cc:217] Running initialization op on SavedModel bundle at path: saved_model/pafpn_mobilenetv4_conv_large_o4_oc128_r320\n",
      "2024-10-07 23:57:09.861317: I tensorflow/cc/saved_model/loader.cc:316] SavedModel load for tags { serve }; Status: success: OK. Took 351822 microseconds.\n",
      "Summary on the non-converted ops:\n",
      "---------------------------------\n",
      " * Accepted dialects: tfl, builtin, func\n",
      " * Non-Converted Ops: 235, Total Ops 588, % non-converted = 39.97 %\n",
      " * 235 ARITH ops\n",
      "\n",
      "- arith.constant:  235 occurrences  (f32: 227, i32: 8)\n",
      "\n",
      "\n",
      "\n",
      "  (f32: 32)\n",
      "  (f32: 78)\n",
      "  (f32: 45)\n",
      "  (f32: 61)\n",
      "  (f32: 1)\n",
      "  (f32: 9)\n",
      "  (f32: 3)\n",
      "  (f32: 121)\n",
      "2024-10-07 23:57:11.166241: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 13.293 G  ops, equivalently 6.646 G  MACs\n",
      "fully_quantize: 0, inference_type: 6, input_inference_type: UINT8, output_inference_type: UINT8\n",
      "2024-10-07 23:57:20.668531: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 13.293 G  ops, equivalently 6.646 G  MACs\n"
     ]
    }
   ],
   "source": [
    "from models.fpn import OCDPAFPN\n",
    "\n",
    "num_outs = 4\n",
    "\n",
    "image_size = (320, 320)\n",
    "backbone_dict = {\n",
    "    \"small\": \"mobilenetv4_conv_small.e2400_r224_in1k\",\n",
    "    \"medium\": \"mobilenetv4_conv_medium.e500_r256_in1k\",\n",
    "    \"large\": \"mobilenetv4_conv_large.e500_r256_in1k\",\n",
    "}\n",
    "fpn_type = \"mm\"\n",
    "\n",
    "list_out_channel = [64, 96, 128]\n",
    "\n",
    "for out_channel, backbone_key in zip(list_out_channel, backbone_dict.keys()):\n",
    "    backbone_value = backbone_dict[backbone_key]\n",
    "    model = OCDPAFPN(\n",
    "        backbone=backbone_value,\n",
    "        n_classes=1,\n",
    "        num_outs=num_outs,\n",
    "        out_channel=out_channel,\n",
    "        fpn_type=fpn_type,\n",
    "    )\n",
    "\n",
    "    model_name = f\"pafpn_mobilenetv4_conv_{backbone_key}_o{num_outs}_oc{out_channel}_r{image_size[0]}\"\n",
    "    torch_model_path = None  # use initialized model\n",
    "    onnx_model_path = f\"saved_model/{model_name}/{model_name}.onnx\"\n",
    "    tf_folder_path = f\"saved_model/{model_name}\"\n",
    "    tflite_model_path = onnx_model_path.replace(\n",
    "        \".onnx\", \"_full_integer_quant_uint8.tflite\"\n",
    "    )\n",
    "    calib_data_path = \"calibdata.npy\"\n",
    "\n",
    "    converter = Converter(\n",
    "        model,\n",
    "        image_size,\n",
    "        torch_model_path,\n",
    "        onnx_model_path,\n",
    "        tf_folder_path,\n",
    "        tflite_model_path,\n",
    "        calib_data_path,\n",
    "        opset_version=11,\n",
    "    )\n",
    "    converter.convert()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### fpn_type: extra_dw"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from models.fpn import OCDPAFPN\n",
    "\n",
    "num_outs = 4\n",
    "\n",
    "image_size = (320, 320)\n",
    "backbone_dict = {\n",
    "    \"small\": \"mobilenetv4_conv_small.e2400_r224_in1k\",\n",
    "    \"medium\": \"mobilenetv4_conv_medium.e500_r256_in1k\",\n",
    "    \"large\": \"mobilenetv4_conv_large.e500_r256_in1k\",\n",
    "}\n",
    "fpn_type = \"extra_dw\"\n",
    "\n",
    "list_out_channel = [64, 96, 128]\n",
    "\n",
    "for out_channel, backbone_key in zip(list_out_channel, backbone_dict.keys()):\n",
    "    backbone_value = backbone_dict[backbone_key]\n",
    "    model = OCDPAFPN(\n",
    "        backbone=backbone_value,\n",
    "        n_classes=1,\n",
    "        num_outs=num_outs,\n",
    "        out_channel=out_channel,\n",
    "        fpn_type=fpn_type,\n",
    "    )\n",
    "\n",
    "    model_name = f\"pafpn_mobilenetv4_conv_{backbone_key}_{fpn_type}_o{num_outs}_oc{out_channel}_r{image_size[0]}\"\n",
    "    torch_model_path = None  # use initialized model\n",
    "    onnx_model_path = f\"saved_model/{model_name}/{model_name}.onnx\"\n",
    "    tf_folder_path = f\"saved_model/{model_name}\"\n",
    "    tflite_model_path = onnx_model_path.replace(\n",
    "        \".onnx\", \"_full_integer_quant_uint8.tflite\"\n",
    "    )\n",
    "    calib_data_path = \"calibdata.npy\"\n",
    "\n",
    "    converter = Converter(\n",
    "        model,\n",
    "        image_size,\n",
    "        torch_model_path,\n",
    "        onnx_model_path,\n",
    "        tf_folder_path,\n",
    "        tflite_model_path,\n",
    "        calib_data_path,\n",
    "        opset_version=11,\n",
    "    )\n",
    "    converter.convert()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "WARNING:timm.models._builder:Unexpected keys (classifier.bias, classifier.weight, conv_head.weight, norm_head.bias, norm_head.num_batches_tracked, norm_head.running_mean, norm_head.running_var, norm_head.weight) found while loading pretrained weights. This may be expected if model is being adapted.\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/decode_heads/decode_head.py:137: UserWarning: threshold is not defined for binary, and defaultsto 0.3\n",
      "  warnings.warn(\"threshold is not defined for binary, and defaults\" \"to 0.3\")\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/losses/cross_entropy_loss.py:250: UserWarning: Default ``avg_non_ignore`` is False, if you would like to ignore the certain label and average loss over non-ignore labels, which is the same with PyTorch official cross_entropy, set ``avg_non_ignore=True``.\n",
      "  warnings.warn(\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/utils/wrappers.py:48: TracerWarning: Converting a tensor to a Python integer might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!\n",
      "  size = [int(t * self.scale_factor) for t in x.shape[-2:]]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/pafpn_mobilenetv4_conv_small_ib_o4_oc64_r320/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/pafpn_mobilenetv4_conv_small_ib_o4_oc64_r320/assets\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Preparing calibration data\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-10-07 23:58:36.625772: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:378] Ignored output_format.\n",
      "2024-10-07 23:58:36.625808: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:381] Ignored drop_control_dependency.\n",
      "2024-10-07 23:58:36.626004: I tensorflow/cc/saved_model/reader.cc:83] Reading SavedModel from: saved_model/pafpn_mobilenetv4_conv_small_ib_o4_oc64_r320\n",
      "2024-10-07 23:58:36.631723: I tensorflow/cc/saved_model/reader.cc:51] Reading meta graph with tags { serve }\n",
      "2024-10-07 23:58:36.631739: I tensorflow/cc/saved_model/reader.cc:146] Reading SavedModel debug info (if present) from: saved_model/pafpn_mobilenetv4_conv_small_ib_o4_oc64_r320\n",
      "2024-10-07 23:58:36.643301: I tensorflow/cc/saved_model/loader.cc:233] Restoring SavedModel bundle.\n",
      "2024-10-07 23:58:36.710963: I tensorflow/cc/saved_model/loader.cc:217] Running initialization op on SavedModel bundle at path: saved_model/pafpn_mobilenetv4_conv_small_ib_o4_oc64_r320\n",
      "2024-10-07 23:58:36.778154: I tensorflow/cc/saved_model/loader.cc:316] SavedModel load for tags { serve }; Status: success: OK. Took 152151 microseconds.\n",
      "Summary on the non-converted ops:\n",
      "---------------------------------\n",
      " * Accepted dialects: tfl, builtin, func\n",
      " * Non-Converted Ops: 136, Total Ops 357, % non-converted = 38.10 %\n",
      " * 136 ARITH ops\n",
      "\n",
      "- arith.constant:  136 occurrences  (f32: 128, i32: 8)\n",
      "\n",
      "\n",
      "\n",
      "  (f32: 22)\n",
      "  (f32: 58)\n",
      "  (f32: 21)\n",
      "  (f32: 35)\n",
      "  (f32: 1)\n",
      "  (f32: 9)\n",
      "  (f32: 3)\n",
      "  (f32: 69)\n",
      "2024-10-07 23:58:37.103241: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 2.198 G  ops, equivalently 1.099 G  MACs\n",
      "fully_quantize: 0, inference_type: 6, input_inference_type: UINT8, output_inference_type: UINT8\n",
      "2024-10-07 23:58:39.455732: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 2.198 G  ops, equivalently 1.099 G  MACs\n",
      "WARNING:timm.models._builder:Unexpected keys (classifier.bias, classifier.weight, conv_head.weight, norm_head.bias, norm_head.num_batches_tracked, norm_head.running_mean, norm_head.running_var, norm_head.weight) found while loading pretrained weights. This may be expected if model is being adapted.\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/decode_heads/decode_head.py:137: UserWarning: threshold is not defined for binary, and defaultsto 0.3\n",
      "  warnings.warn(\"threshold is not defined for binary, and defaults\" \"to 0.3\")\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/losses/cross_entropy_loss.py:250: UserWarning: Default ``avg_non_ignore`` is False, if you would like to ignore the certain label and average loss over non-ignore labels, which is the same with PyTorch official cross_entropy, set ``avg_non_ignore=True``.\n",
      "  warnings.warn(\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/utils/wrappers.py:48: TracerWarning: Converting a tensor to a Python integer might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!\n",
      "  size = [int(t * self.scale_factor) for t in x.shape[-2:]]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/pafpn_mobilenetv4_conv_medium_ib_o4_oc96_r320/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/pafpn_mobilenetv4_conv_medium_ib_o4_oc96_r320/assets\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Preparing calibration data\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-10-07 23:58:51.866976: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:378] Ignored output_format.\n",
      "2024-10-07 23:58:51.867027: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:381] Ignored drop_control_dependency.\n",
      "2024-10-07 23:58:51.867268: I tensorflow/cc/saved_model/reader.cc:83] Reading SavedModel from: saved_model/pafpn_mobilenetv4_conv_medium_ib_o4_oc96_r320\n",
      "2024-10-07 23:58:51.883333: I tensorflow/cc/saved_model/reader.cc:51] Reading meta graph with tags { serve }\n",
      "2024-10-07 23:58:51.883375: I tensorflow/cc/saved_model/reader.cc:146] Reading SavedModel debug info (if present) from: saved_model/pafpn_mobilenetv4_conv_medium_ib_o4_oc96_r320\n",
      "2024-10-07 23:58:51.908685: I tensorflow/cc/saved_model/loader.cc:233] Restoring SavedModel bundle.\n",
      "2024-10-07 23:58:52.017126: I tensorflow/cc/saved_model/loader.cc:217] Running initialization op on SavedModel bundle at path: saved_model/pafpn_mobilenetv4_conv_medium_ib_o4_oc96_r320\n",
      "2024-10-07 23:58:52.123748: I tensorflow/cc/saved_model/loader.cc:316] SavedModel load for tags { serve }; Status: success: OK. Took 256481 microseconds.\n",
      "Summary on the non-converted ops:\n",
      "---------------------------------\n",
      " * Accepted dialects: tfl, builtin, func\n",
      " * Non-Converted Ops: 198, Total Ops 500, % non-converted = 39.60 %\n",
      " * 198 ARITH ops\n",
      "\n",
      "- arith.constant:  198 occurrences  (f32: 190, i32: 8)\n",
      "\n",
      "\n",
      "\n",
      "  (f32: 30)\n",
      "  (f32: 74)\n",
      "  (f32: 36)\n",
      "  (f32: 49)\n",
      "  (f32: 1)\n",
      "  (f32: 9)\n",
      "  (f32: 3)\n",
      "  (f32: 97)\n",
      "2024-10-07 23:58:52.748460: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 6.285 G  ops, equivalently 3.142 G  MACs\n",
      "fully_quantize: 0, inference_type: 6, input_inference_type: UINT8, output_inference_type: UINT8\n",
      "2024-10-07 23:58:57.830778: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 6.285 G  ops, equivalently 3.142 G  MACs\n",
      "WARNING:timm.models._builder:Unexpected keys (classifier.bias, classifier.weight, conv_head.weight, norm_head.bias, norm_head.num_batches_tracked, norm_head.running_mean, norm_head.running_var, norm_head.weight) found while loading pretrained weights. This may be expected if model is being adapted.\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/decode_heads/decode_head.py:137: UserWarning: threshold is not defined for binary, and defaultsto 0.3\n",
      "  warnings.warn(\"threshold is not defined for binary, and defaults\" \"to 0.3\")\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/losses/cross_entropy_loss.py:250: UserWarning: Default ``avg_non_ignore`` is False, if you would like to ignore the certain label and average loss over non-ignore labels, which is the same with PyTorch official cross_entropy, set ``avg_non_ignore=True``.\n",
      "  warnings.warn(\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/utils/wrappers.py:48: TracerWarning: Converting a tensor to a Python integer might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!\n",
      "  size = [int(t * self.scale_factor) for t in x.shape[-2:]]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/pafpn_mobilenetv4_conv_large_ib_o4_oc128_r320/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/pafpn_mobilenetv4_conv_large_ib_o4_oc128_r320/assets\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Preparing calibration data\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-10-07 23:59:15.654810: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:378] Ignored output_format.\n",
      "2024-10-07 23:59:15.654869: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:381] Ignored drop_control_dependency.\n",
      "2024-10-07 23:59:15.655118: I tensorflow/cc/saved_model/reader.cc:83] Reading SavedModel from: saved_model/pafpn_mobilenetv4_conv_large_ib_o4_oc128_r320\n",
      "2024-10-07 23:59:15.703481: I tensorflow/cc/saved_model/reader.cc:51] Reading meta graph with tags { serve }\n",
      "2024-10-07 23:59:15.703517: I tensorflow/cc/saved_model/reader.cc:146] Reading SavedModel debug info (if present) from: saved_model/pafpn_mobilenetv4_conv_large_ib_o4_oc128_r320\n",
      "2024-10-07 23:59:15.753392: I tensorflow/cc/saved_model/loader.cc:233] Restoring SavedModel bundle.\n",
      "2024-10-07 23:59:15.892776: I tensorflow/cc/saved_model/loader.cc:217] Running initialization op on SavedModel bundle at path: saved_model/pafpn_mobilenetv4_conv_large_ib_o4_oc128_r320\n",
      "2024-10-07 23:59:16.008172: I tensorflow/cc/saved_model/loader.cc:316] SavedModel load for tags { serve }; Status: success: OK. Took 353057 microseconds.\n",
      "Summary on the non-converted ops:\n",
      "---------------------------------\n",
      " * Accepted dialects: tfl, builtin, func\n",
      " * Non-Converted Ops: 248, Total Ops 625, % non-converted = 39.68 %\n",
      " * 248 ARITH ops\n",
      "\n",
      "- arith.constant:  248 occurrences  (f32: 240, i32: 8)\n",
      "\n",
      "\n",
      "\n",
      "  (f32: 35)\n",
      "  (f32: 84)\n",
      "  (f32: 51)\n",
      "  (f32: 64)\n",
      "  (f32: 1)\n",
      "  (f32: 9)\n",
      "  (f32: 3)\n",
      "  (f32: 127)\n",
      "2024-10-07 23:59:17.402326: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 13.534 G  ops, equivalently 6.767 G  MACs\n",
      "fully_quantize: 0, inference_type: 6, input_inference_type: UINT8, output_inference_type: UINT8\n",
      "2024-10-07 23:59:28.165044: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 13.534 G  ops, equivalently 6.767 G  MACs\n"
     ]
    }
   ],
   "source": [
    "from models.fpn import OCDPAFPN\n",
    "\n",
    "num_outs = 4\n",
    "\n",
    "image_size = (320, 320)\n",
    "backbone_dict = {\n",
    "    \"small\": \"mobilenetv4_conv_small.e2400_r224_in1k\",\n",
    "    \"medium\": \"mobilenetv4_conv_medium.e500_r256_in1k\",\n",
    "    \"large\": \"mobilenetv4_conv_large.e500_r256_in1k\",\n",
    "}\n",
    "fpn_type = \"ib\"\n",
    "\n",
    "list_out_channel = [64, 96, 128]\n",
    "\n",
    "for out_channel, backbone_key in zip(list_out_channel, backbone_dict.keys()):\n",
    "    backbone_value = backbone_dict[backbone_key]\n",
    "    model = OCDPAFPN(\n",
    "        backbone=backbone_value,\n",
    "        n_classes=1,\n",
    "        num_outs=num_outs,\n",
    "        out_channel=out_channel,\n",
    "        fpn_type=fpn_type,\n",
    "    )\n",
    "\n",
    "    model_name = f\"pafpn_mobilenetv4_conv_{backbone_key}_{fpn_type}_o{num_outs}_oc{out_channel}_r{image_size[0]}\"\n",
    "    torch_model_path = None  # use initialized model\n",
    "    onnx_model_path = f\"saved_model/{model_name}/{model_name}.onnx\"\n",
    "    tf_folder_path = f\"saved_model/{model_name}\"\n",
    "    tflite_model_path = onnx_model_path.replace(\n",
    "        \".onnx\", \"_full_integer_quant_uint8.tflite\"\n",
    "    )\n",
    "    calib_data_path = \"calibdata.npy\"\n",
    "\n",
    "    converter = Converter(\n",
    "        model,\n",
    "        image_size,\n",
    "        torch_model_path,\n",
    "        onnx_model_path,\n",
    "        tf_folder_path,\n",
    "        tflite_model_path,\n",
    "        calib_data_path,\n",
    "        opset_version=11,\n",
    "    )\n",
    "    converter.convert()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### fpn_type: convnext"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/chenxin/anaconda3/envs/ocd/lib/python3.10/site-packages/albumentations/__init__.py:13: UserWarning: A new version of Albumentations is available: 1.4.18 (you have 1.4.16). Upgrade using: pip install -U albumentations. To disable automatic update checks, set the environment variable NO_ALBUMENTATIONS_UPDATE to 1.\n",
      "  check_for_updates()\n",
      "Unexpected keys (classifier.bias, classifier.weight, conv_head.weight, norm_head.bias, norm_head.num_batches_tracked, norm_head.running_mean, norm_head.running_var, norm_head.weight) found while loading pretrained weights. This may be expected if model is being adapted.\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/decode_heads/decode_head.py:137: UserWarning: threshold is not defined for binary, and defaultsto 0.3\n",
      "  warnings.warn(\"threshold is not defined for binary, and defaults\" \"to 0.3\")\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/losses/cross_entropy_loss.py:250: UserWarning: Default ``avg_non_ignore`` is False, if you would like to ignore the certain label and average loss over non-ignore labels, which is the same with PyTorch official cross_entropy, set ``avg_non_ignore=True``.\n",
      "  warnings.warn(\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/utils/wrappers.py:48: TracerWarning: Converting a tensor to a Python integer might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!\n",
      "  size = [int(t * self.scale_factor) for t in x.shape[-2:]]\n",
      "2024-10-11 15:34:39.604372: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-11 15:34:39.695287: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-11 15:34:39.696943: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-11 15:34:39.697979: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-11 15:34:39.712885: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-11 15:34:39.715942: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-11 15:34:39.717852: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-11 15:34:39.719173: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-11 15:34:39.720586: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-11 15:34:39.721674: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-11 15:34:39.723858: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-11 15:34:39.725311: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-11 15:34:41.844558: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-11 15:34:41.846268: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-11 15:34:41.847379: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-11 15:34:41.848683: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-11 15:34:41.850221: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-11 15:34:41.851255: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-11 15:34:41.852230: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-11 15:34:41.853375: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-11 15:34:41.854707: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-11 15:34:41.855681: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-11 15:34:41.857923: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-11 15:34:41.859509: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-11 15:34:41.928511: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-11 15:34:41.929664: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-11 15:34:41.931604: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-11 15:34:41.932988: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-11 15:34:41.934493: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-11 15:34:41.935454: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-11 15:34:41.938089: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-11 15:34:41.939192: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-11 15:34:41.940348: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-11 15:34:41.941297: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1929] Created device /job:localhost/replica:0/task:0/device:GPU:0 with 45263 MB memory:  -> device: 0, name: NVIDIA A100 80GB PCIe, pci bus id: 0000:01:00.0, compute capability: 8.0\n",
      "2024-10-11 15:34:41.959395: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-11 15:34:41.969125: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1929] Created device /job:localhost/replica:0/task:0/device:GPU:1 with 46304 MB memory:  -> device: 1, name: NVIDIA A100 80GB PCIe, pci bus id: 0000:41:00.0, compute capability: 8.0\n",
      "2024-10-11 15:34:41.971184: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-11 15:34:41.972893: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1929] Created device /job:localhost/replica:0/task:0/device:GPU:2 with 40212 MB memory:  -> device: 2, name: NVIDIA A100 80GB PCIe, pci bus id: 0000:81:00.0, compute capability: 8.0\n",
      "2024-10-11 15:34:41.998515: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-11 15:34:42.000043: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1929] Created device /job:localhost/replica:0/task:0/device:GPU:3 with 29098 MB memory:  -> device: 3, name: NVIDIA A100 80GB PCIe, pci bus id: 0000:c1:00.0, compute capability: 8.0\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:From /home/chenxin/anaconda3/envs/ocd/lib/python3.10/site-packages/tensorflow/python/util/dispatch.py:1260: resize_nearest_neighbor (from tensorflow.python.ops.image_ops_impl) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Use `tf.image.resize(...method=ResizeMethod.NEAREST_NEIGHBOR...)` instead.\n",
      "INFO:tensorflow:Assets written to: saved_model/pafpn_mobilenetv4_conv_small_convnext_o4_oc64_r320/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/pafpn_mobilenetv4_conv_small_convnext_o4_oc64_r320/assets\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Preparing calibration data\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-10-11 15:35:07.564224: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:378] Ignored output_format.\n",
      "2024-10-11 15:35:07.564275: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:381] Ignored drop_control_dependency.\n",
      "2024-10-11 15:35:07.564991: I tensorflow/cc/saved_model/reader.cc:83] Reading SavedModel from: saved_model/pafpn_mobilenetv4_conv_small_convnext_o4_oc64_r320\n",
      "2024-10-11 15:35:07.593246: I tensorflow/cc/saved_model/reader.cc:51] Reading meta graph with tags { serve }\n",
      "2024-10-11 15:35:07.593271: I tensorflow/cc/saved_model/reader.cc:146] Reading SavedModel debug info (if present) from: saved_model/pafpn_mobilenetv4_conv_small_convnext_o4_oc64_r320\n",
      "2024-10-11 15:35:07.722477: I tensorflow/compiler/mlir/mlir_graph_optimization_pass.cc:388] MLIR V1 optimization pass is not enabled\n",
      "2024-10-11 15:35:07.726571: I tensorflow/cc/saved_model/loader.cc:233] Restoring SavedModel bundle.\n",
      "2024-10-11 15:35:07.879824: I tensorflow/cc/saved_model/loader.cc:217] Running initialization op on SavedModel bundle at path: saved_model/pafpn_mobilenetv4_conv_small_convnext_o4_oc64_r320\n",
      "2024-10-11 15:35:08.000537: I tensorflow/cc/saved_model/loader.cc:316] SavedModel load for tags { serve }; Status: success: OK. Took 435548 microseconds.\n",
      "2024-10-11 15:35:08.267341: I tensorflow/compiler/mlir/tensorflow/utils/dump_mlir_util.cc:269] disabling MLIR crash reproducer, set env var `MLIR_CRASH_REPRODUCER_DIRECTORY` to enable.\n",
      "Summary on the non-converted ops:\n",
      "---------------------------------\n",
      " * Accepted dialects: tfl, builtin, func\n",
      " * Non-Converted Ops: 136, Total Ops 348, % non-converted = 39.08 %\n",
      " * 136 ARITH ops\n",
      "\n",
      "- arith.constant:  136 occurrences  (f32: 128, i32: 8)\n",
      "\n",
      "\n",
      "\n",
      "  (f32: 22)\n",
      "  (f32: 58)\n",
      "  (f32: 21)\n",
      "  (f32: 32)\n",
      "  (f32: 1)\n",
      "  (f32: 9)\n",
      "  (f32: 3)\n",
      "  (f32: 63)\n",
      "2024-10-11 15:35:08.672001: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 1.963 G  ops, equivalently 0.981 G  MACs\n",
      "fully_quantize: 0, inference_type: 6, input_inference_type: UINT8, output_inference_type: UINT8\n",
      "2024-10-11 15:35:11.137340: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 1.963 G  ops, equivalently 0.981 G  MACs\n",
      "WARNING:timm.models._builder:Unexpected keys (classifier.bias, classifier.weight, conv_head.weight, norm_head.bias, norm_head.num_batches_tracked, norm_head.running_mean, norm_head.running_var, norm_head.weight) found while loading pretrained weights. This may be expected if model is being adapted.\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/decode_heads/decode_head.py:137: UserWarning: threshold is not defined for binary, and defaultsto 0.3\n",
      "  warnings.warn(\"threshold is not defined for binary, and defaults\" \"to 0.3\")\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/losses/cross_entropy_loss.py:250: UserWarning: Default ``avg_non_ignore`` is False, if you would like to ignore the certain label and average loss over non-ignore labels, which is the same with PyTorch official cross_entropy, set ``avg_non_ignore=True``.\n",
      "  warnings.warn(\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/utils/wrappers.py:48: TracerWarning: Converting a tensor to a Python integer might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!\n",
      "  size = [int(t * self.scale_factor) for t in x.shape[-2:]]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/pafpn_mobilenetv4_conv_medium_convnext_o4_oc96_r320/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/pafpn_mobilenetv4_conv_medium_convnext_o4_oc96_r320/assets\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Preparing calibration data\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-10-11 15:35:39.483724: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:378] Ignored output_format.\n",
      "2024-10-11 15:35:39.483782: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:381] Ignored drop_control_dependency.\n",
      "2024-10-11 15:35:39.484078: I tensorflow/cc/saved_model/reader.cc:83] Reading SavedModel from: saved_model/pafpn_mobilenetv4_conv_medium_convnext_o4_oc96_r320\n",
      "2024-10-11 15:35:39.560914: I tensorflow/cc/saved_model/reader.cc:51] Reading meta graph with tags { serve }\n",
      "2024-10-11 15:35:39.560962: I tensorflow/cc/saved_model/reader.cc:146] Reading SavedModel debug info (if present) from: saved_model/pafpn_mobilenetv4_conv_medium_convnext_o4_oc96_r320\n",
      "2024-10-11 15:35:39.610912: I tensorflow/cc/saved_model/loader.cc:233] Restoring SavedModel bundle.\n",
      "2024-10-11 15:35:39.797476: I tensorflow/cc/saved_model/loader.cc:217] Running initialization op on SavedModel bundle at path: saved_model/pafpn_mobilenetv4_conv_medium_convnext_o4_oc96_r320\n",
      "2024-10-11 15:35:39.931668: I tensorflow/cc/saved_model/loader.cc:316] SavedModel load for tags { serve }; Status: success: OK. Took 447591 microseconds.\n",
      "Summary on the non-converted ops:\n",
      "---------------------------------\n",
      " * Accepted dialects: tfl, builtin, func\n",
      " * Non-Converted Ops: 198, Total Ops 491, % non-converted = 40.33 %\n",
      " * 198 ARITH ops\n",
      "\n",
      "- arith.constant:  198 occurrences  (f32: 190, i32: 8)\n",
      "\n",
      "\n",
      "\n",
      "  (f32: 30)\n",
      "  (f32: 74)\n",
      "  (f32: 36)\n",
      "  (f32: 46)\n",
      "  (f32: 1)\n",
      "  (f32: 9)\n",
      "  (f32: 3)\n",
      "  (f32: 91)\n",
      "2024-10-11 15:35:41.319832: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 5.764 G  ops, equivalently 2.882 G  MACs\n",
      "fully_quantize: 0, inference_type: 6, input_inference_type: UINT8, output_inference_type: UINT8\n",
      "2024-10-11 15:35:48.035783: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 5.764 G  ops, equivalently 2.882 G  MACs\n",
      "WARNING:timm.models._builder:Unexpected keys (classifier.bias, classifier.weight, conv_head.weight, norm_head.bias, norm_head.num_batches_tracked, norm_head.running_mean, norm_head.running_var, norm_head.weight) found while loading pretrained weights. This may be expected if model is being adapted.\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/decode_heads/decode_head.py:137: UserWarning: threshold is not defined for binary, and defaultsto 0.3\n",
      "  warnings.warn(\"threshold is not defined for binary, and defaults\" \"to 0.3\")\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/losses/cross_entropy_loss.py:250: UserWarning: Default ``avg_non_ignore`` is False, if you would like to ignore the certain label and average loss over non-ignore labels, which is the same with PyTorch official cross_entropy, set ``avg_non_ignore=True``.\n",
      "  warnings.warn(\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/utils/wrappers.py:48: TracerWarning: Converting a tensor to a Python integer might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!\n",
      "  size = [int(t * self.scale_factor) for t in x.shape[-2:]]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/pafpn_mobilenetv4_conv_large_convnext_o4_oc128_r320/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/pafpn_mobilenetv4_conv_large_convnext_o4_oc128_r320/assets\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Preparing calibration data\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-10-11 15:37:15.039150: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:378] Ignored output_format.\n",
      "2024-10-11 15:37:15.039202: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:381] Ignored drop_control_dependency.\n",
      "2024-10-11 15:37:15.039454: I tensorflow/cc/saved_model/reader.cc:83] Reading SavedModel from: saved_model/pafpn_mobilenetv4_conv_large_convnext_o4_oc128_r320\n",
      "2024-10-11 15:37:15.124346: I tensorflow/cc/saved_model/reader.cc:51] Reading meta graph with tags { serve }\n",
      "2024-10-11 15:37:15.124386: I tensorflow/cc/saved_model/reader.cc:146] Reading SavedModel debug info (if present) from: saved_model/pafpn_mobilenetv4_conv_large_convnext_o4_oc128_r320\n",
      "2024-10-11 15:37:15.251055: I tensorflow/cc/saved_model/loader.cc:233] Restoring SavedModel bundle.\n",
      "2024-10-11 15:37:15.698525: I tensorflow/cc/saved_model/loader.cc:217] Running initialization op on SavedModel bundle at path: saved_model/pafpn_mobilenetv4_conv_large_convnext_o4_oc128_r320\n",
      "2024-10-11 15:37:15.907188: I tensorflow/cc/saved_model/loader.cc:316] SavedModel load for tags { serve }; Status: success: OK. Took 867732 microseconds.\n",
      "Summary on the non-converted ops:\n",
      "---------------------------------\n",
      " * Accepted dialects: tfl, builtin, func\n",
      " * Non-Converted Ops: 248, Total Ops 616, % non-converted = 40.26 %\n",
      " * 248 ARITH ops\n",
      "\n",
      "- arith.constant:  248 occurrences  (f32: 240, i32: 8)\n",
      "\n",
      "\n",
      "\n",
      "  (f32: 35)\n",
      "  (f32: 84)\n",
      "  (f32: 51)\n",
      "  (f32: 61)\n",
      "  (f32: 1)\n",
      "  (f32: 9)\n",
      "  (f32: 3)\n",
      "  (f32: 121)\n",
      "2024-10-11 15:37:18.408725: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 12.616 G  ops, equivalently 6.308 G  MACs\n",
      "fully_quantize: 0, inference_type: 6, input_inference_type: UINT8, output_inference_type: UINT8\n",
      "2024-10-11 15:37:34.321111: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 12.616 G  ops, equivalently 6.308 G  MACs\n"
     ]
    }
   ],
   "source": [
    "from models.fpn import OCDPAFPN\n",
    "\n",
    "num_outs = 4\n",
    "\n",
    "image_size = (320, 320)\n",
    "backbone_dict = {\n",
    "    \"small\": \"mobilenetv4_conv_small.e2400_r224_in1k\",\n",
    "    \"medium\": \"mobilenetv4_conv_medium.e500_r256_in1k\",\n",
    "    \"large\": \"mobilenetv4_conv_large.e500_r256_in1k\",\n",
    "}\n",
    "fpn_type = \"convnext\"\n",
    "\n",
    "list_out_channel = [64, 96, 128]\n",
    "\n",
    "for out_channel, backbone_key in zip(list_out_channel, backbone_dict.keys()):\n",
    "    backbone_value = backbone_dict[backbone_key]\n",
    "    model = OCDPAFPN(\n",
    "        backbone=backbone_value,\n",
    "        n_classes=1,\n",
    "        num_outs=num_outs,\n",
    "        out_channel=out_channel,\n",
    "        fpn_type=fpn_type,\n",
    "    )\n",
    "\n",
    "    model_name = f\"pafpn_mobilenetv4_conv_{backbone_key}_{fpn_type}_o{num_outs}_oc{out_channel}_r{image_size[0]}\"\n",
    "    torch_model_path = None  # use initialized model\n",
    "    onnx_model_path = f\"saved_model/{model_name}/{model_name}.onnx\"\n",
    "    tf_folder_path = f\"saved_model/{model_name}\"\n",
    "    tflite_model_path = onnx_model_path.replace(\n",
    "        \".onnx\", \"_full_integer_quant_uint8.tflite\"\n",
    "    )\n",
    "    calib_data_path = \"calibdata.npy\"\n",
    "\n",
    "    converter = Converter(\n",
    "        model,\n",
    "        image_size,\n",
    "        torch_model_path,\n",
    "        onnx_model_path,\n",
    "        tf_folder_path,\n",
    "        tflite_model_path,\n",
    "        calib_data_path,\n",
    "        opset_version=11,\n",
    "    )\n",
    "    converter.convert()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### c80\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### num_outs"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "WARNING:timm.models._builder:Unexpected keys (classifier.bias, classifier.weight, conv_head.weight, norm_head.bias, norm_head.num_batches_tracked, norm_head.running_mean, norm_head.running_var, norm_head.weight) found while loading pretrained weights. This may be expected if model is being adapted.\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/losses/cross_entropy_loss.py:250: UserWarning: Default ``avg_non_ignore`` is False, if you would like to ignore the certain label and average loss over non-ignore labels, which is the same with PyTorch official cross_entropy, set ``avg_non_ignore=True``.\n",
      "  warnings.warn(\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/utils/wrappers.py:48: TracerWarning: Converting a tensor to a Python integer might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!\n",
      "  size = [int(t * self.scale_factor) for t in x.shape[-2:]]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/pafpn_mobilenetv4_conv_small_o3_oc64_r320_coco/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/pafpn_mobilenetv4_conv_small_o3_oc64_r320_coco/assets\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Preparing calibration data\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-10-14 00:16:49.440363: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:378] Ignored output_format.\n",
      "2024-10-14 00:16:49.440404: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:381] Ignored drop_control_dependency.\n",
      "2024-10-14 00:16:49.440668: I tensorflow/cc/saved_model/reader.cc:83] Reading SavedModel from: saved_model/pafpn_mobilenetv4_conv_small_o3_oc64_r320_coco\n",
      "2024-10-14 00:16:49.448887: I tensorflow/cc/saved_model/reader.cc:51] Reading meta graph with tags { serve }\n",
      "2024-10-14 00:16:49.448928: I tensorflow/cc/saved_model/reader.cc:146] Reading SavedModel debug info (if present) from: saved_model/pafpn_mobilenetv4_conv_small_o3_oc64_r320_coco\n",
      "2024-10-14 00:16:49.465410: I tensorflow/cc/saved_model/loader.cc:233] Restoring SavedModel bundle.\n",
      "2024-10-14 00:16:49.556644: I tensorflow/cc/saved_model/loader.cc:217] Running initialization op on SavedModel bundle at path: saved_model/pafpn_mobilenetv4_conv_small_o3_oc64_r320_coco\n",
      "2024-10-14 00:16:49.644480: I tensorflow/cc/saved_model/loader.cc:316] SavedModel load for tags { serve }; Status: success: OK. Took 203812 microseconds.\n",
      "Summary on the non-converted ops:\n",
      "---------------------------------\n",
      " * Accepted dialects: tfl, builtin, func\n",
      " * Non-Converted Ops: 114, Total Ops 281, % non-converted = 40.57 %\n",
      " * 114 ARITH ops\n",
      "\n",
      "- arith.constant:  114 occurrences  (f32: 108, i32: 6)\n",
      "\n",
      "\n",
      "\n",
      "  (f32: 16)\n",
      "  (f32: 45)\n",
      "  (f32: 15)\n",
      "  (f32: 27)\n",
      "  (f32: 5)\n",
      "  (f32: 2)\n",
      "  (f32: 54)\n",
      "2024-10-14 00:16:49.928927: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 1.106 G  ops, equivalently 0.553 G  MACs\n",
      "fully_quantize: 0, inference_type: 6, input_inference_type: UINT8, output_inference_type: UINT8\n",
      "2024-10-14 00:16:51.213891: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 1.106 G  ops, equivalently 0.553 G  MACs\n",
      "WARNING:timm.models._builder:Unexpected keys (classifier.bias, classifier.weight, conv_head.weight, norm_head.bias, norm_head.num_batches_tracked, norm_head.running_mean, norm_head.running_var, norm_head.weight) found while loading pretrained weights. This may be expected if model is being adapted.\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/losses/cross_entropy_loss.py:250: UserWarning: Default ``avg_non_ignore`` is False, if you would like to ignore the certain label and average loss over non-ignore labels, which is the same with PyTorch official cross_entropy, set ``avg_non_ignore=True``.\n",
      "  warnings.warn(\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/utils/wrappers.py:48: TracerWarning: Converting a tensor to a Python integer might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!\n",
      "  size = [int(t * self.scale_factor) for t in x.shape[-2:]]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/pafpn_mobilenetv4_conv_medium_o3_oc96_r320_coco/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/pafpn_mobilenetv4_conv_medium_o3_oc96_r320_coco/assets\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Preparing calibration data\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-10-14 00:17:02.685253: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:378] Ignored output_format.\n",
      "2024-10-14 00:17:02.685298: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:381] Ignored drop_control_dependency.\n",
      "2024-10-14 00:17:02.685550: I tensorflow/cc/saved_model/reader.cc:83] Reading SavedModel from: saved_model/pafpn_mobilenetv4_conv_medium_o3_oc96_r320_coco\n",
      "2024-10-14 00:17:02.859331: I tensorflow/cc/saved_model/reader.cc:51] Reading meta graph with tags { serve }\n",
      "2024-10-14 00:17:02.859375: I tensorflow/cc/saved_model/reader.cc:146] Reading SavedModel debug info (if present) from: saved_model/pafpn_mobilenetv4_conv_medium_o3_oc96_r320_coco\n",
      "2024-10-14 00:17:02.877407: I tensorflow/cc/saved_model/loader.cc:233] Restoring SavedModel bundle.\n",
      "2024-10-14 00:17:02.954239: I tensorflow/cc/saved_model/loader.cc:217] Running initialization op on SavedModel bundle at path: saved_model/pafpn_mobilenetv4_conv_medium_o3_oc96_r320_coco\n",
      "2024-10-14 00:17:03.028743: I tensorflow/cc/saved_model/loader.cc:316] SavedModel load for tags { serve }; Status: success: OK. Took 343194 microseconds.\n",
      "Summary on the non-converted ops:\n",
      "---------------------------------\n",
      " * Accepted dialects: tfl, builtin, func\n",
      " * Non-Converted Ops: 175, Total Ops 423, % non-converted = 41.37 %\n",
      " * 175 ARITH ops\n",
      "\n",
      "- arith.constant:  175 occurrences  (f32: 169, i32: 6)\n",
      "\n",
      "\n",
      "\n",
      "  (f32: 24)\n",
      "  (f32: 61)\n",
      "  (f32: 30)\n",
      "  (f32: 41)\n",
      "  (f32: 5)\n",
      "  (f32: 2)\n",
      "  (f32: 82)\n",
      "2024-10-14 00:17:03.483077: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 4.391 G  ops, equivalently 2.195 G  MACs\n",
      "fully_quantize: 0, inference_type: 6, input_inference_type: UINT8, output_inference_type: UINT8\n",
      "2024-10-14 00:17:07.188355: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 4.391 G  ops, equivalently 2.195 G  MACs\n",
      "WARNING:timm.models._builder:Unexpected keys (classifier.bias, classifier.weight, conv_head.weight, norm_head.bias, norm_head.num_batches_tracked, norm_head.running_mean, norm_head.running_var, norm_head.weight) found while loading pretrained weights. This may be expected if model is being adapted.\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/losses/cross_entropy_loss.py:250: UserWarning: Default ``avg_non_ignore`` is False, if you would like to ignore the certain label and average loss over non-ignore labels, which is the same with PyTorch official cross_entropy, set ``avg_non_ignore=True``.\n",
      "  warnings.warn(\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/utils/wrappers.py:48: TracerWarning: Converting a tensor to a Python integer might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!\n",
      "  size = [int(t * self.scale_factor) for t in x.shape[-2:]]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/pafpn_mobilenetv4_conv_large_o3_oc128_r320_coco/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/pafpn_mobilenetv4_conv_large_o3_oc128_r320_coco/assets\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Preparing calibration data\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-10-14 00:17:27.174855: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:378] Ignored output_format.\n",
      "2024-10-14 00:17:27.174906: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:381] Ignored drop_control_dependency.\n",
      "2024-10-14 00:17:27.175137: I tensorflow/cc/saved_model/reader.cc:83] Reading SavedModel from: saved_model/pafpn_mobilenetv4_conv_large_o3_oc128_r320_coco\n",
      "2024-10-14 00:17:27.216998: I tensorflow/cc/saved_model/reader.cc:51] Reading meta graph with tags { serve }\n",
      "2024-10-14 00:17:27.217033: I tensorflow/cc/saved_model/reader.cc:146] Reading SavedModel debug info (if present) from: saved_model/pafpn_mobilenetv4_conv_large_o3_oc128_r320_coco\n",
      "2024-10-14 00:17:27.267321: I tensorflow/cc/saved_model/loader.cc:233] Restoring SavedModel bundle.\n",
      "2024-10-14 00:17:27.397342: I tensorflow/cc/saved_model/loader.cc:217] Running initialization op on SavedModel bundle at path: saved_model/pafpn_mobilenetv4_conv_large_o3_oc128_r320_coco\n",
      "2024-10-14 00:17:27.520167: I tensorflow/cc/saved_model/loader.cc:316] SavedModel load for tags { serve }; Status: success: OK. Took 345032 microseconds.\n",
      "Summary on the non-converted ops:\n",
      "---------------------------------\n",
      " * Accepted dialects: tfl, builtin, func\n",
      " * Non-Converted Ops: 225, Total Ops 548, % non-converted = 41.06 %\n",
      " * 225 ARITH ops\n",
      "\n",
      "- arith.constant:  225 occurrences  (f32: 219, i32: 6)\n",
      "\n",
      "\n",
      "\n",
      "  (f32: 29)\n",
      "  (f32: 71)\n",
      "  (f32: 45)\n",
      "  (f32: 56)\n",
      "  (f32: 5)\n",
      "  (f32: 2)\n",
      "  (f32: 112)\n",
      "2024-10-14 00:17:28.587424: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 10.643 G  ops, equivalently 5.321 G  MACs\n",
      "fully_quantize: 0, inference_type: 6, input_inference_type: UINT8, output_inference_type: UINT8\n",
      "2024-10-14 00:17:36.929644: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 10.643 G  ops, equivalently 5.321 G  MACs\n",
      "WARNING:timm.models._builder:Unexpected keys (classifier.bias, classifier.weight, conv_head.weight, norm_head.bias, norm_head.num_batches_tracked, norm_head.running_mean, norm_head.running_var, norm_head.weight) found while loading pretrained weights. This may be expected if model is being adapted.\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/losses/cross_entropy_loss.py:250: UserWarning: Default ``avg_non_ignore`` is False, if you would like to ignore the certain label and average loss over non-ignore labels, which is the same with PyTorch official cross_entropy, set ``avg_non_ignore=True``.\n",
      "  warnings.warn(\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/utils/wrappers.py:48: TracerWarning: Converting a tensor to a Python integer might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!\n",
      "  size = [int(t * self.scale_factor) for t in x.shape[-2:]]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/pafpn_mobilenetv4_conv_small_o5_oc64_r320_coco/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/pafpn_mobilenetv4_conv_small_o5_oc64_r320_coco/assets\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Preparing calibration data\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-10-14 00:17:51.756676: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:378] Ignored output_format.\n",
      "2024-10-14 00:17:51.756710: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:381] Ignored drop_control_dependency.\n",
      "2024-10-14 00:17:51.756937: I tensorflow/cc/saved_model/reader.cc:83] Reading SavedModel from: saved_model/pafpn_mobilenetv4_conv_small_o5_oc64_r320_coco\n",
      "2024-10-14 00:17:51.814303: I tensorflow/cc/saved_model/reader.cc:51] Reading meta graph with tags { serve }\n",
      "2024-10-14 00:17:51.814336: I tensorflow/cc/saved_model/reader.cc:146] Reading SavedModel debug info (if present) from: saved_model/pafpn_mobilenetv4_conv_small_o5_oc64_r320_coco\n",
      "2024-10-14 00:17:51.828807: I tensorflow/cc/saved_model/loader.cc:233] Restoring SavedModel bundle.\n",
      "2024-10-14 00:17:51.913590: I tensorflow/cc/saved_model/loader.cc:217] Running initialization op on SavedModel bundle at path: saved_model/pafpn_mobilenetv4_conv_small_o5_oc64_r320_coco\n",
      "2024-10-14 00:17:51.997608: I tensorflow/cc/saved_model/loader.cc:316] SavedModel load for tags { serve }; Status: success: OK. Took 240672 microseconds.\n",
      "Summary on the non-converted ops:\n",
      "---------------------------------\n",
      " * Accepted dialects: tfl, builtin, func\n",
      " * Non-Converted Ops: 131, Total Ops 363, % non-converted = 36.09 %\n",
      " * 131 ARITH ops\n",
      "\n",
      "- arith.constant:  131 occurrences  (f32: 123, i32: 8)\n",
      "\n",
      "\n",
      "\n",
      "  (f32: 22)\n",
      "  (f32: 60)\n",
      "  (f32: 15)\n",
      "  (f32: 38)\n",
      "  (f32: 14)\n",
      "  (f32: 4)\n",
      "  (f32: 76)\n",
      "2024-10-14 00:17:52.377462: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 6.506 G  ops, equivalently 3.253 G  MACs\n",
      "fully_quantize: 0, inference_type: 6, input_inference_type: UINT8, output_inference_type: UINT8\n",
      "2024-10-14 00:17:56.545210: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 6.506 G  ops, equivalently 3.253 G  MACs\n",
      "WARNING:timm.models._builder:Unexpected keys (classifier.bias, classifier.weight, conv_head.weight, norm_head.bias, norm_head.num_batches_tracked, norm_head.running_mean, norm_head.running_var, norm_head.weight) found while loading pretrained weights. This may be expected if model is being adapted.\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/losses/cross_entropy_loss.py:250: UserWarning: Default ``avg_non_ignore`` is False, if you would like to ignore the certain label and average loss over non-ignore labels, which is the same with PyTorch official cross_entropy, set ``avg_non_ignore=True``.\n",
      "  warnings.warn(\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/utils/wrappers.py:48: TracerWarning: Converting a tensor to a Python integer might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!\n",
      "  size = [int(t * self.scale_factor) for t in x.shape[-2:]]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/pafpn_mobilenetv4_conv_medium_o5_oc96_r320_coco/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/pafpn_mobilenetv4_conv_medium_o5_oc96_r320_coco/assets\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Preparing calibration data\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-10-14 00:18:11.136376: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:378] Ignored output_format.\n",
      "2024-10-14 00:18:11.136419: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:381] Ignored drop_control_dependency.\n",
      "2024-10-14 00:18:11.136641: I tensorflow/cc/saved_model/reader.cc:83] Reading SavedModel from: saved_model/pafpn_mobilenetv4_conv_medium_o5_oc96_r320_coco\n",
      "2024-10-14 00:18:11.156045: I tensorflow/cc/saved_model/reader.cc:51] Reading meta graph with tags { serve }\n",
      "2024-10-14 00:18:11.156099: I tensorflow/cc/saved_model/reader.cc:146] Reading SavedModel debug info (if present) from: saved_model/pafpn_mobilenetv4_conv_medium_o5_oc96_r320_coco\n",
      "2024-10-14 00:18:11.184738: I tensorflow/cc/saved_model/loader.cc:233] Restoring SavedModel bundle.\n",
      "2024-10-14 00:18:11.309414: I tensorflow/cc/saved_model/loader.cc:217] Running initialization op on SavedModel bundle at path: saved_model/pafpn_mobilenetv4_conv_medium_o5_oc96_r320_coco\n",
      "2024-10-14 00:18:11.472766: I tensorflow/cc/saved_model/loader.cc:316] SavedModel load for tags { serve }; Status: success: OK. Took 336125 microseconds.\n",
      "Summary on the non-converted ops:\n",
      "---------------------------------\n",
      " * Accepted dialects: tfl, builtin, func\n",
      " * Non-Converted Ops: 192, Total Ops 505, % non-converted = 38.02 %\n",
      " * 192 ARITH ops\n",
      "\n",
      "- arith.constant:  192 occurrences  (f32: 184, i32: 8)\n",
      "\n",
      "\n",
      "\n",
      "  (f32: 30)\n",
      "  (f32: 76)\n",
      "  (f32: 30)\n",
      "  (f32: 52)\n",
      "  (f32: 14)\n",
      "  (f32: 4)\n",
      "  (f32: 104)\n",
      "2024-10-14 00:18:12.167018: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 21.991 G  ops, equivalently 10.995 G  MACs\n",
      "fully_quantize: 0, inference_type: 6, input_inference_type: UINT8, output_inference_type: UINT8\n",
      "2024-10-14 00:18:22.125483: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 21.991 G  ops, equivalently 10.995 G  MACs\n",
      "WARNING:timm.models._builder:Unexpected keys (classifier.bias, classifier.weight, conv_head.weight, norm_head.bias, norm_head.num_batches_tracked, norm_head.running_mean, norm_head.running_var, norm_head.weight) found while loading pretrained weights. This may be expected if model is being adapted.\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/losses/cross_entropy_loss.py:250: UserWarning: Default ``avg_non_ignore`` is False, if you would like to ignore the certain label and average loss over non-ignore labels, which is the same with PyTorch official cross_entropy, set ``avg_non_ignore=True``.\n",
      "  warnings.warn(\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/utils/wrappers.py:48: TracerWarning: Converting a tensor to a Python integer might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!\n",
      "  size = [int(t * self.scale_factor) for t in x.shape[-2:]]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/pafpn_mobilenetv4_conv_large_o5_oc128_r320_coco/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/pafpn_mobilenetv4_conv_large_o5_oc128_r320_coco/assets\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Preparing calibration data\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-10-14 00:18:47.695924: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:378] Ignored output_format.\n",
      "2024-10-14 00:18:47.695962: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:381] Ignored drop_control_dependency.\n",
      "2024-10-14 00:18:47.696193: I tensorflow/cc/saved_model/reader.cc:83] Reading SavedModel from: saved_model/pafpn_mobilenetv4_conv_large_o5_oc128_r320_coco\n",
      "2024-10-14 00:18:47.740080: I tensorflow/cc/saved_model/reader.cc:51] Reading meta graph with tags { serve }\n",
      "2024-10-14 00:18:47.740162: I tensorflow/cc/saved_model/reader.cc:146] Reading SavedModel debug info (if present) from: saved_model/pafpn_mobilenetv4_conv_large_o5_oc128_r320_coco\n",
      "2024-10-14 00:18:47.793587: I tensorflow/cc/saved_model/loader.cc:233] Restoring SavedModel bundle.\n",
      "2024-10-14 00:18:47.958901: I tensorflow/cc/saved_model/loader.cc:217] Running initialization op on SavedModel bundle at path: saved_model/pafpn_mobilenetv4_conv_large_o5_oc128_r320_coco\n",
      "2024-10-14 00:18:48.110954: I tensorflow/cc/saved_model/loader.cc:316] SavedModel load for tags { serve }; Status: success: OK. Took 414762 microseconds.\n",
      "Summary on the non-converted ops:\n",
      "---------------------------------\n",
      " * Accepted dialects: tfl, builtin, func\n",
      " * Non-Converted Ops: 242, Total Ops 630, % non-converted = 38.41 %\n",
      " * 242 ARITH ops\n",
      "\n",
      "- arith.constant:  242 occurrences  (f32: 234, i32: 8)\n",
      "\n",
      "\n",
      "\n",
      "  (f32: 35)\n",
      "  (f32: 86)\n",
      "  (f32: 45)\n",
      "  (f32: 67)\n",
      "  (f32: 14)\n",
      "  (f32: 4)\n",
      "  (f32: 134)\n",
      "2024-10-14 00:18:49.514326: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 41.606 G  ops, equivalently 20.803 G  MACs\n",
      "fully_quantize: 0, inference_type: 6, input_inference_type: UINT8, output_inference_type: UINT8\n",
      "2024-10-14 00:19:06.357006: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 41.606 G  ops, equivalently 20.803 G  MACs\n"
     ]
    }
   ],
   "source": [
    "from models.fpn import OCDPAFPN\n",
    "\n",
    "num_outs_list = [\n",
    "    3,\n",
    "    4,\n",
    "]\n",
    "\n",
    "image_size = (320, 320)\n",
    "backbone_dict = {\n",
    "    \"small\": \"mobilenetv4_conv_small.e2400_r224_in1k\",\n",
    "    \"medium\": \"mobilenetv4_conv_medium.e500_r256_in1k\",\n",
    "    \"large\": \"mobilenetv4_conv_large.e500_r256_in1k\",\n",
    "}\n",
    "fpn_type = \"mm\"\n",
    "\n",
    "list_out_channel = [64, 96, 128]\n",
    "\n",
    "for num_outs in num_outs_list:\n",
    "    for out_channel, backbone_key in zip(list_out_channel, backbone_dict.keys()):\n",
    "        backbone_value = backbone_dict[backbone_key]\n",
    "        model = OCDPAFPN(\n",
    "            backbone=backbone_value,\n",
    "            n_classes=80,\n",
    "            num_outs=num_outs,\n",
    "            out_channel=out_channel,\n",
    "            fpn_type=fpn_type,\n",
    "        )\n",
    "\n",
    "        model_name = f\"pafpn_mobilenetv4_conv_{backbone_key}_o{num_outs}_oc{out_channel}_r{image_size[0]}_coco\"\n",
    "        torch_model_path = None  # use initialized model\n",
    "        onnx_model_path = f\"saved_model/{model_name}/{model_name}.onnx\"\n",
    "        tf_folder_path = f\"saved_model/{model_name}\"\n",
    "        tflite_model_path = onnx_model_path.replace(\n",
    "            \".onnx\", \"_full_integer_quant_uint8.tflite\"\n",
    "        )\n",
    "        calib_data_path = \"calibdata.npy\"\n",
    "\n",
    "        converter = Converter(\n",
    "            model,\n",
    "            image_size,\n",
    "            torch_model_path,\n",
    "            onnx_model_path,\n",
    "            tf_folder_path,\n",
    "            tflite_model_path,\n",
    "            calib_data_path,\n",
    "            opset_version=11,\n",
    "        )\n",
    "        converter.convert()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### fpn_type: mm"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "WARNING:timm.models._builder:Unexpected keys (classifier.bias, classifier.weight, conv_head.weight, norm_head.bias, norm_head.num_batches_tracked, norm_head.running_mean, norm_head.running_var, norm_head.weight) found while loading pretrained weights. This may be expected if model is being adapted.\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/losses/cross_entropy_loss.py:250: UserWarning: Default ``avg_non_ignore`` is False, if you would like to ignore the certain label and average loss over non-ignore labels, which is the same with PyTorch official cross_entropy, set ``avg_non_ignore=True``.\n",
      "  warnings.warn(\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/utils/wrappers.py:48: TracerWarning: Converting a tensor to a Python integer might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!\n",
      "  size = [int(t * self.scale_factor) for t in x.shape[-2:]]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/pafpn_mobilenetv4_conv_small_o4_oc64_r320_coco/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/pafpn_mobilenetv4_conv_small_o4_oc64_r320_coco/assets\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Preparing calibration data\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-10-11 15:38:03.254905: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:378] Ignored output_format.\n",
      "2024-10-11 15:38:03.254957: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:381] Ignored drop_control_dependency.\n",
      "2024-10-11 15:38:03.255225: I tensorflow/cc/saved_model/reader.cc:83] Reading SavedModel from: saved_model/pafpn_mobilenetv4_conv_small_o4_oc64_r320_coco\n",
      "2024-10-11 15:38:03.275769: I tensorflow/cc/saved_model/reader.cc:51] Reading meta graph with tags { serve }\n",
      "2024-10-11 15:38:03.275809: I tensorflow/cc/saved_model/reader.cc:146] Reading SavedModel debug info (if present) from: saved_model/pafpn_mobilenetv4_conv_small_o4_oc64_r320_coco\n",
      "2024-10-11 15:38:03.300253: I tensorflow/cc/saved_model/loader.cc:233] Restoring SavedModel bundle.\n",
      "2024-10-11 15:38:03.389204: I tensorflow/cc/saved_model/loader.cc:217] Running initialization op on SavedModel bundle at path: saved_model/pafpn_mobilenetv4_conv_small_o4_oc64_r320_coco\n",
      "2024-10-11 15:38:03.480627: I tensorflow/cc/saved_model/loader.cc:316] SavedModel load for tags { serve }; Status: success: OK. Took 225404 microseconds.\n",
      "Summary on the non-converted ops:\n",
      "---------------------------------\n",
      " * Accepted dialects: tfl, builtin, func\n",
      " * Non-Converted Ops: 122, Total Ops 319, % non-converted = 38.24 %\n",
      " * 122 ARITH ops\n",
      "\n",
      "- arith.constant:  122 occurrences  (f32: 115, i32: 7)\n",
      "\n",
      "\n",
      "\n",
      "  (f32: 19)\n",
      "  (f32: 52)\n",
      "  (f32: 15)\n",
      "  (f32: 32)\n",
      "  (f32: 9)\n",
      "  (f32: 3)\n",
      "  (f32: 64)\n",
      "2024-10-11 15:38:04.148605: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 2.162 G  ops, equivalently 1.081 G  MACs\n",
      "fully_quantize: 0, inference_type: 6, input_inference_type: UINT8, output_inference_type: UINT8\n",
      "2024-10-11 15:38:07.089131: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 2.162 G  ops, equivalently 1.081 G  MACs\n",
      "WARNING:timm.models._builder:Unexpected keys (classifier.bias, classifier.weight, conv_head.weight, norm_head.bias, norm_head.num_batches_tracked, norm_head.running_mean, norm_head.running_var, norm_head.weight) found while loading pretrained weights. This may be expected if model is being adapted.\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/losses/cross_entropy_loss.py:250: UserWarning: Default ``avg_non_ignore`` is False, if you would like to ignore the certain label and average loss over non-ignore labels, which is the same with PyTorch official cross_entropy, set ``avg_non_ignore=True``.\n",
      "  warnings.warn(\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/utils/wrappers.py:48: TracerWarning: Converting a tensor to a Python integer might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!\n",
      "  size = [int(t * self.scale_factor) for t in x.shape[-2:]]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/pafpn_mobilenetv4_conv_medium_o4_oc96_r320_coco/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/pafpn_mobilenetv4_conv_medium_o4_oc96_r320_coco/assets\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Preparing calibration data\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-10-11 15:38:40.840959: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:378] Ignored output_format.\n",
      "2024-10-11 15:38:40.841006: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:381] Ignored drop_control_dependency.\n",
      "2024-10-11 15:38:40.841258: I tensorflow/cc/saved_model/reader.cc:83] Reading SavedModel from: saved_model/pafpn_mobilenetv4_conv_medium_o4_oc96_r320_coco\n",
      "2024-10-11 15:38:41.036651: I tensorflow/cc/saved_model/reader.cc:51] Reading meta graph with tags { serve }\n",
      "2024-10-11 15:38:41.036697: I tensorflow/cc/saved_model/reader.cc:146] Reading SavedModel debug info (if present) from: saved_model/pafpn_mobilenetv4_conv_medium_o4_oc96_r320_coco\n",
      "2024-10-11 15:38:41.075199: I tensorflow/cc/saved_model/loader.cc:233] Restoring SavedModel bundle.\n",
      "2024-10-11 15:38:41.294672: I tensorflow/cc/saved_model/loader.cc:217] Running initialization op on SavedModel bundle at path: saved_model/pafpn_mobilenetv4_conv_medium_o4_oc96_r320_coco\n",
      "2024-10-11 15:38:41.547930: I tensorflow/cc/saved_model/loader.cc:316] SavedModel load for tags { serve }; Status: success: OK. Took 706673 microseconds.\n",
      "Summary on the non-converted ops:\n",
      "---------------------------------\n",
      " * Accepted dialects: tfl, builtin, func\n",
      " * Non-Converted Ops: 183, Total Ops 461, % non-converted = 39.70 %\n",
      " * 183 ARITH ops\n",
      "\n",
      "- arith.constant:  183 occurrences  (f32: 176, i32: 7)\n",
      "\n",
      "\n",
      "\n",
      "  (f32: 27)\n",
      "  (f32: 68)\n",
      "  (f32: 30)\n",
      "  (f32: 46)\n",
      "  (f32: 9)\n",
      "  (f32: 3)\n",
      "  (f32: 92)\n",
      "2024-10-11 15:38:42.862833: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 7.714 G  ops, equivalently 3.857 G  MACs\n",
      "fully_quantize: 0, inference_type: 6, input_inference_type: UINT8, output_inference_type: UINT8\n",
      "2024-10-11 15:38:51.252652: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 7.714 G  ops, equivalently 3.857 G  MACs\n",
      "WARNING:timm.models._builder:Unexpected keys (classifier.bias, classifier.weight, conv_head.weight, norm_head.bias, norm_head.num_batches_tracked, norm_head.running_mean, norm_head.running_var, norm_head.weight) found while loading pretrained weights. This may be expected if model is being adapted.\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/losses/cross_entropy_loss.py:250: UserWarning: Default ``avg_non_ignore`` is False, if you would like to ignore the certain label and average loss over non-ignore labels, which is the same with PyTorch official cross_entropy, set ``avg_non_ignore=True``.\n",
      "  warnings.warn(\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/utils/wrappers.py:48: TracerWarning: Converting a tensor to a Python integer might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!\n",
      "  size = [int(t * self.scale_factor) for t in x.shape[-2:]]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/pafpn_mobilenetv4_conv_large_o4_oc128_r320_coco/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/pafpn_mobilenetv4_conv_large_o4_oc128_r320_coco/assets\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Preparing calibration data\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-10-11 15:39:24.977049: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:378] Ignored output_format.\n",
      "2024-10-11 15:39:24.977105: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:381] Ignored drop_control_dependency.\n",
      "2024-10-11 15:39:24.977430: I tensorflow/cc/saved_model/reader.cc:83] Reading SavedModel from: saved_model/pafpn_mobilenetv4_conv_large_o4_oc128_r320_coco\n",
      "2024-10-11 15:39:25.029431: I tensorflow/cc/saved_model/reader.cc:51] Reading meta graph with tags { serve }\n",
      "2024-10-11 15:39:25.029477: I tensorflow/cc/saved_model/reader.cc:146] Reading SavedModel debug info (if present) from: saved_model/pafpn_mobilenetv4_conv_large_o4_oc128_r320_coco\n",
      "2024-10-11 15:39:25.108106: I tensorflow/cc/saved_model/loader.cc:233] Restoring SavedModel bundle.\n",
      "2024-10-11 15:39:25.506593: I tensorflow/cc/saved_model/loader.cc:217] Running initialization op on SavedModel bundle at path: saved_model/pafpn_mobilenetv4_conv_large_o4_oc128_r320_coco\n",
      "2024-10-11 15:39:25.753926: I tensorflow/cc/saved_model/loader.cc:316] SavedModel load for tags { serve }; Status: success: OK. Took 776498 microseconds.\n",
      "Summary on the non-converted ops:\n",
      "---------------------------------\n",
      " * Accepted dialects: tfl, builtin, func\n",
      " * Non-Converted Ops: 233, Total Ops 586, % non-converted = 39.76 %\n",
      " * 233 ARITH ops\n",
      "\n",
      "- arith.constant:  233 occurrences  (f32: 226, i32: 7)\n",
      "\n",
      "\n",
      "\n",
      "  (f32: 32)\n",
      "  (f32: 78)\n",
      "  (f32: 45)\n",
      "  (f32: 61)\n",
      "  (f32: 9)\n",
      "  (f32: 3)\n",
      "  (f32: 122)\n",
      "2024-10-11 15:39:29.758441: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 16.489 G  ops, equivalently 8.244 G  MACs\n",
      "fully_quantize: 0, inference_type: 6, input_inference_type: UINT8, output_inference_type: UINT8\n",
      "2024-10-11 15:39:48.056723: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 16.489 G  ops, equivalently 8.244 G  MACs\n"
     ]
    }
   ],
   "source": [
    "from models.fpn import OCDPAFPN\n",
    "\n",
    "num_outs = 4\n",
    "\n",
    "image_size = (320, 320)\n",
    "backbone_dict = {\n",
    "    \"small\": \"mobilenetv4_conv_small.e2400_r224_in1k\",\n",
    "    \"medium\": \"mobilenetv4_conv_medium.e500_r256_in1k\",\n",
    "    \"large\": \"mobilenetv4_conv_large.e500_r256_in1k\",\n",
    "}\n",
    "fpn_type = \"mm\"\n",
    "\n",
    "list_out_channel = [64, 96, 128]\n",
    "\n",
    "for out_channel, backbone_key in zip(list_out_channel, backbone_dict.keys()):\n",
    "    backbone_value = backbone_dict[backbone_key]\n",
    "    model = OCDPAFPN(\n",
    "        backbone=backbone_value,\n",
    "        n_classes=80,\n",
    "        num_outs=num_outs,\n",
    "        out_channel=out_channel,\n",
    "        fpn_type=fpn_type,\n",
    "    )\n",
    "\n",
    "    model_name = f\"pafpn_mobilenetv4_conv_{backbone_key}_o{num_outs}_oc{out_channel}_r{image_size[0]}_coco\"\n",
    "    torch_model_path = None  # use initialized model\n",
    "    onnx_model_path = f\"saved_model/{model_name}/{model_name}.onnx\"\n",
    "    tf_folder_path = f\"saved_model/{model_name}\"\n",
    "    tflite_model_path = onnx_model_path.replace(\n",
    "        \".onnx\", \"_full_integer_quant_uint8.tflite\"\n",
    "    )\n",
    "    calib_data_path = \"calibdata.npy\"\n",
    "\n",
    "    converter = Converter(\n",
    "        model,\n",
    "        image_size,\n",
    "        torch_model_path,\n",
    "        onnx_model_path,\n",
    "        tf_folder_path,\n",
    "        tflite_model_path,\n",
    "        calib_data_path,\n",
    "        opset_version=11,\n",
    "    )\n",
    "    converter.convert()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### fpn_type: extra_dw"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/chenxin/anaconda3/envs/ocd/lib/python3.10/site-packages/albumentations/__init__.py:13: UserWarning: A new version of Albumentations is available: 1.4.18 (you have 1.4.16). Upgrade using: pip install -U albumentations. To disable automatic update checks, set the environment variable NO_ALBUMENTATIONS_UPDATE to 1.\n",
      "  check_for_updates()\n",
      "Unexpected keys (classifier.bias, classifier.weight, conv_head.weight, norm_head.bias, norm_head.num_batches_tracked, norm_head.running_mean, norm_head.running_var, norm_head.weight) found while loading pretrained weights. This may be expected if model is being adapted.\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/losses/cross_entropy_loss.py:250: UserWarning: Default ``avg_non_ignore`` is False, if you would like to ignore the certain label and average loss over non-ignore labels, which is the same with PyTorch official cross_entropy, set ``avg_non_ignore=True``.\n",
      "  warnings.warn(\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/utils/wrappers.py:48: TracerWarning: Converting a tensor to a Python integer might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!\n",
      "  size = [int(t * self.scale_factor) for t in x.shape[-2:]]\n",
      "2024-10-11 15:44:59.262469: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-11 15:44:59.498583: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-11 15:44:59.499816: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-11 15:44:59.612959: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-11 15:45:00.251950: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-11 15:45:00.253213: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-11 15:45:01.104202: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-11 15:45:01.105535: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-11 15:45:01.106547: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-11 15:45:01.107495: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1929] Created device /job:localhost/replica:0/task:0/device:GPU:0 with 44940 MB memory:  -> device: 0, name: NVIDIA A100 80GB PCIe, pci bus id: 0000:41:00.0, compute capability: 8.0\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:From /home/chenxin/anaconda3/envs/ocd/lib/python3.10/site-packages/tensorflow/python/util/dispatch.py:1260: resize_nearest_neighbor (from tensorflow.python.ops.image_ops_impl) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Use `tf.image.resize(...method=ResizeMethod.NEAREST_NEIGHBOR...)` instead.\n",
      "INFO:tensorflow:Assets written to: saved_model/pafpn_mobilenetv4_conv_small_extra_dw_o4_oc64_r320_coco/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/pafpn_mobilenetv4_conv_small_extra_dw_o4_oc64_r320_coco/assets\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Using existing calibration data\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-10-11 15:45:24.474189: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:378] Ignored output_format.\n",
      "2024-10-11 15:45:24.474252: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:381] Ignored drop_control_dependency.\n",
      "2024-10-11 15:45:24.475008: I tensorflow/cc/saved_model/reader.cc:83] Reading SavedModel from: saved_model/pafpn_mobilenetv4_conv_small_extra_dw_o4_oc64_r320_coco\n",
      "2024-10-11 15:45:24.493191: I tensorflow/cc/saved_model/reader.cc:51] Reading meta graph with tags { serve }\n",
      "2024-10-11 15:45:24.493228: I tensorflow/cc/saved_model/reader.cc:146] Reading SavedModel debug info (if present) from: saved_model/pafpn_mobilenetv4_conv_small_extra_dw_o4_oc64_r320_coco\n",
      "2024-10-11 15:45:24.567073: I tensorflow/compiler/mlir/mlir_graph_optimization_pass.cc:388] MLIR V1 optimization pass is not enabled\n",
      "2024-10-11 15:45:24.571833: I tensorflow/cc/saved_model/loader.cc:233] Restoring SavedModel bundle.\n",
      "2024-10-11 15:45:24.726379: I tensorflow/cc/saved_model/loader.cc:217] Running initialization op on SavedModel bundle at path: saved_model/pafpn_mobilenetv4_conv_small_extra_dw_o4_oc64_r320_coco\n",
      "2024-10-11 15:45:24.810262: I tensorflow/cc/saved_model/loader.cc:316] SavedModel load for tags { serve }; Status: success: OK. Took 335256 microseconds.\n",
      "2024-10-11 15:45:25.066788: I tensorflow/compiler/mlir/tensorflow/utils/dump_mlir_util.cc:269] disabling MLIR crash reproducer, set env var `MLIR_CRASH_REPRODUCER_DIRECTORY` to enable.\n",
      "Summary on the non-converted ops:\n",
      "---------------------------------\n",
      " * Accepted dialects: tfl, builtin, func\n",
      " * Non-Converted Ops: 142, Total Ops 378, % non-converted = 37.57 %\n",
      " * 142 ARITH ops\n",
      "\n",
      "- arith.constant:  142 occurrences  (f32: 135, i32: 7)\n",
      "\n",
      "\n",
      "\n",
      "  (f32: 22)\n",
      "  (f32: 58)\n",
      "  (f32: 27)\n",
      "  (f32: 38)\n",
      "  (f32: 9)\n",
      "  (f32: 3)\n",
      "  (f32: 76)\n",
      "2024-10-11 15:45:25.385593: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 1.951 G  ops, equivalently 0.975 G  MACs\n",
      "fully_quantize: 0, inference_type: 6, input_inference_type: UINT8, output_inference_type: UINT8\n",
      "2024-10-11 15:45:28.644761: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 1.951 G  ops, equivalently 0.975 G  MACs\n",
      "WARNING:timm.models._builder:Unexpected keys (classifier.bias, classifier.weight, conv_head.weight, norm_head.bias, norm_head.num_batches_tracked, norm_head.running_mean, norm_head.running_var, norm_head.weight) found while loading pretrained weights. This may be expected if model is being adapted.\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/losses/cross_entropy_loss.py:250: UserWarning: Default ``avg_non_ignore`` is False, if you would like to ignore the certain label and average loss over non-ignore labels, which is the same with PyTorch official cross_entropy, set ``avg_non_ignore=True``.\n",
      "  warnings.warn(\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/utils/wrappers.py:48: TracerWarning: Converting a tensor to a Python integer might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!\n",
      "  size = [int(t * self.scale_factor) for t in x.shape[-2:]]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/pafpn_mobilenetv4_conv_medium_extra_dw_o4_oc96_r320_coco/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/pafpn_mobilenetv4_conv_medium_extra_dw_o4_oc96_r320_coco/assets\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Preparing calibration data\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-10-11 15:46:02.367313: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:378] Ignored output_format.\n",
      "2024-10-11 15:46:02.367367: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:381] Ignored drop_control_dependency.\n",
      "2024-10-11 15:46:02.367626: I tensorflow/cc/saved_model/reader.cc:83] Reading SavedModel from: saved_model/pafpn_mobilenetv4_conv_medium_extra_dw_o4_oc96_r320_coco\n",
      "2024-10-11 15:46:02.492431: I tensorflow/cc/saved_model/reader.cc:51] Reading meta graph with tags { serve }\n",
      "2024-10-11 15:46:02.492478: I tensorflow/cc/saved_model/reader.cc:146] Reading SavedModel debug info (if present) from: saved_model/pafpn_mobilenetv4_conv_medium_extra_dw_o4_oc96_r320_coco\n",
      "2024-10-11 15:46:02.545253: I tensorflow/cc/saved_model/loader.cc:233] Restoring SavedModel bundle.\n",
      "2024-10-11 15:46:02.820482: I tensorflow/cc/saved_model/loader.cc:217] Running initialization op on SavedModel bundle at path: saved_model/pafpn_mobilenetv4_conv_medium_extra_dw_o4_oc96_r320_coco\n",
      "2024-10-11 15:46:03.068224: I tensorflow/cc/saved_model/loader.cc:316] SavedModel load for tags { serve }; Status: success: OK. Took 700598 microseconds.\n",
      "Summary on the non-converted ops:\n",
      "---------------------------------\n",
      " * Accepted dialects: tfl, builtin, func\n",
      " * Non-Converted Ops: 203, Total Ops 520, % non-converted = 39.04 %\n",
      " * 203 ARITH ops\n",
      "\n",
      "- arith.constant:  203 occurrences  (f32: 196, i32: 7)\n",
      "\n",
      "\n",
      "\n",
      "  (f32: 30)\n",
      "  (f32: 74)\n",
      "  (f32: 42)\n",
      "  (f32: 52)\n",
      "  (f32: 9)\n",
      "  (f32: 3)\n",
      "  (f32: 104)\n",
      "2024-10-11 15:46:04.136037: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 7.216 G  ops, equivalently 3.608 G  MACs\n",
      "fully_quantize: 0, inference_type: 6, input_inference_type: UINT8, output_inference_type: UINT8\n",
      "2024-10-11 15:46:13.898870: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 7.216 G  ops, equivalently 3.608 G  MACs\n",
      "WARNING:timm.models._builder:Unexpected keys (classifier.bias, classifier.weight, conv_head.weight, norm_head.bias, norm_head.num_batches_tracked, norm_head.running_mean, norm_head.running_var, norm_head.weight) found while loading pretrained weights. This may be expected if model is being adapted.\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/losses/cross_entropy_loss.py:250: UserWarning: Default ``avg_non_ignore`` is False, if you would like to ignore the certain label and average loss over non-ignore labels, which is the same with PyTorch official cross_entropy, set ``avg_non_ignore=True``.\n",
      "  warnings.warn(\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/utils/wrappers.py:48: TracerWarning: Converting a tensor to a Python integer might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!\n",
      "  size = [int(t * self.scale_factor) for t in x.shape[-2:]]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/pafpn_mobilenetv4_conv_large_extra_dw_o4_oc128_r320_coco/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/pafpn_mobilenetv4_conv_large_extra_dw_o4_oc128_r320_coco/assets\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Preparing calibration data\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-10-11 15:47:11.349772: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:378] Ignored output_format.\n",
      "2024-10-11 15:47:11.349828: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:381] Ignored drop_control_dependency.\n",
      "2024-10-11 15:47:11.350061: I tensorflow/cc/saved_model/reader.cc:83] Reading SavedModel from: saved_model/pafpn_mobilenetv4_conv_large_extra_dw_o4_oc128_r320_coco\n",
      "2024-10-11 15:47:11.395327: I tensorflow/cc/saved_model/reader.cc:51] Reading meta graph with tags { serve }\n",
      "2024-10-11 15:47:11.395446: I tensorflow/cc/saved_model/reader.cc:146] Reading SavedModel debug info (if present) from: saved_model/pafpn_mobilenetv4_conv_large_extra_dw_o4_oc128_r320_coco\n",
      "2024-10-11 15:47:11.509483: I tensorflow/cc/saved_model/loader.cc:233] Restoring SavedModel bundle.\n",
      "2024-10-11 15:47:11.795945: I tensorflow/cc/saved_model/loader.cc:217] Running initialization op on SavedModel bundle at path: saved_model/pafpn_mobilenetv4_conv_large_extra_dw_o4_oc128_r320_coco\n",
      "2024-10-11 15:47:12.027556: I tensorflow/cc/saved_model/loader.cc:316] SavedModel load for tags { serve }; Status: success: OK. Took 677496 microseconds.\n",
      "Summary on the non-converted ops:\n",
      "---------------------------------\n",
      " * Accepted dialects: tfl, builtin, func\n",
      " * Non-Converted Ops: 253, Total Ops 645, % non-converted = 39.22 %\n",
      " * 253 ARITH ops\n",
      "\n",
      "- arith.constant:  253 occurrences  (f32: 246, i32: 7)\n",
      "\n",
      "\n",
      "\n",
      "  (f32: 35)\n",
      "  (f32: 84)\n",
      "  (f32: 57)\n",
      "  (f32: 67)\n",
      "  (f32: 9)\n",
      "  (f32: 3)\n",
      "  (f32: 134)\n",
      "2024-10-11 15:47:15.182854: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 15.584 G  ops, equivalently 7.792 G  MACs\n",
      "fully_quantize: 0, inference_type: 6, input_inference_type: UINT8, output_inference_type: UINT8\n",
      "2024-10-11 15:47:34.102312: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 15.584 G  ops, equivalently 7.792 G  MACs\n"
     ]
    }
   ],
   "source": [
    "from models.fpn import OCDPAFPN\n",
    "\n",
    "# Shared export settings for every backbone size.\n",
    "num_outs = 4\n",
    "image_size = (320, 320)\n",
    "fpn_type = \"extra_dw\"\n",
    "\n",
    "# Backbone weights identifier per size; paired position-wise with the FPN\n",
    "# output width in list_out_channel below (small->64, medium->96, large->128).\n",
    "backbone_dict = {\n",
    "    \"small\": \"mobilenetv4_conv_small.e2400_r224_in1k\",\n",
    "    \"medium\": \"mobilenetv4_conv_medium.e500_r256_in1k\",\n",
    "    \"large\": \"mobilenetv4_conv_large.e500_r256_in1k\",\n",
    "}\n",
    "list_out_channel = [64, 96, 128]\n",
    "\n",
    "# Iterate (key, value) pairs directly instead of keys() plus a dict lookup.\n",
    "for out_channel, (backbone_key, backbone_value) in zip(\n",
    "    list_out_channel, backbone_dict.items()\n",
    "):\n",
    "    model = OCDPAFPN(\n",
    "        backbone=backbone_value,\n",
    "        n_classes=80,\n",
    "        num_outs=num_outs,\n",
    "        out_channel=out_channel,\n",
    "        fpn_type=fpn_type,\n",
    "    )\n",
    "\n",
    "    # All artifact paths are derived from one canonical model name.\n",
    "    model_name = f\"pafpn_mobilenetv4_conv_{backbone_key}_{fpn_type}_o{num_outs}_oc{out_channel}_r{image_size[0]}_coco\"\n",
    "    torch_model_path = None  # use initialized model\n",
    "    onnx_model_path = f\"saved_model/{model_name}/{model_name}.onnx\"\n",
    "    tf_folder_path = f\"saved_model/{model_name}\"\n",
    "    tflite_model_path = onnx_model_path.replace(\n",
    "        \".onnx\", \"_full_integer_quant_uint8.tflite\"\n",
    "    )\n",
    "    calib_data_path = \"calibdata.npy\"\n",
    "\n",
    "    # torch -> ONNX -> TF SavedModel -> full-integer (uint8) TFLite.\n",
    "    converter = Converter(\n",
    "        model,\n",
    "        image_size,\n",
    "        torch_model_path,\n",
    "        onnx_model_path,\n",
    "        tf_folder_path,\n",
    "        tflite_model_path,\n",
    "        calib_data_path,\n",
    "        opset_version=11,\n",
    "    )\n",
    "    converter.convert()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### fpn_type: ib"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "WARNING:timm.models._builder:Unexpected keys (classifier.bias, classifier.weight, conv_head.weight, norm_head.bias, norm_head.num_batches_tracked, norm_head.running_mean, norm_head.running_var, norm_head.weight) found while loading pretrained weights. This may be expected if model is being adapted.\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/losses/cross_entropy_loss.py:250: UserWarning: Default ``avg_non_ignore`` is False, if you would like to ignore the certain label and average loss over non-ignore labels, which is the same with PyTorch official cross_entropy, set ``avg_non_ignore=True``.\n",
      "  warnings.warn(\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/utils/wrappers.py:48: TracerWarning: Converting a tensor to a Python integer might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!\n",
      "  size = [int(t * self.scale_factor) for t in x.shape[-2:]]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/pafpn_mobilenetv4_conv_small_ib_o4_oc64_r320_coco/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/pafpn_mobilenetv4_conv_small_ib_o4_oc64_r320_coco/assets\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Preparing calibration data\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-10-11 15:47:51.354770: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:378] Ignored output_format.\n",
      "2024-10-11 15:47:51.354809: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:381] Ignored drop_control_dependency.\n",
      "2024-10-11 15:47:51.355025: I tensorflow/cc/saved_model/reader.cc:83] Reading SavedModel from: saved_model/pafpn_mobilenetv4_conv_small_ib_o4_oc64_r320_coco\n",
      "2024-10-11 15:47:51.389091: I tensorflow/cc/saved_model/reader.cc:51] Reading meta graph with tags { serve }\n",
      "2024-10-11 15:47:51.389118: I tensorflow/cc/saved_model/reader.cc:146] Reading SavedModel debug info (if present) from: saved_model/pafpn_mobilenetv4_conv_small_ib_o4_oc64_r320_coco\n",
      "2024-10-11 15:47:51.402246: I tensorflow/cc/saved_model/loader.cc:233] Restoring SavedModel bundle.\n",
      "2024-10-11 15:47:51.543524: I tensorflow/cc/saved_model/loader.cc:217] Running initialization op on SavedModel bundle at path: saved_model/pafpn_mobilenetv4_conv_small_ib_o4_oc64_r320_coco\n",
      "2024-10-11 15:47:51.642476: I tensorflow/cc/saved_model/loader.cc:316] SavedModel load for tags { serve }; Status: success: OK. Took 287454 microseconds.\n",
      "Summary on the non-converted ops:\n",
      "---------------------------------\n",
      " * Accepted dialects: tfl, builtin, func\n",
      " * Non-Converted Ops: 135, Total Ops 356, % non-converted = 37.92 %\n",
      " * 135 ARITH ops\n",
      "\n",
      "- arith.constant:  135 occurrences  (f32: 128, i32: 7)\n",
      "\n",
      "\n",
      "\n",
      "  (f32: 22)\n",
      "  (f32: 58)\n",
      "  (f32: 21)\n",
      "  (f32: 35)\n",
      "  (f32: 9)\n",
      "  (f32: 3)\n",
      "  (f32: 70)\n",
      "2024-10-11 15:47:52.507495: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 2.105 G  ops, equivalently 1.052 G  MACs\n",
      "fully_quantize: 0, inference_type: 6, input_inference_type: UINT8, output_inference_type: UINT8\n",
      "2024-10-11 15:47:55.562606: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 2.105 G  ops, equivalently 1.052 G  MACs\n",
      "WARNING:timm.models._builder:Unexpected keys (classifier.bias, classifier.weight, conv_head.weight, norm_head.bias, norm_head.num_batches_tracked, norm_head.running_mean, norm_head.running_var, norm_head.weight) found while loading pretrained weights. This may be expected if model is being adapted.\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/losses/cross_entropy_loss.py:250: UserWarning: Default ``avg_non_ignore`` is False, if you would like to ignore the certain label and average loss over non-ignore labels, which is the same with PyTorch official cross_entropy, set ``avg_non_ignore=True``.\n",
      "  warnings.warn(\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/utils/wrappers.py:48: TracerWarning: Converting a tensor to a Python integer might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!\n",
      "  size = [int(t * self.scale_factor) for t in x.shape[-2:]]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/pafpn_mobilenetv4_conv_medium_ib_o4_oc96_r320_coco/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/pafpn_mobilenetv4_conv_medium_ib_o4_oc96_r320_coco/assets\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Preparing calibration data\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-10-11 15:48:30.251712: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:378] Ignored output_format.\n",
      "2024-10-11 15:48:30.251768: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:381] Ignored drop_control_dependency.\n",
      "2024-10-11 15:48:30.252024: I tensorflow/cc/saved_model/reader.cc:83] Reading SavedModel from: saved_model/pafpn_mobilenetv4_conv_medium_ib_o4_oc96_r320_coco\n",
      "2024-10-11 15:48:30.269558: I tensorflow/cc/saved_model/reader.cc:51] Reading meta graph with tags { serve }\n",
      "2024-10-11 15:48:30.269601: I tensorflow/cc/saved_model/reader.cc:146] Reading SavedModel debug info (if present) from: saved_model/pafpn_mobilenetv4_conv_medium_ib_o4_oc96_r320_coco\n",
      "2024-10-11 15:48:30.297502: I tensorflow/cc/saved_model/loader.cc:233] Restoring SavedModel bundle.\n",
      "2024-10-11 15:48:30.481067: I tensorflow/cc/saved_model/loader.cc:217] Running initialization op on SavedModel bundle at path: saved_model/pafpn_mobilenetv4_conv_medium_ib_o4_oc96_r320_coco\n",
      "2024-10-11 15:48:30.718034: I tensorflow/cc/saved_model/loader.cc:316] SavedModel load for tags { serve }; Status: success: OK. Took 466011 microseconds.\n",
      "Summary on the non-converted ops:\n",
      "---------------------------------\n",
      " * Accepted dialects: tfl, builtin, func\n",
      " * Non-Converted Ops: 196, Total Ops 498, % non-converted = 39.36 %\n",
      " * 196 ARITH ops\n",
      "\n",
      "- arith.constant:  196 occurrences  (f32: 189, i32: 7)\n",
      "\n",
      "\n",
      "\n",
      "  (f32: 30)\n",
      "  (f32: 74)\n",
      "  (f32: 36)\n",
      "  (f32: 49)\n",
      "  (f32: 9)\n",
      "  (f32: 3)\n",
      "  (f32: 98)\n",
      "2024-10-11 15:48:32.284693: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 7.576 G  ops, equivalently 3.788 G  MACs\n",
      "fully_quantize: 0, inference_type: 6, input_inference_type: UINT8, output_inference_type: UINT8\n",
      "2024-10-11 15:48:40.941434: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 7.576 G  ops, equivalently 3.788 G  MACs\n",
      "WARNING:timm.models._builder:Unexpected keys (classifier.bias, classifier.weight, conv_head.weight, norm_head.bias, norm_head.num_batches_tracked, norm_head.running_mean, norm_head.running_var, norm_head.weight) found while loading pretrained weights. This may be expected if model is being adapted.\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/losses/cross_entropy_loss.py:250: UserWarning: Default ``avg_non_ignore`` is False, if you would like to ignore the certain label and average loss over non-ignore labels, which is the same with PyTorch official cross_entropy, set ``avg_non_ignore=True``.\n",
      "  warnings.warn(\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/utils/wrappers.py:48: TracerWarning: Converting a tensor to a Python integer might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!\n",
      "  size = [int(t * self.scale_factor) for t in x.shape[-2:]]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/pafpn_mobilenetv4_conv_large_ib_o4_oc128_r320_coco/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/pafpn_mobilenetv4_conv_large_ib_o4_oc128_r320_coco/assets\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Preparing calibration data\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-10-11 15:49:28.219257: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:378] Ignored output_format.\n",
      "2024-10-11 15:49:28.219288: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:381] Ignored drop_control_dependency.\n",
      "2024-10-11 15:49:28.219486: I tensorflow/cc/saved_model/reader.cc:83] Reading SavedModel from: saved_model/pafpn_mobilenetv4_conv_large_ib_o4_oc128_r320_coco\n",
      "2024-10-11 15:49:28.321636: I tensorflow/cc/saved_model/reader.cc:51] Reading meta graph with tags { serve }\n",
      "2024-10-11 15:49:28.321683: I tensorflow/cc/saved_model/reader.cc:146] Reading SavedModel debug info (if present) from: saved_model/pafpn_mobilenetv4_conv_large_ib_o4_oc128_r320_coco\n",
      "2024-10-11 15:49:28.366517: I tensorflow/cc/saved_model/loader.cc:233] Restoring SavedModel bundle.\n",
      "2024-10-11 15:49:28.591980: I tensorflow/cc/saved_model/loader.cc:217] Running initialization op on SavedModel bundle at path: saved_model/pafpn_mobilenetv4_conv_large_ib_o4_oc128_r320_coco\n",
      "2024-10-11 15:49:28.772154: I tensorflow/cc/saved_model/loader.cc:316] SavedModel load for tags { serve }; Status: success: OK. Took 552669 microseconds.\n",
      "Summary on the non-converted ops:\n",
      "---------------------------------\n",
      " * Accepted dialects: tfl, builtin, func\n",
      " * Non-Converted Ops: 246, Total Ops 623, % non-converted = 39.49 %\n",
      " * 246 ARITH ops\n",
      "\n",
      "- arith.constant:  246 occurrences  (f32: 239, i32: 7)\n",
      "\n",
      "\n",
      "\n",
      "  (f32: 35)\n",
      "  (f32: 84)\n",
      "  (f32: 51)\n",
      "  (f32: 64)\n",
      "  (f32: 9)\n",
      "  (f32: 3)\n",
      "  (f32: 128)\n",
      "2024-10-11 15:49:30.973582: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 16.237 G  ops, equivalently 8.118 G  MACs\n",
      "fully_quantize: 0, inference_type: 6, input_inference_type: UINT8, output_inference_type: UINT8\n",
      "2024-10-11 15:49:52.680640: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 16.237 G  ops, equivalently 8.118 G  MACs\n"
     ]
    }
   ],
   "source": [
    "from models.fpn import OCDPAFPN\n",
    "\n",
    "# Shared export settings for every backbone size.\n",
    "num_outs = 4\n",
    "image_size = (320, 320)\n",
    "fpn_type = \"ib\"\n",
    "\n",
    "# Backbone weights identifier per size; paired position-wise with the FPN\n",
    "# output width in list_out_channel below (small->64, medium->96, large->128).\n",
    "backbone_dict = {\n",
    "    \"small\": \"mobilenetv4_conv_small.e2400_r224_in1k\",\n",
    "    \"medium\": \"mobilenetv4_conv_medium.e500_r256_in1k\",\n",
    "    \"large\": \"mobilenetv4_conv_large.e500_r256_in1k\",\n",
    "}\n",
    "list_out_channel = [64, 96, 128]\n",
    "\n",
    "# Iterate (key, value) pairs directly instead of keys() plus a dict lookup.\n",
    "for out_channel, (backbone_key, backbone_value) in zip(\n",
    "    list_out_channel, backbone_dict.items()\n",
    "):\n",
    "    model = OCDPAFPN(\n",
    "        backbone=backbone_value,\n",
    "        n_classes=80,\n",
    "        num_outs=num_outs,\n",
    "        out_channel=out_channel,\n",
    "        fpn_type=fpn_type,\n",
    "    )\n",
    "\n",
    "    # All artifact paths are derived from one canonical model name.\n",
    "    model_name = f\"pafpn_mobilenetv4_conv_{backbone_key}_{fpn_type}_o{num_outs}_oc{out_channel}_r{image_size[0]}_coco\"\n",
    "    torch_model_path = None  # use initialized model\n",
    "    onnx_model_path = f\"saved_model/{model_name}/{model_name}.onnx\"\n",
    "    tf_folder_path = f\"saved_model/{model_name}\"\n",
    "    tflite_model_path = onnx_model_path.replace(\n",
    "        \".onnx\", \"_full_integer_quant_uint8.tflite\"\n",
    "    )\n",
    "    calib_data_path = \"calibdata.npy\"\n",
    "\n",
    "    # torch -> ONNX -> TF SavedModel -> full-integer (uint8) TFLite.\n",
    "    converter = Converter(\n",
    "        model,\n",
    "        image_size,\n",
    "        torch_model_path,\n",
    "        onnx_model_path,\n",
    "        tf_folder_path,\n",
    "        tflite_model_path,\n",
    "        calib_data_path,\n",
    "        opset_version=11,\n",
    "    )\n",
    "    converter.convert()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### fpn_type: convnext"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "WARNING:timm.models._builder:Unexpected keys (classifier.bias, classifier.weight, conv_head.weight, norm_head.bias, norm_head.num_batches_tracked, norm_head.running_mean, norm_head.running_var, norm_head.weight) found while loading pretrained weights. This may be expected if model is being adapted.\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/losses/cross_entropy_loss.py:250: UserWarning: Default ``avg_non_ignore`` is False, if you would like to ignore the certain label and average loss over non-ignore labels, which is the same with PyTorch official cross_entropy, set ``avg_non_ignore=True``.\n",
      "  warnings.warn(\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/utils/wrappers.py:48: TracerWarning: Converting a tensor to a Python integer might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!\n",
      "  size = [int(t * self.scale_factor) for t in x.shape[-2:]]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/pafpn_mobilenetv4_conv_small_convnext_o4_oc64_r320_coco/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/pafpn_mobilenetv4_conv_small_convnext_o4_oc64_r320_coco/assets\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Preparing calibration data\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-10-11 15:50:15.628325: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:378] Ignored output_format.\n",
      "2024-10-11 15:50:15.628376: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:381] Ignored drop_control_dependency.\n",
      "2024-10-11 15:50:15.628626: I tensorflow/cc/saved_model/reader.cc:83] Reading SavedModel from: saved_model/pafpn_mobilenetv4_conv_small_convnext_o4_oc64_r320_coco\n",
      "2024-10-11 15:50:15.640004: I tensorflow/cc/saved_model/reader.cc:51] Reading meta graph with tags { serve }\n",
      "2024-10-11 15:50:15.640147: I tensorflow/cc/saved_model/reader.cc:146] Reading SavedModel debug info (if present) from: saved_model/pafpn_mobilenetv4_conv_small_convnext_o4_oc64_r320_coco\n",
      "2024-10-11 15:50:15.658773: I tensorflow/cc/saved_model/loader.cc:233] Restoring SavedModel bundle.\n",
      "2024-10-11 15:50:15.760639: I tensorflow/cc/saved_model/loader.cc:217] Running initialization op on SavedModel bundle at path: saved_model/pafpn_mobilenetv4_conv_small_convnext_o4_oc64_r320_coco\n",
      "2024-10-11 15:50:15.850140: I tensorflow/cc/saved_model/loader.cc:316] SavedModel load for tags { serve }; Status: success: OK. Took 221516 microseconds.\n",
      "Summary on the non-converted ops:\n",
      "---------------------------------\n",
      " * Accepted dialects: tfl, builtin, func\n",
      " * Non-Converted Ops: 135, Total Ops 347, % non-converted = 38.90 %\n",
      " * 135 ARITH ops\n",
      "\n",
      "- arith.constant:  135 occurrences  (f32: 128, i32: 7)\n",
      "\n",
      "\n",
      "\n",
      "  (f32: 22)\n",
      "  (f32: 58)\n",
      "  (f32: 21)\n",
      "  (f32: 32)\n",
      "  (f32: 9)\n",
      "  (f32: 3)\n",
      "  (f32: 64)\n",
      "2024-10-11 15:50:16.755793: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 1.996 G  ops, equivalently 0.998 G  MACs\n",
      "fully_quantize: 0, inference_type: 6, input_inference_type: UINT8, output_inference_type: UINT8\n",
      "2024-10-11 15:50:20.569140: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 1.996 G  ops, equivalently 0.998 G  MACs\n",
      "WARNING:timm.models._builder:Unexpected keys (classifier.bias, classifier.weight, conv_head.weight, norm_head.bias, norm_head.num_batches_tracked, norm_head.running_mean, norm_head.running_var, norm_head.weight) found while loading pretrained weights. This may be expected if model is being adapted.\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/losses/cross_entropy_loss.py:250: UserWarning: Default ``avg_non_ignore`` is False, if you would like to ignore the certain label and average loss over non-ignore labels, which is the same with PyTorch official cross_entropy, set ``avg_non_ignore=True``.\n",
      "  warnings.warn(\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/utils/wrappers.py:48: TracerWarning: Converting a tensor to a Python integer might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!\n",
      "  size = [int(t * self.scale_factor) for t in x.shape[-2:]]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/pafpn_mobilenetv4_conv_medium_convnext_o4_oc96_r320_coco/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/pafpn_mobilenetv4_conv_medium_convnext_o4_oc96_r320_coco/assets\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Preparing calibration data\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-10-11 15:51:03.230712: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:378] Ignored output_format.\n",
      "2024-10-11 15:51:03.230775: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:381] Ignored drop_control_dependency.\n",
      "2024-10-11 15:51:03.231094: I tensorflow/cc/saved_model/reader.cc:83] Reading SavedModel from: saved_model/pafpn_mobilenetv4_conv_medium_convnext_o4_oc96_r320_coco\n",
      "2024-10-11 15:51:03.267472: I tensorflow/cc/saved_model/reader.cc:51] Reading meta graph with tags { serve }\n",
      "2024-10-11 15:51:03.267511: I tensorflow/cc/saved_model/reader.cc:146] Reading SavedModel debug info (if present) from: saved_model/pafpn_mobilenetv4_conv_medium_convnext_o4_oc96_r320_coco\n",
      "2024-10-11 15:51:03.314259: I tensorflow/cc/saved_model/loader.cc:233] Restoring SavedModel bundle.\n",
      "2024-10-11 15:51:03.558044: I tensorflow/cc/saved_model/loader.cc:217] Running initialization op on SavedModel bundle at path: saved_model/pafpn_mobilenetv4_conv_medium_convnext_o4_oc96_r320_coco\n",
      "2024-10-11 15:51:03.818826: I tensorflow/cc/saved_model/loader.cc:316] SavedModel load for tags { serve }; Status: success: OK. Took 587732 microseconds.\n",
      "Summary on the non-converted ops:\n",
      "---------------------------------\n",
      " * Accepted dialects: tfl, builtin, func\n",
      " * Non-Converted Ops: 196, Total Ops 489, % non-converted = 40.08 %\n",
      " * 196 ARITH ops\n",
      "\n",
      "- arith.constant:  196 occurrences  (f32: 189, i32: 7)\n",
      "\n",
      "\n",
      "\n",
      "  (f32: 30)\n",
      "  (f32: 74)\n",
      "  (f32: 36)\n",
      "  (f32: 46)\n",
      "  (f32: 9)\n",
      "  (f32: 3)\n",
      "  (f32: 92)\n",
      "2024-10-11 15:51:04.948205: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 7.335 G  ops, equivalently 3.668 G  MACs\n",
      "fully_quantize: 0, inference_type: 6, input_inference_type: UINT8, output_inference_type: UINT8\n",
      "2024-10-11 15:51:14.401381: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 7.335 G  ops, equivalently 3.668 G  MACs\n",
      "WARNING:timm.models._builder:Unexpected keys (classifier.bias, classifier.weight, conv_head.weight, norm_head.bias, norm_head.num_batches_tracked, norm_head.running_mean, norm_head.running_var, norm_head.weight) found while loading pretrained weights. This may be expected if model is being adapted.\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/losses/cross_entropy_loss.py:250: UserWarning: Default ``avg_non_ignore`` is False, if you would like to ignore the certain label and average loss over non-ignore labels, which is the same with PyTorch official cross_entropy, set ``avg_non_ignore=True``.\n",
      "  warnings.warn(\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/utils/wrappers.py:48: TracerWarning: Converting a tensor to a Python integer might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!\n",
      "  size = [int(t * self.scale_factor) for t in x.shape[-2:]]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/pafpn_mobilenetv4_conv_large_convnext_o4_oc128_r320_coco/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/pafpn_mobilenetv4_conv_large_convnext_o4_oc128_r320_coco/assets\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Preparing calibration data\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-10-11 15:52:29.029818: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:378] Ignored output_format.\n",
      "2024-10-11 15:52:29.029896: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:381] Ignored drop_control_dependency.\n",
      "2024-10-11 15:52:29.030216: I tensorflow/cc/saved_model/reader.cc:83] Reading SavedModel from: saved_model/pafpn_mobilenetv4_conv_large_convnext_o4_oc128_r320_coco\n",
      "2024-10-11 15:52:29.081927: I tensorflow/cc/saved_model/reader.cc:51] Reading meta graph with tags { serve }\n",
      "2024-10-11 15:52:29.081960: I tensorflow/cc/saved_model/reader.cc:146] Reading SavedModel debug info (if present) from: saved_model/pafpn_mobilenetv4_conv_large_convnext_o4_oc128_r320_coco\n",
      "2024-10-11 15:52:29.140118: I tensorflow/cc/saved_model/loader.cc:233] Restoring SavedModel bundle.\n",
      "2024-10-11 15:52:29.316386: I tensorflow/cc/saved_model/loader.cc:217] Running initialization op on SavedModel bundle at path: saved_model/pafpn_mobilenetv4_conv_large_convnext_o4_oc128_r320_coco\n",
      "2024-10-11 15:52:29.518300: I tensorflow/cc/saved_model/loader.cc:316] SavedModel load for tags { serve }; Status: success: OK. Took 488087 microseconds.\n",
      "Summary on the non-converted ops:\n",
      "---------------------------------\n",
      " * Accepted dialects: tfl, builtin, func\n",
      " * Non-Converted Ops: 246, Total Ops 614, % non-converted = 40.07 %\n",
      " * 246 ARITH ops\n",
      "\n",
      "- arith.constant:  246 occurrences  (f32: 239, i32: 7)\n",
      "\n",
      "\n",
      "\n",
      "  (f32: 35)\n",
      "  (f32: 84)\n",
      "  (f32: 51)\n",
      "  (f32: 61)\n",
      "  (f32: 9)\n",
      "  (f32: 3)\n",
      "  (f32: 122)\n",
      "2024-10-11 15:52:32.073767: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 15.812 G  ops, equivalently 7.906 G  MACs\n",
      "fully_quantize: 0, inference_type: 6, input_inference_type: UINT8, output_inference_type: UINT8\n",
      "2024-10-11 15:52:49.327069: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 15.812 G  ops, equivalently 7.906 G  MACs\n"
     ]
    }
   ],
   "source": [
    "from models.fpn import OCDPAFPN\n",
    "\n",
    "# Export one PAFPN model per MobileNetV4 backbone size.\n",
    "num_outs = 4\n",
    "\n",
    "image_size = (320, 320)\n",
    "backbone_dict = {\n",
    "    \"small\": \"mobilenetv4_conv_small.e2400_r224_in1k\",\n",
    "    \"medium\": \"mobilenetv4_conv_medium.e500_r256_in1k\",\n",
    "    \"large\": \"mobilenetv4_conv_large.e500_r256_in1k\",\n",
    "}\n",
    "fpn_type = \"convnext\"\n",
    "\n",
    "list_out_channel = [64, 96, 128]\n",
    "\n",
    "# Pair each FPN output width with its backbone variant (order matters:\n",
    "# 64->small, 96->medium, 128->large) and run the full\n",
    "# torch -> onnx -> tf -> uint8 tflite pipeline for each combination.\n",
    "for out_channel, (backbone_key, backbone_value) in zip(\n",
    "    list_out_channel, backbone_dict.items()\n",
    "):\n",
    "    model = OCDPAFPN(\n",
    "        backbone=backbone_value,\n",
    "        n_classes=80,\n",
    "        num_outs=num_outs,\n",
    "        out_channel=out_channel,\n",
    "        fpn_type=fpn_type,\n",
    "    )\n",
    "\n",
    "    model_name = f\"pafpn_mobilenetv4_conv_{backbone_key}_{fpn_type}_o{num_outs}_oc{out_channel}_r{image_size[0]}_coco\"\n",
    "    torch_model_path = None  # use initialized model\n",
    "    # All artifacts live under saved_model/<model_name>/.\n",
    "    onnx_model_path = f\"saved_model/{model_name}/{model_name}.onnx\"\n",
    "    tf_folder_path = f\"saved_model/{model_name}\"\n",
    "    tflite_model_path = onnx_model_path.replace(\n",
    "        \".onnx\", \"_full_integer_quant_uint8.tflite\"\n",
    "    )\n",
    "    calib_data_path = \"calibdata.npy\"\n",
    "\n",
    "    converter = Converter(\n",
    "        model,\n",
    "        image_size,\n",
    "        torch_model_path,\n",
    "        onnx_model_path,\n",
    "        tf_folder_path,\n",
    "        tflite_model_path,\n",
    "        calib_data_path,\n",
    "        opset_version=11,\n",
    "    )\n",
    "    converter.convert()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## MobileNetV4Unet\n",
    "\n",
    "This model only converts successfully with `Converter1`, and only when the padding code is commented out."
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Converter1\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Unexpected keys (classifier.bias, classifier.weight, conv_head.weight, norm_head.bias, norm_head.num_batches_tracked, norm_head.running_mean, norm_head.running_var, norm_head.weight) found while loading pretrained weights. This may be expected if model is being adapted.\n",
      "2024-10-11 14:51:21.320205: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-11 14:51:21.321875: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-11 14:51:21.323539: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-11 14:51:21.324820: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-11 14:51:21.335474: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-11 14:51:21.336818: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-11 14:51:21.338567: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-11 14:51:21.339879: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-11 14:51:21.341779: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-11 14:51:21.342962: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-11 14:51:21.344569: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-11 14:51:21.345628: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-11 14:51:23.072427: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-11 14:51:23.075544: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-11 14:51:23.076886: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-11 14:51:23.077925: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-11 14:51:23.079611: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-11 14:51:23.081055: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-11 14:51:23.082406: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-11 14:51:23.084707: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-11 14:51:23.087464: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-11 14:51:23.089764: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-11 14:51:23.091216: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-11 14:51:23.092247: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-11 14:51:23.184737: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-11 14:51:23.187225: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-11 14:51:23.188421: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-11 14:51:23.189518: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-11 14:51:23.190979: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-11 14:51:23.192918: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-11 14:51:23.194040: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-11 14:51:23.195047: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-11 14:51:23.196337: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-11 14:51:23.198061: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1929] Created device /job:localhost/replica:0/task:0/device:GPU:0 with 22126 MB memory:  -> device: 0, name: NVIDIA A100 80GB PCIe, pci bus id: 0000:01:00.0, compute capability: 8.0\n",
      "2024-10-11 14:51:23.207344: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-11 14:51:23.208634: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1929] Created device /job:localhost/replica:0/task:0/device:GPU:1 with 51554 MB memory:  -> device: 1, name: NVIDIA A100 80GB PCIe, pci bus id: 0000:41:00.0, compute capability: 8.0\n",
      "2024-10-11 14:51:23.209143: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-11 14:51:23.210278: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1929] Created device /job:localhost/replica:0/task:0/device:GPU:2 with 18226 MB memory:  -> device: 2, name: NVIDIA A100 80GB PCIe, pci bus id: 0000:81:00.0, compute capability: 8.0\n",
      "2024-10-11 14:51:23.210584: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-11 14:51:23.212289: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1929] Created device /job:localhost/replica:0/task:0/device:GPU:3 with 1286 MB memory:  -> device: 3, name: NVIDIA A100 80GB PCIe, pci bus id: 0000:c1:00.0, compute capability: 8.0\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/unet_mobilenetv4_conv_small_bilinear_r320_w0.25/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/unet_mobilenetv4_conv_small_bilinear_r320_w0.25/assets\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Preparing calibration data\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-10-11 14:51:34.925234: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:378] Ignored output_format.\n",
      "2024-10-11 14:51:34.925272: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:381] Ignored drop_control_dependency.\n",
      "2024-10-11 14:51:34.925897: I tensorflow/cc/saved_model/reader.cc:83] Reading SavedModel from: saved_model/unet_mobilenetv4_conv_small_bilinear_r320_w0.25\n",
      "2024-10-11 14:51:34.930256: I tensorflow/cc/saved_model/reader.cc:51] Reading meta graph with tags { serve }\n",
      "2024-10-11 14:51:34.930282: I tensorflow/cc/saved_model/reader.cc:146] Reading SavedModel debug info (if present) from: saved_model/unet_mobilenetv4_conv_small_bilinear_r320_w0.25\n",
      "2024-10-11 14:51:34.942918: I tensorflow/compiler/mlir/mlir_graph_optimization_pass.cc:388] MLIR V1 optimization pass is not enabled\n",
      "2024-10-11 14:51:34.944888: I tensorflow/cc/saved_model/loader.cc:233] Restoring SavedModel bundle.\n",
      "2024-10-11 14:51:34.991603: I tensorflow/cc/saved_model/loader.cc:217] Running initialization op on SavedModel bundle at path: saved_model/unet_mobilenetv4_conv_small_bilinear_r320_w0.25\n",
      "2024-10-11 14:51:35.028150: I tensorflow/cc/saved_model/loader.cc:316] SavedModel load for tags { serve }; Status: success: OK. Took 102253 microseconds.\n",
      "2024-10-11 14:51:35.100732: I tensorflow/compiler/mlir/tensorflow/utils/dump_mlir_util.cc:269] disabling MLIR crash reproducer, set env var `MLIR_CRASH_REPRODUCER_DIRECTORY` to enable.\n",
      "Summary on the non-converted ops:\n",
      "---------------------------------\n",
      " * Accepted dialects: tfl, builtin, func\n",
      " * Non-Converted Ops: 118, Total Ops 268, % non-converted = 44.03 %\n",
      " * 118 ARITH ops\n",
      "\n",
      "- arith.constant:  118 occurrences  (f32: 110, i32: 8)\n",
      "\n",
      "\n",
      "\n",
      "  (f32: 10)\n",
      "  (f32: 3)\n",
      "  (f32: 41)\n",
      "  (f32: 15)\n",
      "  (f32: 24)\n",
      "  (f32: 1)\n",
      "  (f32: 3)\n",
      "  (f32: 50)\n",
      "2024-10-11 14:51:35.312836: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 1.700 G  ops, equivalently 0.850 G  MACs\n",
      "fully_quantize: 0, inference_type: 6, input_inference_type: UINT8, output_inference_type: UINT8\n",
      "2024-10-11 14:51:37.186428: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 1.700 G  ops, equivalently 0.850 G  MACs\n",
      "WARNING:timm.models._builder:Unexpected keys (classifier.bias, classifier.weight, conv_head.weight, norm_head.bias, norm_head.num_batches_tracked, norm_head.running_mean, norm_head.running_var, norm_head.weight) found while loading pretrained weights. This may be expected if model is being adapted.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/unet_mobilenetv4_conv_medium_bilinear_r320_w0.5/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/unet_mobilenetv4_conv_medium_bilinear_r320_w0.5/assets\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Preparing calibration data\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-10-11 14:52:02.889835: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:378] Ignored output_format.\n",
      "2024-10-11 14:52:02.889899: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:381] Ignored drop_control_dependency.\n",
      "2024-10-11 14:52:02.890139: I tensorflow/cc/saved_model/reader.cc:83] Reading SavedModel from: saved_model/unet_mobilenetv4_conv_medium_bilinear_r320_w0.5\n",
      "2024-10-11 14:52:02.909918: I tensorflow/cc/saved_model/reader.cc:51] Reading meta graph with tags { serve }\n",
      "2024-10-11 14:52:02.909970: I tensorflow/cc/saved_model/reader.cc:146] Reading SavedModel debug info (if present) from: saved_model/unet_mobilenetv4_conv_medium_bilinear_r320_w0.5\n",
      "2024-10-11 14:52:02.975153: I tensorflow/cc/saved_model/loader.cc:233] Restoring SavedModel bundle.\n",
      "2024-10-11 14:52:03.162224: I tensorflow/cc/saved_model/loader.cc:217] Running initialization op on SavedModel bundle at path: saved_model/unet_mobilenetv4_conv_medium_bilinear_r320_w0.5\n",
      "2024-10-11 14:52:03.279094: I tensorflow/cc/saved_model/loader.cc:316] SavedModel load for tags { serve }; Status: success: OK. Took 388955 microseconds.\n",
      "Summary on the non-converted ops:\n",
      "---------------------------------\n",
      " * Accepted dialects: tfl, builtin, func\n",
      " * Non-Converted Ops: 180, Total Ops 411, % non-converted = 43.80 %\n",
      " * 180 ARITH ops\n",
      "\n",
      "- arith.constant:  180 occurrences  (f32: 172, i32: 8)\n",
      "\n",
      "\n",
      "\n",
      "  (f32: 18)\n",
      "  (f32: 3)\n",
      "  (f32: 57)\n",
      "  (f32: 30)\n",
      "  (f32: 38)\n",
      "  (f32: 1)\n",
      "  (f32: 3)\n",
      "  (f32: 78)\n",
      "2024-10-11 14:52:04.460691: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 7.076 G  ops, equivalently 3.538 G  MACs\n",
      "fully_quantize: 0, inference_type: 6, input_inference_type: UINT8, output_inference_type: UINT8\n",
      "2024-10-11 14:52:10.663343: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 7.076 G  ops, equivalently 3.538 G  MACs\n",
      "WARNING:timm.models._builder:Unexpected keys (classifier.bias, classifier.weight, conv_head.weight, norm_head.bias, norm_head.num_batches_tracked, norm_head.running_mean, norm_head.running_var, norm_head.weight) found while loading pretrained weights. This may be expected if model is being adapted.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/unet_mobilenetv4_conv_large_bilinear_r320_w0.75/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/unet_mobilenetv4_conv_large_bilinear_r320_w0.75/assets\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Preparing calibration data\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-10-11 14:52:54.763347: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:378] Ignored output_format.\n",
      "2024-10-11 14:52:54.763404: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:381] Ignored drop_control_dependency.\n",
      "2024-10-11 14:52:54.763679: I tensorflow/cc/saved_model/reader.cc:83] Reading SavedModel from: saved_model/unet_mobilenetv4_conv_large_bilinear_r320_w0.75\n",
      "2024-10-11 14:52:55.184128: I tensorflow/cc/saved_model/reader.cc:51] Reading meta graph with tags { serve }\n",
      "2024-10-11 14:52:55.184177: I tensorflow/cc/saved_model/reader.cc:146] Reading SavedModel debug info (if present) from: saved_model/unet_mobilenetv4_conv_large_bilinear_r320_w0.75\n",
      "2024-10-11 14:52:55.298131: I tensorflow/cc/saved_model/loader.cc:233] Restoring SavedModel bundle.\n",
      "2024-10-11 14:52:55.600380: I tensorflow/cc/saved_model/loader.cc:217] Running initialization op on SavedModel bundle at path: saved_model/unet_mobilenetv4_conv_large_bilinear_r320_w0.75\n",
      "2024-10-11 14:52:55.713312: I tensorflow/cc/saved_model/loader.cc:316] SavedModel load for tags { serve }; Status: success: OK. Took 949634 microseconds.\n",
      "Summary on the non-converted ops:\n",
      "---------------------------------\n",
      " * Accepted dialects: tfl, builtin, func\n",
      " * Non-Converted Ops: 230, Total Ops 536, % non-converted = 42.91 %\n",
      " * 230 ARITH ops\n",
      "\n",
      "- arith.constant:  230 occurrences  (f32: 222, i32: 8)\n",
      "\n",
      "\n",
      "\n",
      "  (f32: 23)\n",
      "  (f32: 3)\n",
      "  (f32: 67)\n",
      "  (f32: 45)\n",
      "  (f32: 53)\n",
      "  (f32: 1)\n",
      "  (f32: 3)\n",
      "  (f32: 108)\n",
      "2024-10-11 14:52:57.715069: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 17.090 G  ops, equivalently 8.545 G  MACs\n",
      "fully_quantize: 0, inference_type: 6, input_inference_type: UINT8, output_inference_type: UINT8\n",
      "2024-10-11 14:53:11.705502: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 17.090 G  ops, equivalently 8.545 G  MACs\n",
      "WARNING:timm.models._builder:Unexpected keys (classifier.bias, classifier.weight, conv_head.weight, norm_head.bias, norm_head.num_batches_tracked, norm_head.running_mean, norm_head.running_var, norm_head.weight) found while loading pretrained weights. This may be expected if model is being adapted.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/unet_mobilenetv4_conv_small_convtranspose_r320_w0.25/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/unet_mobilenetv4_conv_small_convtranspose_r320_w0.25/assets\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Preparing calibration data\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-10-11 14:53:30.036518: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:378] Ignored output_format.\n",
      "2024-10-11 14:53:30.036582: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:381] Ignored drop_control_dependency.\n",
      "2024-10-11 14:53:30.036854: I tensorflow/cc/saved_model/reader.cc:83] Reading SavedModel from: saved_model/unet_mobilenetv4_conv_small_convtranspose_r320_w0.25\n",
      "2024-10-11 14:53:30.068216: I tensorflow/cc/saved_model/reader.cc:51] Reading meta graph with tags { serve }\n",
      "2024-10-11 14:53:30.068262: I tensorflow/cc/saved_model/reader.cc:146] Reading SavedModel debug info (if present) from: saved_model/unet_mobilenetv4_conv_small_convtranspose_r320_w0.25\n",
      "2024-10-11 14:53:30.078709: I tensorflow/cc/saved_model/loader.cc:233] Restoring SavedModel bundle.\n",
      "2024-10-11 14:53:30.136646: I tensorflow/cc/saved_model/loader.cc:217] Running initialization op on SavedModel bundle at path: saved_model/unet_mobilenetv4_conv_small_convtranspose_r320_w0.25\n",
      "2024-10-11 14:53:30.193863: I tensorflow/cc/saved_model/loader.cc:316] SavedModel load for tags { serve }; Status: success: OK. Took 157010 microseconds.\n",
      "Summary on the non-converted ops:\n",
      "---------------------------------\n",
      " * Accepted dialects: tfl, builtin, func\n",
      " * Non-Converted Ops: 123, Total Ops 273, % non-converted = 45.05 %\n",
      " * 123 ARITH ops\n",
      "\n",
      "- arith.constant:  123 occurrences  (f32: 115, i32: 8)\n",
      "\n",
      "\n",
      "\n",
      "  (f32: 10)\n",
      "  (f32: 3)\n",
      "  (f32: 41)\n",
      "  (f32: 15)\n",
      "  (f32: 24)\n",
      "  (f32: 1)\n",
      "  (f32: 50)\n",
      "  (f32: 3)\n",
      "2024-10-11 14:53:30.716427: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 1.980 G  ops, equivalently 0.990 G  MACs\n",
      "fully_quantize: 0, inference_type: 6, input_inference_type: UINT8, output_inference_type: UINT8\n",
      "2024-10-11 14:53:33.247050: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 1.980 G  ops, equivalently 0.990 G  MACs\n",
      "WARNING:timm.models._builder:Unexpected keys (classifier.bias, classifier.weight, conv_head.weight, norm_head.bias, norm_head.num_batches_tracked, norm_head.running_mean, norm_head.running_var, norm_head.weight) found while loading pretrained weights. This may be expected if model is being adapted.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/unet_mobilenetv4_conv_medium_convtranspose_r320_w0.5/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/unet_mobilenetv4_conv_medium_convtranspose_r320_w0.5/assets\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Preparing calibration data\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-10-11 14:53:58.787195: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:378] Ignored output_format.\n",
      "2024-10-11 14:53:58.787319: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:381] Ignored drop_control_dependency.\n",
      "2024-10-11 14:53:58.787667: I tensorflow/cc/saved_model/reader.cc:83] Reading SavedModel from: saved_model/unet_mobilenetv4_conv_medium_convtranspose_r320_w0.5\n",
      "2024-10-11 14:53:58.814954: I tensorflow/cc/saved_model/reader.cc:51] Reading meta graph with tags { serve }\n",
      "2024-10-11 14:53:58.815001: I tensorflow/cc/saved_model/reader.cc:146] Reading SavedModel debug info (if present) from: saved_model/unet_mobilenetv4_conv_medium_convtranspose_r320_w0.5\n",
      "2024-10-11 14:53:58.851044: I tensorflow/cc/saved_model/loader.cc:233] Restoring SavedModel bundle.\n",
      "2024-10-11 14:53:58.998754: I tensorflow/cc/saved_model/loader.cc:217] Running initialization op on SavedModel bundle at path: saved_model/unet_mobilenetv4_conv_medium_convtranspose_r320_w0.5\n",
      "2024-10-11 14:53:59.145367: I tensorflow/cc/saved_model/loader.cc:316] SavedModel load for tags { serve }; Status: success: OK. Took 357701 microseconds.\n",
      "Summary on the non-converted ops:\n",
      "---------------------------------\n",
      " * Accepted dialects: tfl, builtin, func\n",
      " * Non-Converted Ops: 185, Total Ops 416, % non-converted = 44.47 %\n",
      " * 185 ARITH ops\n",
      "\n",
      "- arith.constant:  185 occurrences  (f32: 177, i32: 8)\n",
      "\n",
      "\n",
      "\n",
      "  (f32: 18)\n",
      "  (f32: 3)\n",
      "  (f32: 57)\n",
      "  (f32: 30)\n",
      "  (f32: 38)\n",
      "  (f32: 1)\n",
      "  (f32: 78)\n",
      "  (f32: 3)\n",
      "2024-10-11 14:54:00.464518: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 8.148 G  ops, equivalently 4.074 G  MACs\n",
      "fully_quantize: 0, inference_type: 6, input_inference_type: UINT8, output_inference_type: UINT8\n",
      "2024-10-11 14:54:06.962661: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 8.148 G  ops, equivalently 4.074 G  MACs\n",
      "WARNING:timm.models._builder:Unexpected keys (classifier.bias, classifier.weight, conv_head.weight, norm_head.bias, norm_head.num_batches_tracked, norm_head.running_mean, norm_head.running_var, norm_head.weight) found while loading pretrained weights. This may be expected if model is being adapted.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/unet_mobilenetv4_conv_large_convtranspose_r320_w0.75/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/unet_mobilenetv4_conv_large_convtranspose_r320_w0.75/assets\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Preparing calibration data\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-10-11 14:54:43.512853: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:378] Ignored output_format.\n",
      "2024-10-11 14:54:43.512928: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:381] Ignored drop_control_dependency.\n",
      "2024-10-11 14:54:43.513234: I tensorflow/cc/saved_model/reader.cc:83] Reading SavedModel from: saved_model/unet_mobilenetv4_conv_large_convtranspose_r320_w0.75\n",
      "2024-10-11 14:54:44.030850: I tensorflow/cc/saved_model/reader.cc:51] Reading meta graph with tags { serve }\n",
      "2024-10-11 14:54:44.030889: I tensorflow/cc/saved_model/reader.cc:146] Reading SavedModel debug info (if present) from: saved_model/unet_mobilenetv4_conv_large_convtranspose_r320_w0.75\n",
      "2024-10-11 14:54:44.083680: I tensorflow/cc/saved_model/loader.cc:233] Restoring SavedModel bundle.\n",
      "2024-10-11 14:54:44.450380: I tensorflow/cc/saved_model/loader.cc:217] Running initialization op on SavedModel bundle at path: saved_model/unet_mobilenetv4_conv_large_convtranspose_r320_w0.75\n",
      "2024-10-11 14:54:44.808557: I tensorflow/cc/saved_model/loader.cc:316] SavedModel load for tags { serve }; Status: success: OK. Took 1295327 microseconds.\n",
      "Summary on the non-converted ops:\n",
      "---------------------------------\n",
      " * Accepted dialects: tfl, builtin, func\n",
      " * Non-Converted Ops: 235, Total Ops 541, % non-converted = 43.44 %\n",
      " * 235 ARITH ops\n",
      "\n",
      "- arith.constant:  235 occurrences  (f32: 227, i32: 8)\n",
      "\n",
      "\n",
      "\n",
      "  (f32: 23)\n",
      "  (f32: 3)\n",
      "  (f32: 67)\n",
      "  (f32: 45)\n",
      "  (f32: 53)\n",
      "  (f32: 1)\n",
      "  (f32: 108)\n",
      "  (f32: 3)\n",
      "2024-10-11 14:54:48.070307: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 19.465 G  ops, equivalently 9.732 G  MACs\n",
      "fully_quantize: 0, inference_type: 6, input_inference_type: UINT8, output_inference_type: UINT8\n",
      "2024-10-11 14:55:04.330082: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 19.465 G  ops, equivalently 9.732 G  MACs\n"
     ]
    }
   ],
   "source": [
    "from models.unet import MobileNetV4Unet\n",
    "\n",
    "image_size = (320, 320)\n",
    "backbone_dict = {\n",
    "    \"small\": \"mobilenetv4_conv_small.e2400_r224_in1k\",\n",
    "    \"medium\": \"mobilenetv4_conv_medium.e500_r256_in1k\",\n",
    "    \"large\": \"mobilenetv4_conv_large.e500_r256_in1k\",\n",
    "}\n",
    "scales = [0.25, 0.5, 0.75]\n",
    "modes = [\"bilinear\", \"convtranspose\"]\n",
    "\n",
    "for mode in modes:\n",
    "    for scale, backbone_key in zip(scales, backbone_dict.keys()):\n",
    "        model = MobileNetV4Unet(\n",
    "            backbone=backbone_dict[backbone_key],\n",
    "            mode=mode,\n",
    "            n_classes=1,\n",
    "            width_scale=scale,\n",
    "        )\n",
    "\n",
    "        model_name = (\n",
    "            f\"unet_mobilenetv4_conv_{backbone_key}_{mode}_r{image_size[0]}_w{scale}\"\n",
    "        )\n",
    "        torch_model_path = None  # use initialized model\n",
    "        onnx_model_path = f\"saved_model/{model_name}/{model_name}.onnx\"\n",
    "        tf_folder_path = f\"saved_model/{model_name}\"\n",
    "        tflite_model_path = onnx_model_path.replace(\n",
    "            \".onnx\", \"_full_integer_quant_uint8.tflite\"\n",
    "        )\n",
    "        calib_data_path = \"calibdata.npy\"\n",
    "\n",
    "        converter = Converter(\n",
    "            model,\n",
    "            image_size,\n",
    "            torch_model_path,\n",
    "            onnx_model_path,\n",
    "            tf_folder_path,\n",
    "            tflite_model_path,\n",
    "            calib_data_path,\n",
    "            opset_version=11,\n",
    "        )\n",
    "        converter.convert()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
     "### COCO: 80 classes\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "WARNING:timm.models._builder:Unexpected keys (classifier.bias, classifier.weight, conv_head.weight, norm_head.bias, norm_head.num_batches_tracked, norm_head.running_mean, norm_head.running_var, norm_head.weight) found while loading pretrained weights. This may be expected if model is being adapted.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/unet_mobilenetv4_conv_small_bilinear_r320_w0.25_coco/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/unet_mobilenetv4_conv_small_bilinear_r320_w0.25_coco/assets\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Preparing calibration data\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-10-11 14:55:13.381439: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:378] Ignored output_format.\n",
      "2024-10-11 14:55:13.381494: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:381] Ignored drop_control_dependency.\n",
      "2024-10-11 14:55:13.381706: I tensorflow/cc/saved_model/reader.cc:83] Reading SavedModel from: saved_model/unet_mobilenetv4_conv_small_bilinear_r320_w0.25_coco\n",
      "2024-10-11 14:55:13.386305: I tensorflow/cc/saved_model/reader.cc:51] Reading meta graph with tags { serve }\n",
      "2024-10-11 14:55:13.386341: I tensorflow/cc/saved_model/reader.cc:146] Reading SavedModel debug info (if present) from: saved_model/unet_mobilenetv4_conv_small_bilinear_r320_w0.25_coco\n",
      "2024-10-11 14:55:13.395061: I tensorflow/cc/saved_model/loader.cc:233] Restoring SavedModel bundle.\n",
      "2024-10-11 14:55:13.439299: I tensorflow/cc/saved_model/loader.cc:217] Running initialization op on SavedModel bundle at path: saved_model/unet_mobilenetv4_conv_small_bilinear_r320_w0.25_coco\n",
      "2024-10-11 14:55:13.482656: I tensorflow/cc/saved_model/loader.cc:316] SavedModel load for tags { serve }; Status: success: OK. Took 100953 microseconds.\n",
      "Summary on the non-converted ops:\n",
      "---------------------------------\n",
      " * Accepted dialects: tfl, builtin, func\n",
      " * Non-Converted Ops: 117, Total Ops 267, % non-converted = 43.82 %\n",
      " * 117 ARITH ops\n",
      "\n",
      "- arith.constant:  117 occurrences  (f32: 110, i32: 7)\n",
      "\n",
      "\n",
      "\n",
      "  (f32: 10)\n",
      "  (f32: 3)\n",
      "  (f32: 41)\n",
      "  (f32: 15)\n",
      "  (f32: 24)\n",
      "  (f32: 3)\n",
      "  (f32: 51)\n",
      "2024-10-11 14:55:13.797917: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 1.716 G  ops, equivalently 0.858 G  MACs\n",
      "fully_quantize: 0, inference_type: 6, input_inference_type: UINT8, output_inference_type: UINT8\n",
      "2024-10-11 14:55:15.830421: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 1.716 G  ops, equivalently 0.858 G  MACs\n",
      "WARNING:timm.models._builder:Unexpected keys (classifier.bias, classifier.weight, conv_head.weight, norm_head.bias, norm_head.num_batches_tracked, norm_head.running_mean, norm_head.running_var, norm_head.weight) found while loading pretrained weights. This may be expected if model is being adapted.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/unet_mobilenetv4_conv_medium_bilinear_r320_w0.5_coco/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/unet_mobilenetv4_conv_medium_bilinear_r320_w0.5_coco/assets\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Preparing calibration data\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-10-11 14:55:45.480350: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:378] Ignored output_format.\n",
      "2024-10-11 14:55:45.480456: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:381] Ignored drop_control_dependency.\n",
      "2024-10-11 14:55:45.480901: I tensorflow/cc/saved_model/reader.cc:83] Reading SavedModel from: saved_model/unet_mobilenetv4_conv_medium_bilinear_r320_w0.5_coco\n",
      "2024-10-11 14:55:45.639295: I tensorflow/cc/saved_model/reader.cc:51] Reading meta graph with tags { serve }\n",
      "2024-10-11 14:55:45.639359: I tensorflow/cc/saved_model/reader.cc:146] Reading SavedModel debug info (if present) from: saved_model/unet_mobilenetv4_conv_medium_bilinear_r320_w0.5_coco\n",
      "2024-10-11 14:55:45.697944: I tensorflow/cc/saved_model/loader.cc:233] Restoring SavedModel bundle.\n",
      "2024-10-11 14:55:45.900512: I tensorflow/cc/saved_model/loader.cc:217] Running initialization op on SavedModel bundle at path: saved_model/unet_mobilenetv4_conv_medium_bilinear_r320_w0.5_coco\n",
      "2024-10-11 14:55:46.078241: I tensorflow/cc/saved_model/loader.cc:316] SavedModel load for tags { serve }; Status: success: OK. Took 597340 microseconds.\n",
      "Summary on the non-converted ops:\n",
      "---------------------------------\n",
      " * Accepted dialects: tfl, builtin, func\n",
      " * Non-Converted Ops: 179, Total Ops 410, % non-converted = 43.66 %\n",
      " * 179 ARITH ops\n",
      "\n",
      "- arith.constant:  179 occurrences  (f32: 172, i32: 7)\n",
      "\n",
      "\n",
      "\n",
      "  (f32: 18)\n",
      "  (f32: 3)\n",
      "  (f32: 57)\n",
      "  (f32: 30)\n",
      "  (f32: 38)\n",
      "  (f32: 3)\n",
      "  (f32: 79)\n",
      "2024-10-11 14:55:47.379067: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 7.109 G  ops, equivalently 3.554 G  MACs\n",
      "fully_quantize: 0, inference_type: 6, input_inference_type: UINT8, output_inference_type: UINT8\n",
      "2024-10-11 14:55:53.993336: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 7.109 G  ops, equivalently 3.554 G  MACs\n",
      "WARNING:timm.models._builder:Unexpected keys (classifier.bias, classifier.weight, conv_head.weight, norm_head.bias, norm_head.num_batches_tracked, norm_head.running_mean, norm_head.running_var, norm_head.weight) found while loading pretrained weights. This may be expected if model is being adapted.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/unet_mobilenetv4_conv_large_bilinear_r320_w0.75_coco/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/unet_mobilenetv4_conv_large_bilinear_r320_w0.75_coco/assets\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Preparing calibration data\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-10-11 14:56:25.763572: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:378] Ignored output_format.\n",
      "2024-10-11 14:56:25.763627: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:381] Ignored drop_control_dependency.\n",
      "2024-10-11 14:56:25.763874: I tensorflow/cc/saved_model/reader.cc:83] Reading SavedModel from: saved_model/unet_mobilenetv4_conv_large_bilinear_r320_w0.75_coco\n",
      "2024-10-11 14:56:26.191503: I tensorflow/cc/saved_model/reader.cc:51] Reading meta graph with tags { serve }\n",
      "2024-10-11 14:56:26.191553: I tensorflow/cc/saved_model/reader.cc:146] Reading SavedModel debug info (if present) from: saved_model/unet_mobilenetv4_conv_large_bilinear_r320_w0.75_coco\n",
      "2024-10-11 14:56:26.241554: I tensorflow/cc/saved_model/loader.cc:233] Restoring SavedModel bundle.\n",
      "2024-10-11 14:56:26.520118: I tensorflow/cc/saved_model/loader.cc:217] Running initialization op on SavedModel bundle at path: saved_model/unet_mobilenetv4_conv_large_bilinear_r320_w0.75_coco\n",
      "2024-10-11 14:56:26.821987: I tensorflow/cc/saved_model/loader.cc:316] SavedModel load for tags { serve }; Status: success: OK. Took 1058113 microseconds.\n",
      "Summary on the non-converted ops:\n",
      "---------------------------------\n",
      " * Accepted dialects: tfl, builtin, func\n",
      " * Non-Converted Ops: 229, Total Ops 535, % non-converted = 42.80 %\n",
      " * 229 ARITH ops\n",
      "\n",
      "- arith.constant:  229 occurrences  (f32: 222, i32: 7)\n",
      "\n",
      "\n",
      "\n",
      "  (f32: 23)\n",
      "  (f32: 3)\n",
      "  (f32: 67)\n",
      "  (f32: 45)\n",
      "  (f32: 53)\n",
      "  (f32: 3)\n",
      "  (f32: 109)\n",
      "2024-10-11 14:56:28.957234: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 17.139 G  ops, equivalently 8.569 G  MACs\n",
      "fully_quantize: 0, inference_type: 6, input_inference_type: UINT8, output_inference_type: UINT8\n",
      "2024-10-11 14:56:45.473179: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 17.139 G  ops, equivalently 8.569 G  MACs\n",
      "WARNING:timm.models._builder:Unexpected keys (classifier.bias, classifier.weight, conv_head.weight, norm_head.bias, norm_head.num_batches_tracked, norm_head.running_mean, norm_head.running_var, norm_head.weight) found while loading pretrained weights. This may be expected if model is being adapted.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/unet_mobilenetv4_conv_small_convtranspose_r320_w0.25_coco/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/unet_mobilenetv4_conv_small_convtranspose_r320_w0.25_coco/assets\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Preparing calibration data\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-10-11 14:57:07.049333: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:378] Ignored output_format.\n",
      "2024-10-11 14:57:07.049418: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:381] Ignored drop_control_dependency.\n",
      "2024-10-11 14:57:07.049744: I tensorflow/cc/saved_model/reader.cc:83] Reading SavedModel from: saved_model/unet_mobilenetv4_conv_small_convtranspose_r320_w0.25_coco\n",
      "2024-10-11 14:57:07.058126: I tensorflow/cc/saved_model/reader.cc:51] Reading meta graph with tags { serve }\n",
      "2024-10-11 14:57:07.058202: I tensorflow/cc/saved_model/reader.cc:146] Reading SavedModel debug info (if present) from: saved_model/unet_mobilenetv4_conv_small_convtranspose_r320_w0.25_coco\n",
      "2024-10-11 14:57:07.071364: I tensorflow/cc/saved_model/loader.cc:233] Restoring SavedModel bundle.\n",
      "2024-10-11 14:57:07.148924: I tensorflow/cc/saved_model/loader.cc:217] Running initialization op on SavedModel bundle at path: saved_model/unet_mobilenetv4_conv_small_convtranspose_r320_w0.25_coco\n",
      "2024-10-11 14:57:07.226261: I tensorflow/cc/saved_model/loader.cc:316] SavedModel load for tags { serve }; Status: success: OK. Took 176521 microseconds.\n",
      "Summary on the non-converted ops:\n",
      "---------------------------------\n",
      " * Accepted dialects: tfl, builtin, func\n",
      " * Non-Converted Ops: 122, Total Ops 272, % non-converted = 44.85 %\n",
      " * 122 ARITH ops\n",
      "\n",
      "- arith.constant:  122 occurrences  (f32: 115, i32: 7)\n",
      "\n",
      "\n",
      "\n",
      "  (f32: 10)\n",
      "  (f32: 3)\n",
      "  (f32: 41)\n",
      "  (f32: 15)\n",
      "  (f32: 24)\n",
      "  (f32: 51)\n",
      "  (f32: 3)\n",
      "2024-10-11 14:57:07.666384: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 2.013 G  ops, equivalently 1.007 G  MACs\n",
      "fully_quantize: 0, inference_type: 6, input_inference_type: UINT8, output_inference_type: UINT8\n",
      "2024-10-11 14:57:11.167699: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 2.013 G  ops, equivalently 1.007 G  MACs\n",
      "WARNING:timm.models._builder:Unexpected keys (classifier.bias, classifier.weight, conv_head.weight, norm_head.bias, norm_head.num_batches_tracked, norm_head.running_mean, norm_head.running_var, norm_head.weight) found while loading pretrained weights. This may be expected if model is being adapted.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/unet_mobilenetv4_conv_medium_convtranspose_r320_w0.5_coco/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/unet_mobilenetv4_conv_medium_convtranspose_r320_w0.5_coco/assets\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Preparing calibration data\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-10-11 14:57:49.934449: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:378] Ignored output_format.\n",
      "2024-10-11 14:57:49.934494: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:381] Ignored drop_control_dependency.\n",
      "2024-10-11 14:57:49.934850: I tensorflow/cc/saved_model/reader.cc:83] Reading SavedModel from: saved_model/unet_mobilenetv4_conv_medium_convtranspose_r320_w0.5_coco\n",
      "2024-10-11 14:57:49.961277: I tensorflow/cc/saved_model/reader.cc:51] Reading meta graph with tags { serve }\n",
      "2024-10-11 14:57:49.961341: I tensorflow/cc/saved_model/reader.cc:146] Reading SavedModel debug info (if present) from: saved_model/unet_mobilenetv4_conv_medium_convtranspose_r320_w0.5_coco\n",
      "2024-10-11 14:57:49.997692: I tensorflow/cc/saved_model/loader.cc:233] Restoring SavedModel bundle.\n",
      "2024-10-11 14:57:50.131062: I tensorflow/cc/saved_model/loader.cc:217] Running initialization op on SavedModel bundle at path: saved_model/unet_mobilenetv4_conv_medium_convtranspose_r320_w0.5_coco\n",
      "2024-10-11 14:57:50.281927: I tensorflow/cc/saved_model/loader.cc:316] SavedModel load for tags { serve }; Status: success: OK. Took 347079 microseconds.\n",
      "Summary on the non-converted ops:\n",
      "---------------------------------\n",
      " * Accepted dialects: tfl, builtin, func\n",
      " * Non-Converted Ops: 184, Total Ops 415, % non-converted = 44.34 %\n",
      " * 184 ARITH ops\n",
      "\n",
      "- arith.constant:  184 occurrences  (f32: 177, i32: 7)\n",
      "\n",
      "\n",
      "\n",
      "  (f32: 18)\n",
      "  (f32: 3)\n",
      "  (f32: 57)\n",
      "  (f32: 30)\n",
      "  (f32: 38)\n",
      "  (f32: 79)\n",
      "  (f32: 3)\n",
      "2024-10-11 14:57:51.166381: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 8.213 G  ops, equivalently 4.107 G  MACs\n",
      "fully_quantize: 0, inference_type: 6, input_inference_type: UINT8, output_inference_type: UINT8\n",
      "2024-10-11 14:57:57.082972: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 8.213 G  ops, equivalently 4.107 G  MACs\n",
      "WARNING:timm.models._builder:Unexpected keys (classifier.bias, classifier.weight, conv_head.weight, norm_head.bias, norm_head.num_batches_tracked, norm_head.running_mean, norm_head.running_var, norm_head.weight) found while loading pretrained weights. This may be expected if model is being adapted.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/unet_mobilenetv4_conv_large_convtranspose_r320_w0.75_coco/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/unet_mobilenetv4_conv_large_convtranspose_r320_w0.75_coco/assets\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Preparing calibration data\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-10-11 14:58:40.738027: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:378] Ignored output_format.\n",
      "2024-10-11 14:58:40.738079: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:381] Ignored drop_control_dependency.\n",
      "2024-10-11 14:58:40.738379: I tensorflow/cc/saved_model/reader.cc:83] Reading SavedModel from: saved_model/unet_mobilenetv4_conv_large_convtranspose_r320_w0.75_coco\n",
      "2024-10-11 14:58:40.879117: I tensorflow/cc/saved_model/reader.cc:51] Reading meta graph with tags { serve }\n",
      "2024-10-11 14:58:40.879160: I tensorflow/cc/saved_model/reader.cc:146] Reading SavedModel debug info (if present) from: saved_model/unet_mobilenetv4_conv_large_convtranspose_r320_w0.75_coco\n",
      "2024-10-11 14:58:40.928505: I tensorflow/cc/saved_model/loader.cc:233] Restoring SavedModel bundle.\n",
      "2024-10-11 14:58:41.202698: I tensorflow/cc/saved_model/loader.cc:217] Running initialization op on SavedModel bundle at path: saved_model/unet_mobilenetv4_conv_large_convtranspose_r320_w0.75_coco\n",
      "2024-10-11 14:58:41.534805: I tensorflow/cc/saved_model/loader.cc:316] SavedModel load for tags { serve }; Status: success: OK. Took 796428 microseconds.\n",
      "Summary on the non-converted ops:\n",
      "---------------------------------\n",
      " * Accepted dialects: tfl, builtin, func\n",
      " * Non-Converted Ops: 234, Total Ops 540, % non-converted = 43.33 %\n",
      " * 234 ARITH ops\n",
      "\n",
      "- arith.constant:  234 occurrences  (f32: 227, i32: 7)\n",
      "\n",
      "\n",
      "\n",
      "  (f32: 23)\n",
      "  (f32: 3)\n",
      "  (f32: 67)\n",
      "  (f32: 45)\n",
      "  (f32: 53)\n",
      "  (f32: 109)\n",
      "  (f32: 3)\n",
      "2024-10-11 14:58:43.647220: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 19.562 G  ops, equivalently 9.781 G  MACs\n",
      "fully_quantize: 0, inference_type: 6, input_inference_type: UINT8, output_inference_type: UINT8\n",
      "2024-10-11 14:58:56.712306: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 19.562 G  ops, equivalently 9.781 G  MACs\n"
     ]
    }
   ],
   "source": [
    "from models.unet import MobileNetV4Unet\n",
    "\n",
    "image_size = (320, 320)\n",
    "backbone_dict = {\n",
    "    \"small\": \"mobilenetv4_conv_small.e2400_r224_in1k\",\n",
    "    \"medium\": \"mobilenetv4_conv_medium.e500_r256_in1k\",\n",
    "    \"large\": \"mobilenetv4_conv_large.e500_r256_in1k\",\n",
    "}\n",
    "scales = [0.25, 0.5, 0.75]\n",
    "modes = [\"bilinear\", \"convtranspose\"]\n",
    "\n",
    "for mode in modes:\n",
    "    for scale, backbone_key in zip(scales, backbone_dict.keys()):\n",
    "        model = MobileNetV4Unet(\n",
    "            backbone=backbone_dict[backbone_key],\n",
    "            mode=mode,\n",
    "            n_classes=80,\n",
    "            width_scale=scale,\n",
    "        )\n",
    "\n",
    "        model_name = f\"unet_mobilenetv4_conv_{backbone_key}_{mode}_r{image_size[0]}_w{scale}_coco\"\n",
    "        torch_model_path = None  # use initialized model\n",
    "        onnx_model_path = f\"saved_model/{model_name}/{model_name}.onnx\"\n",
    "        tf_folder_path = f\"saved_model/{model_name}\"\n",
    "        tflite_model_path = onnx_model_path.replace(\n",
    "            \".onnx\", \"_full_integer_quant_uint8.tflite\"\n",
    "        )\n",
    "        calib_data_path = \"calibdata.npy\"\n",
    "\n",
    "        converter = Converter(\n",
    "            model,\n",
    "            image_size,\n",
    "            torch_model_path,\n",
    "            onnx_model_path,\n",
    "            tf_folder_path,\n",
    "            tflite_model_path,\n",
    "            calib_data_path,\n",
    "            opset_version=11,\n",
    "        )\n",
    "        converter.convert()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## LiteHRNet"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-10-11 15:17:17.778116: E external/local_xla/xla/stream_executor/cuda/cuda_dnn.cc:9261] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\n",
      "2024-10-11 15:17:17.778152: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:607] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\n",
      "2024-10-11 15:17:17.779480: E external/local_xla/xla/stream_executor/cuda/cuda_blas.cc:1515] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\n",
      "2024-10-11 15:17:17.786817: I tensorflow/core/platform/cpu_feature_guard.cc:182] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations.\n",
      "To enable the following instructions: AVX2 FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags.\n",
      "2024-10-11 15:17:18.618292: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT\n",
      "/home/chenxin/anaconda3/envs/ocd/lib/python3.10/site-packages/tensorflow_addons/utils/tfa_eol_msg.py:23: UserWarning: \n",
      "\n",
      "TensorFlow Addons (TFA) has ended development and introduction of new features.\n",
      "TFA has entered a minimal maintenance and release mode until a planned end of life in May 2024.\n",
      "Please modify downstream libraries to take dependencies from other repositories in our TensorFlow community (e.g. Keras, Keras-CV, and Keras-NLP). \n",
      "\n",
      "For more information see: https://github.com/tensorflow/addons/issues/2807 \n",
      "\n",
      "  warnings.warn(\n",
      "/home/chenxin/anaconda3/envs/ocd/lib/python3.10/site-packages/albumentations/__init__.py:13: UserWarning: A new version of Albumentations is available: 1.4.18 (you have 1.4.16). Upgrade using: pip install -U albumentations. To disable automatic update checks, set the environment variable NO_ALBUMENTATIONS_UPDATE to 1.\n",
      "  check_for_updates()\n",
      "/mnt/ssd2/xxx/repo/mmpose/mmpose/models/backbones/utils/channel_shuffle.py:21: TracerWarning: Converting a tensor to a Python boolean might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!\n",
      "  assert (num_channels % groups == 0), ('num_channels should be '\n"
     ]
    }
   ],
   "source": [
    "import os\n",
    "import sys\n",
    "\n",
    "# Add the parent directory of the current dir (i.e. root dir of this repo) to the system path\n",
    "sys.path.append(os.path.dirname(os.getcwd()))\n",
    "from conversion.converter import Converter\n",
    "from models.lite_hrnet import LiteHRNet30\n",
    "\n",
    "image_size = (320, 320)\n",
    "n_classes = 1\n",
    "\n",
    "model = LiteHRNet30(n_classes)\n",
    "model.eval()\n",
    "model_name = f\"lite_hrnet_30_r{image_size[0]}\"\n",
    "torch_model_path = None  # use initialized model\n",
    "onnx_model_path = f\"saved_model/{model_name}/{model_name}.onnx\"\n",
    "tf_folder_path = f\"saved_model/{model_name}\"\n",
    "tflite_model_path = onnx_model_path.replace(\".onnx\", \"_full_integer_quant_uint8.tflite\")\n",
    "calib_data_path = \"calibdata.npy\"\n",
    "\n",
    "converter = Converter(\n",
    "    model,\n",
    "    image_size,\n",
    "    torch_model_path,\n",
    "    onnx_model_path,\n",
    "    tf_folder_path,\n",
    "    tflite_model_path,\n",
    "    calib_data_path,\n",
    "    opset_version=11,\n",
    ")\n",
    "converter.convert()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## MobilenetV4SimpleBaseline"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### c1\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:absl:Function `__call__` contains input name(s) x, y with unsupported characters which will be renamed to transpose_150_x, add_46_y in the SavedModel.\n",
      "INFO:absl:Found untraced functions such as gen_tensor_dict while saving (showing 1 of 1). These functions will not be directly callable after loading.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/simple_mobilenetv4_conv_small_bilinear_r320/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/simple_mobilenetv4_conv_small_bilinear_r320/assets\n",
      "INFO:absl:Writing fingerprint to saved_model/simple_mobilenetv4_conv_small_bilinear_r320/fingerprint.pb\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Preparing calibration data\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-10-09 22:15:46.590814: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:378] Ignored output_format.\n",
      "2024-10-09 22:15:46.590852: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:381] Ignored drop_control_dependency.\n",
      "2024-10-09 22:15:46.591061: I tensorflow/cc/saved_model/reader.cc:83] Reading SavedModel from: saved_model/simple_mobilenetv4_conv_small_bilinear_r320\n",
      "2024-10-09 22:15:46.629439: I tensorflow/cc/saved_model/reader.cc:51] Reading meta graph with tags { serve }\n",
      "2024-10-09 22:15:46.629472: I tensorflow/cc/saved_model/reader.cc:146] Reading SavedModel debug info (if present) from: saved_model/simple_mobilenetv4_conv_small_bilinear_r320\n",
      "2024-10-09 22:15:46.635925: I tensorflow/cc/saved_model/loader.cc:233] Restoring SavedModel bundle.\n",
      "2024-10-09 22:15:46.676221: I tensorflow/cc/saved_model/loader.cc:217] Running initialization op on SavedModel bundle at path: saved_model/simple_mobilenetv4_conv_small_bilinear_r320\n",
      "2024-10-09 22:15:46.705238: I tensorflow/cc/saved_model/loader.cc:316] SavedModel load for tags { serve }; Status: success: OK. Took 114178 microseconds.\n",
      "Summary on the non-converted ops:\n",
      "---------------------------------\n",
      " * Accepted dialects: tfl, builtin, func\n",
      " * Non-Converted Ops: 106, Total Ops 234, % non-converted = 45.30 %\n",
      " * 106 ARITH ops\n",
      "\n",
      "- arith.constant:  106 occurrences  (f32: 98, i32: 8)\n",
      "\n",
      "\n",
      "\n",
      "  (f32: 10)\n",
      "  (f32: 34)\n",
      "  (f32: 15)\n",
      "  (f32: 21)\n",
      "  (f32: 1)\n",
      "  (f32: 3)\n",
      "  (f32: 41)\n",
      "2024-10-09 22:15:46.930190: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 1.417 G  ops, equivalently 0.708 G  MACs\n",
      "fully_quantize: 0, inference_type: 6, input_inference_type: UINT8, output_inference_type: UINT8\n",
      "2024-10-09 22:15:48.247946: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 1.417 G  ops, equivalently 0.708 G  MACs\n",
      "INFO:absl:Function `__call__` contains input name(s) x, y with unsupported characters which will be renamed to transpose_243_x, add_77_y in the SavedModel.\n",
      "INFO:absl:Found untraced functions such as gen_tensor_dict while saving (showing 1 of 1). These functions will not be directly callable after loading.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/simple_mobilenetv4_conv_medium_bilinear_r320/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/simple_mobilenetv4_conv_medium_bilinear_r320/assets\n",
      "INFO:absl:Writing fingerprint to saved_model/simple_mobilenetv4_conv_medium_bilinear_r320/fingerprint.pb\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Preparing calibration data\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-10-09 22:15:56.219312: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:378] Ignored output_format.\n",
      "2024-10-09 22:15:56.219345: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:381] Ignored drop_control_dependency.\n",
      "2024-10-09 22:15:56.219573: I tensorflow/cc/saved_model/reader.cc:83] Reading SavedModel from: saved_model/simple_mobilenetv4_conv_medium_bilinear_r320\n",
      "2024-10-09 22:15:56.230449: I tensorflow/cc/saved_model/reader.cc:51] Reading meta graph with tags { serve }\n",
      "2024-10-09 22:15:56.230485: I tensorflow/cc/saved_model/reader.cc:146] Reading SavedModel debug info (if present) from: saved_model/simple_mobilenetv4_conv_medium_bilinear_r320\n",
      "2024-10-09 22:15:56.245122: I tensorflow/cc/saved_model/loader.cc:233] Restoring SavedModel bundle.\n",
      "2024-10-09 22:15:56.296335: I tensorflow/cc/saved_model/loader.cc:217] Running initialization op on SavedModel bundle at path: saved_model/simple_mobilenetv4_conv_medium_bilinear_r320\n",
      "2024-10-09 22:15:56.347990: I tensorflow/cc/saved_model/loader.cc:316] SavedModel load for tags { serve }; Status: success: OK. Took 128453 microseconds.\n",
      "Summary on the non-converted ops:\n",
      "---------------------------------\n",
      " * Accepted dialects: tfl, builtin, func\n",
      " * Non-Converted Ops: 168, Total Ops 377, % non-converted = 44.56 %\n",
      " * 168 ARITH ops\n",
      "\n",
      "- arith.constant:  168 occurrences  (f32: 160, i32: 8)\n",
      "\n",
      "\n",
      "\n",
      "  (f32: 18)\n",
      "  (f32: 50)\n",
      "  (f32: 30)\n",
      "  (f32: 35)\n",
      "  (f32: 1)\n",
      "  (f32: 3)\n",
      "  (f32: 69)\n",
      "2024-10-09 22:15:56.758775: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 4.860 G  ops, equivalently 2.430 G  MACs\n",
      "fully_quantize: 0, inference_type: 6, input_inference_type: UINT8, output_inference_type: UINT8\n",
      "2024-10-09 22:16:00.403179: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 4.860 G  ops, equivalently 2.430 G  MACs\n",
      "INFO:absl:Function `__call__` contains input name(s) input, x, y with unsupported characters which will be renamed to onnx_tf_prefix_identity_0_input, transpose_318_x, add_101_y in the SavedModel.\n",
      "INFO:absl:Found untraced functions such as gen_tensor_dict while saving (showing 1 of 1). These functions will not be directly callable after loading.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/simple_mobilenetv4_conv_large_bilinear_r320/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/simple_mobilenetv4_conv_large_bilinear_r320/assets\n",
      "INFO:absl:Writing fingerprint to saved_model/simple_mobilenetv4_conv_large_bilinear_r320/fingerprint.pb\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Preparing calibration data\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-10-09 22:16:17.453208: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:378] Ignored output_format.\n",
      "2024-10-09 22:16:17.453247: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:381] Ignored drop_control_dependency.\n",
      "2024-10-09 22:16:17.453474: I tensorflow/cc/saved_model/reader.cc:83] Reading SavedModel from: saved_model/simple_mobilenetv4_conv_large_bilinear_r320\n",
      "2024-10-09 22:16:17.686350: I tensorflow/cc/saved_model/reader.cc:51] Reading meta graph with tags { serve }\n",
      "2024-10-09 22:16:17.686405: I tensorflow/cc/saved_model/reader.cc:146] Reading SavedModel debug info (if present) from: saved_model/simple_mobilenetv4_conv_large_bilinear_r320\n",
      "2024-10-09 22:16:17.737505: I tensorflow/cc/saved_model/loader.cc:233] Restoring SavedModel bundle.\n",
      "2024-10-09 22:16:17.835634: I tensorflow/cc/saved_model/loader.cc:217] Running initialization op on SavedModel bundle at path: saved_model/simple_mobilenetv4_conv_large_bilinear_r320\n",
      "2024-10-09 22:16:17.926583: I tensorflow/cc/saved_model/loader.cc:316] SavedModel load for tags { serve }; Status: success: OK. Took 473111 microseconds.\n",
      "Summary on the non-converted ops:\n",
      "---------------------------------\n",
      " * Accepted dialects: tfl, builtin, func\n",
      " * Non-Converted Ops: 217, Total Ops 501, % non-converted = 43.31 %\n",
      " * 217 ARITH ops\n",
      "\n",
      "- arith.constant:  217 occurrences  (f32: 209, i32: 8)\n",
      "\n",
      "\n",
      "\n",
      "  (f32: 23)\n",
      "  (f32: 60)\n",
      "  (f32: 45)\n",
      "  (f32: 50)\n",
      "  (f32: 1)\n",
      "  (f32: 3)\n",
      "  (f32: 99)\n",
      "2024-10-09 22:16:19.282834: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 11.311 G  ops, equivalently 5.656 G  MACs\n",
      "fully_quantize: 0, inference_type: 6, input_inference_type: UINT8, output_inference_type: UINT8\n",
      "2024-10-09 22:16:28.357812: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 11.311 G  ops, equivalently 5.656 G  MACs\n",
      "INFO:absl:Function `__call__` contains input name(s) x, y with unsupported characters which will be renamed to transpose_144_x, add_45_y in the SavedModel.\n",
      "INFO:absl:Found untraced functions such as gen_tensor_dict while saving (showing 1 of 1). These functions will not be directly callable after loading.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/simple_mobilenetv4_conv_small_convtranspose_r320/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/simple_mobilenetv4_conv_small_convtranspose_r320/assets\n",
      "INFO:absl:Writing fingerprint to saved_model/simple_mobilenetv4_conv_small_convtranspose_r320/fingerprint.pb\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Preparing calibration data\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-10-09 22:16:33.384089: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:378] Ignored output_format.\n",
      "2024-10-09 22:16:33.384123: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:381] Ignored drop_control_dependency.\n",
      "2024-10-09 22:16:33.384341: I tensorflow/cc/saved_model/reader.cc:83] Reading SavedModel from: saved_model/simple_mobilenetv4_conv_small_convtranspose_r320\n",
      "2024-10-09 22:16:33.436570: I tensorflow/cc/saved_model/reader.cc:51] Reading meta graph with tags { serve }\n",
      "2024-10-09 22:16:33.436596: I tensorflow/cc/saved_model/reader.cc:146] Reading SavedModel debug info (if present) from: saved_model/simple_mobilenetv4_conv_small_convtranspose_r320\n",
      "2024-10-09 22:16:33.443207: I tensorflow/cc/saved_model/loader.cc:233] Restoring SavedModel bundle.\n",
      "2024-10-09 22:16:33.473410: I tensorflow/cc/saved_model/loader.cc:217] Running initialization op on SavedModel bundle at path: saved_model/simple_mobilenetv4_conv_small_convtranspose_r320\n",
      "2024-10-09 22:16:33.502743: I tensorflow/cc/saved_model/loader.cc:316] SavedModel load for tags { serve }; Status: success: OK. Took 118403 microseconds.\n",
      "Summary on the non-converted ops:\n",
      "---------------------------------\n",
      " * Accepted dialects: tfl, builtin, func\n",
      " * Non-Converted Ops: 109, Total Ops 236, % non-converted = 46.19 %\n",
      " * 109 ARITH ops\n",
      "\n",
      "- arith.constant:  109 occurrences  (f32: 97, i32: 12)\n",
      "\n",
      "\n",
      "\n",
      "  (f32: 10)\n",
      "  (f32: 31)\n",
      "  (f32: 15)\n",
      "  (f32: 3)\n",
      "\n",
      "  (f32: 18)\n",
      "  (f32: 1)\n",
      "  (f32: 3)\n",
      "  (f32: 39)\n",
      "  (f32: 3)\n",
      "2024-10-09 22:16:33.714535: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 1.201 G  ops, equivalently 0.600 G  MACs\n",
      "fully_quantize: 0, inference_type: 6, input_inference_type: UINT8, output_inference_type: UINT8\n",
      "2024-10-09 22:16:34.991958: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 1.201 G  ops, equivalently 0.600 G  MACs\n",
      "INFO:absl:Function `__call__` contains input name(s) input, x, y with unsupported characters which will be renamed to onnx_tf_prefix_identity_3_input, transpose_237_x, add_76_y in the SavedModel.\n",
      "INFO:absl:Found untraced functions such as gen_tensor_dict while saving (showing 1 of 1). These functions will not be directly callable after loading.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/simple_mobilenetv4_conv_medium_convtranspose_r320/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/simple_mobilenetv4_conv_medium_convtranspose_r320/assets\n",
      "INFO:absl:Writing fingerprint to saved_model/simple_mobilenetv4_conv_medium_convtranspose_r320/fingerprint.pb\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Preparing calibration data\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-10-09 22:16:42.850659: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:378] Ignored output_format.\n",
      "2024-10-09 22:16:42.850696: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:381] Ignored drop_control_dependency.\n",
      "2024-10-09 22:16:42.850912: I tensorflow/cc/saved_model/reader.cc:83] Reading SavedModel from: saved_model/simple_mobilenetv4_conv_medium_convtranspose_r320\n",
      "2024-10-09 22:16:42.864790: I tensorflow/cc/saved_model/reader.cc:51] Reading meta graph with tags { serve }\n",
      "2024-10-09 22:16:42.864828: I tensorflow/cc/saved_model/reader.cc:146] Reading SavedModel debug info (if present) from: saved_model/simple_mobilenetv4_conv_medium_convtranspose_r320\n",
      "2024-10-09 22:16:42.880964: I tensorflow/cc/saved_model/loader.cc:233] Restoring SavedModel bundle.\n",
      "2024-10-09 22:16:42.936665: I tensorflow/cc/saved_model/loader.cc:217] Running initialization op on SavedModel bundle at path: saved_model/simple_mobilenetv4_conv_medium_convtranspose_r320\n",
      "2024-10-09 22:16:42.990281: I tensorflow/cc/saved_model/loader.cc:316] SavedModel load for tags { serve }; Status: success: OK. Took 139371 microseconds.\n",
      "Summary on the non-converted ops:\n",
      "---------------------------------\n",
      " * Accepted dialects: tfl, builtin, func\n",
      " * Non-Converted Ops: 171, Total Ops 379, % non-converted = 45.12 %\n",
      " * 171 ARITH ops\n",
      "\n",
      "- arith.constant:  171 occurrences  (f32: 159, i32: 12)\n",
      "\n",
      "\n",
      "\n",
      "  (f32: 18)\n",
      "  (f32: 47)\n",
      "  (f32: 30)\n",
      "  (f32: 3)\n",
      "\n",
      "  (f32: 32)\n",
      "  (f32: 1)\n",
      "  (f32: 3)\n",
      "  (f32: 67)\n",
      "  (f32: 3)\n",
      "2024-10-09 22:16:43.486371: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 4.751 G  ops, equivalently 2.375 G  MACs\n",
      "fully_quantize: 0, inference_type: 6, input_inference_type: UINT8, output_inference_type: UINT8\n",
      "2024-10-09 22:16:47.355654: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 4.751 G  ops, equivalently 2.375 G  MACs\n",
      "INFO:absl:Function `__call__` contains input name(s) input, x, y with unsupported characters which will be renamed to onnx_tf_prefix_identity_3_input, transpose_312_x, add_101_y in the SavedModel.\n",
      "INFO:absl:Found untraced functions such as gen_tensor_dict while saving (showing 1 of 1). These functions will not be directly callable after loading.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/simple_mobilenetv4_conv_large_convtranspose_r320/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/simple_mobilenetv4_conv_large_convtranspose_r320/assets\n",
      "INFO:absl:Writing fingerprint to saved_model/simple_mobilenetv4_conv_large_convtranspose_r320/fingerprint.pb\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Preparing calibration data\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-10-09 22:17:01.009830: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:378] Ignored output_format.\n",
      "2024-10-09 22:17:01.009862: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:381] Ignored drop_control_dependency.\n",
      "2024-10-09 22:17:01.010049: I tensorflow/cc/saved_model/reader.cc:83] Reading SavedModel from: saved_model/simple_mobilenetv4_conv_large_convtranspose_r320\n",
      "2024-10-09 22:17:01.051746: I tensorflow/cc/saved_model/reader.cc:51] Reading meta graph with tags { serve }\n",
      "2024-10-09 22:17:01.051789: I tensorflow/cc/saved_model/reader.cc:146] Reading SavedModel debug info (if present) from: saved_model/simple_mobilenetv4_conv_large_convtranspose_r320\n",
      "2024-10-09 22:17:01.093291: I tensorflow/cc/saved_model/loader.cc:233] Restoring SavedModel bundle.\n",
      "2024-10-09 22:17:01.351366: I tensorflow/cc/saved_model/loader.cc:217] Running initialization op on SavedModel bundle at path: saved_model/simple_mobilenetv4_conv_large_convtranspose_r320\n",
      "2024-10-09 22:17:01.607907: I tensorflow/cc/saved_model/loader.cc:316] SavedModel load for tags { serve }; Status: success: OK. Took 597858 microseconds.\n",
      "Summary on the non-converted ops:\n",
      "---------------------------------\n",
      " * Accepted dialects: tfl, builtin, func\n",
      " * Non-Converted Ops: 221, Total Ops 504, % non-converted = 43.85 %\n",
      " * 221 ARITH ops\n",
      "\n",
      "- arith.constant:  221 occurrences  (f32: 209, i32: 12)\n",
      "\n",
      "\n",
      "\n",
      "  (f32: 23)\n",
      "  (f32: 57)\n",
      "  (f32: 45)\n",
      "  (f32: 3)\n",
      "\n",
      "  (f32: 47)\n",
      "  (f32: 1)\n",
      "  (f32: 3)\n",
      "  (f32: 97)\n",
      "  (f32: 3)\n",
      "2024-10-09 22:17:03.412661: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 13.193 G  ops, equivalently 6.596 G  MACs\n",
      "fully_quantize: 0, inference_type: 6, input_inference_type: UINT8, output_inference_type: UINT8\n",
      "2024-10-09 22:17:13.770583: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 13.193 G  ops, equivalently 6.596 G  MACs\n"
     ]
    }
   ],
   "source": [
    "import os\n",
    "import sys\n",
    "\n",
    "# Add the parent directory of the current dir (i.e. root dir of this repo) to the system path\n",
    "sys.path.append(os.path.dirname(os.getcwd()))\n",
    "\n",
    "from models.simple_baseline import MobilenetV4SimpleBaseline\n",
    "\n",
    "modes = [\n",
    "    \"bilinear\",\n",
    "    #  \"nearest\",\n",
    "    \"convtranspose\",\n",
    "]\n",
    "backbone_dict = {\n",
    "    \"small\": \"mobilenetv4_conv_small.e2400_r224_in1k\",\n",
    "    \"medium\": \"mobilenetv4_conv_medium.e500_r256_in1k\",\n",
    "    \"large\": \"mobilenetv4_conv_large.e500_r256_in1k\",\n",
    "}\n",
    "image_size = (320, 320)\n",
    "for mode in modes:\n",
    "    for backbone_key, backbone_value in backbone_dict.items():\n",
    "        model = (\n",
    "            MobilenetV4SimpleBaseline(backbone=backbone_value, n_classes=1, mode=mode)\n",
    "            .float()\n",
    "            .eval()\n",
    "        )\n",
    "\n",
    "        model_name = f\"simple_mobilenetv4_conv_{backbone_key}_{mode}_r{image_size[0]}\"\n",
    "        torch_model_path = None  # use initialized model\n",
    "        onnx_model_path = f\"saved_model/{model_name}/{model_name}.onnx\"\n",
    "        tf_folder_path = f\"saved_model/{model_name}\"\n",
    "        tflite_model_path = onnx_model_path.replace(\n",
    "            \".onnx\", \"_full_integer_quant_uint8.tflite\"\n",
    "        )\n",
    "        calib_data_path = \"calibdata.npy\"\n",
    "\n",
    "        converter = Converter(\n",
    "            model,\n",
    "            image_size,\n",
    "            torch_model_path,\n",
    "            onnx_model_path,\n",
    "            tf_folder_path,\n",
    "            tflite_model_path,\n",
    "            calib_data_path,\n",
    "            opset_version=13,\n",
    "        )\n",
    "        converter.convert()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### c80\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/simple_mobilenetv4_conv_small_bilinear_r320_coco/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/simple_mobilenetv4_conv_small_bilinear_r320_coco/assets\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Preparing calibration data\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-10-11 15:07:02.493774: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:378] Ignored output_format.\n",
      "2024-10-11 15:07:02.493855: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:381] Ignored drop_control_dependency.\n",
      "2024-10-11 15:07:02.494221: I tensorflow/cc/saved_model/reader.cc:83] Reading SavedModel from: saved_model/simple_mobilenetv4_conv_small_bilinear_r320_coco\n",
      "2024-10-11 15:07:02.502021: I tensorflow/cc/saved_model/reader.cc:51] Reading meta graph with tags { serve }\n",
      "2024-10-11 15:07:02.502067: I tensorflow/cc/saved_model/reader.cc:146] Reading SavedModel debug info (if present) from: saved_model/simple_mobilenetv4_conv_small_bilinear_r320_coco\n",
      "2024-10-11 15:07:02.512880: I tensorflow/cc/saved_model/loader.cc:233] Restoring SavedModel bundle.\n",
      "2024-10-11 15:07:02.571410: I tensorflow/cc/saved_model/loader.cc:217] Running initialization op on SavedModel bundle at path: saved_model/simple_mobilenetv4_conv_small_bilinear_r320_coco\n",
      "2024-10-11 15:07:02.622712: I tensorflow/cc/saved_model/loader.cc:316] SavedModel load for tags { serve }; Status: success: OK. Took 128553 microseconds.\n",
      "Summary on the non-converted ops:\n",
      "---------------------------------\n",
      " * Accepted dialects: tfl, builtin, func\n",
      " * Non-Converted Ops: 105, Total Ops 233, % non-converted = 45.06 %\n",
      " * 105 ARITH ops\n",
      "\n",
      "- arith.constant:  105 occurrences  (f32: 98, i32: 7)\n",
      "\n",
      "\n",
      "\n",
      "  (f32: 10)\n",
      "  (f32: 34)\n",
      "  (f32: 15)\n",
      "  (f32: 21)\n",
      "  (f32: 3)\n",
      "  (f32: 42)\n",
      "2024-10-11 15:07:03.017270: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 2.000 G  ops, equivalently 1.000 G  MACs\n",
      "fully_quantize: 0, inference_type: 6, input_inference_type: UINT8, output_inference_type: UINT8\n",
      "2024-10-11 15:07:05.269848: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 2.000 G  ops, equivalently 1.000 G  MACs\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/simple_mobilenetv4_conv_medium_bilinear_r320_coco/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/simple_mobilenetv4_conv_medium_bilinear_r320_coco/assets\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Preparing calibration data\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-10-11 15:07:35.963043: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:378] Ignored output_format.\n",
      "2024-10-11 15:07:35.963102: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:381] Ignored drop_control_dependency.\n",
      "2024-10-11 15:07:35.963443: I tensorflow/cc/saved_model/reader.cc:83] Reading SavedModel from: saved_model/simple_mobilenetv4_conv_medium_bilinear_r320_coco\n",
      "2024-10-11 15:07:36.067302: I tensorflow/cc/saved_model/reader.cc:51] Reading meta graph with tags { serve }\n",
      "2024-10-11 15:07:36.067352: I tensorflow/cc/saved_model/reader.cc:146] Reading SavedModel debug info (if present) from: saved_model/simple_mobilenetv4_conv_medium_bilinear_r320_coco\n",
      "2024-10-11 15:07:36.092867: I tensorflow/cc/saved_model/loader.cc:233] Restoring SavedModel bundle.\n",
      "2024-10-11 15:07:36.187933: I tensorflow/cc/saved_model/loader.cc:217] Running initialization op on SavedModel bundle at path: saved_model/simple_mobilenetv4_conv_medium_bilinear_r320_coco\n",
      "2024-10-11 15:07:36.274975: I tensorflow/cc/saved_model/loader.cc:316] SavedModel load for tags { serve }; Status: success: OK. Took 311533 microseconds.\n",
      "Summary on the non-converted ops:\n",
      "---------------------------------\n",
      " * Accepted dialects: tfl, builtin, func\n",
      " * Non-Converted Ops: 167, Total Ops 376, % non-converted = 44.41 %\n",
      " * 167 ARITH ops\n",
      "\n",
      "- arith.constant:  167 occurrences  (f32: 160, i32: 7)\n",
      "\n",
      "\n",
      "\n",
      "  (f32: 18)\n",
      "  (f32: 50)\n",
      "  (f32: 30)\n",
      "  (f32: 35)\n",
      "  (f32: 3)\n",
      "  (f32: 70)\n",
      "2024-10-11 15:07:37.247778: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 6.026 G  ops, equivalently 3.013 G  MACs\n",
      "fully_quantize: 0, inference_type: 6, input_inference_type: UINT8, output_inference_type: UINT8\n",
      "2024-10-11 15:07:43.360127: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 6.026 G  ops, equivalently 3.013 G  MACs\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/simple_mobilenetv4_conv_large_bilinear_r320_coco/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/simple_mobilenetv4_conv_large_bilinear_r320_coco/assets\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Preparing calibration data\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-10-11 15:08:29.393835: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:378] Ignored output_format.\n",
      "2024-10-11 15:08:29.393873: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:381] Ignored drop_control_dependency.\n",
      "2024-10-11 15:08:29.394066: I tensorflow/cc/saved_model/reader.cc:83] Reading SavedModel from: saved_model/simple_mobilenetv4_conv_large_bilinear_r320_coco\n",
      "2024-10-11 15:08:30.080935: I tensorflow/cc/saved_model/reader.cc:51] Reading meta graph with tags { serve }\n",
      "2024-10-11 15:08:30.080969: I tensorflow/cc/saved_model/reader.cc:146] Reading SavedModel debug info (if present) from: saved_model/simple_mobilenetv4_conv_large_bilinear_r320_coco\n",
      "2024-10-11 15:08:30.114359: I tensorflow/cc/saved_model/loader.cc:233] Restoring SavedModel bundle.\n",
      "2024-10-11 15:08:30.230296: I tensorflow/cc/saved_model/loader.cc:217] Running initialization op on SavedModel bundle at path: saved_model/simple_mobilenetv4_conv_large_bilinear_r320_coco\n",
      "2024-10-11 15:08:30.348160: I tensorflow/cc/saved_model/loader.cc:316] SavedModel load for tags { serve }; Status: success: OK. Took 954095 microseconds.\n",
      "Summary on the non-converted ops:\n",
      "---------------------------------\n",
      " * Accepted dialects: tfl, builtin, func\n",
      " * Non-Converted Ops: 216, Total Ops 500, % non-converted = 43.20 %\n",
      " * 216 ARITH ops\n",
      "\n",
      "- arith.constant:  216 occurrences  (f32: 209, i32: 7)\n",
      "\n",
      "\n",
      "\n",
      "  (f32: 23)\n",
      "  (f32: 60)\n",
      "  (f32: 45)\n",
      "  (f32: 50)\n",
      "  (f32: 3)\n",
      "  (f32: 100)\n",
      "2024-10-11 15:08:31.613148: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 13.642 G  ops, equivalently 6.821 G  MACs\n",
      "fully_quantize: 0, inference_type: 6, input_inference_type: UINT8, output_inference_type: UINT8\n",
      "2024-10-11 15:08:41.640662: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 13.642 G  ops, equivalently 6.821 G  MACs\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/simple_mobilenetv4_conv_small_convtranspose_r320_coco/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/simple_mobilenetv4_conv_small_convtranspose_r320_coco/assets\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Preparing calibration data\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-10-11 15:08:50.558575: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:378] Ignored output_format.\n",
      "2024-10-11 15:08:50.558608: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:381] Ignored drop_control_dependency.\n",
      "2024-10-11 15:08:50.558819: I tensorflow/cc/saved_model/reader.cc:83] Reading SavedModel from: saved_model/simple_mobilenetv4_conv_small_convtranspose_r320_coco\n",
      "2024-10-11 15:08:50.616153: I tensorflow/cc/saved_model/reader.cc:51] Reading meta graph with tags { serve }\n",
      "2024-10-11 15:08:50.616183: I tensorflow/cc/saved_model/reader.cc:146] Reading SavedModel debug info (if present) from: saved_model/simple_mobilenetv4_conv_small_convtranspose_r320_coco\n",
      "2024-10-11 15:08:50.623316: I tensorflow/cc/saved_model/loader.cc:233] Restoring SavedModel bundle.\n",
      "2024-10-11 15:08:50.655077: I tensorflow/cc/saved_model/loader.cc:217] Running initialization op on SavedModel bundle at path: saved_model/simple_mobilenetv4_conv_small_convtranspose_r320_coco\n",
      "2024-10-11 15:08:50.686053: I tensorflow/cc/saved_model/loader.cc:316] SavedModel load for tags { serve }; Status: success: OK. Took 127234 microseconds.\n",
      "Summary on the non-converted ops:\n",
      "---------------------------------\n",
      " * Accepted dialects: tfl, builtin, func\n",
      " * Non-Converted Ops: 108, Total Ops 235, % non-converted = 45.96 %\n",
      " * 108 ARITH ops\n",
      "\n",
      "- arith.constant:  108 occurrences  (f32: 97, i32: 11)\n",
      "\n",
      "\n",
      "\n",
      "  (f32: 10)\n",
      "  (f32: 31)\n",
      "  (f32: 15)\n",
      "  (f32: 3)\n",
      "\n",
      "  (f32: 18)\n",
      "  (f32: 3)\n",
      "  (f32: 40)\n",
      "  (f32: 3)\n",
      "2024-10-11 15:08:50.946540: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 1.719 G  ops, equivalently 0.860 G  MACs\n",
      "fully_quantize: 0, inference_type: 6, input_inference_type: UINT8, output_inference_type: UINT8\n",
      "2024-10-11 15:08:52.408250: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 1.719 G  ops, equivalently 0.860 G  MACs\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/simple_mobilenetv4_conv_medium_convtranspose_r320_coco/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/simple_mobilenetv4_conv_medium_convtranspose_r320_coco/assets\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Preparing calibration data\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-10-11 15:09:00.877310: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:378] Ignored output_format.\n",
      "2024-10-11 15:09:00.877339: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:381] Ignored drop_control_dependency.\n",
      "2024-10-11 15:09:00.877552: I tensorflow/cc/saved_model/reader.cc:83] Reading SavedModel from: saved_model/simple_mobilenetv4_conv_medium_convtranspose_r320_coco\n",
      "2024-10-11 15:09:01.049485: I tensorflow/cc/saved_model/reader.cc:51] Reading meta graph with tags { serve }\n",
      "2024-10-11 15:09:01.049518: I tensorflow/cc/saved_model/reader.cc:146] Reading SavedModel debug info (if present) from: saved_model/simple_mobilenetv4_conv_medium_convtranspose_r320_coco\n",
      "2024-10-11 15:09:01.066931: I tensorflow/cc/saved_model/loader.cc:233] Restoring SavedModel bundle.\n",
      "2024-10-11 15:09:01.122237: I tensorflow/cc/saved_model/loader.cc:217] Running initialization op on SavedModel bundle at path: saved_model/simple_mobilenetv4_conv_medium_convtranspose_r320_coco\n",
      "2024-10-11 15:09:01.176961: I tensorflow/cc/saved_model/loader.cc:316] SavedModel load for tags { serve }; Status: success: OK. Took 299412 microseconds.\n",
      "Summary on the non-converted ops:\n",
      "---------------------------------\n",
      " * Accepted dialects: tfl, builtin, func\n",
      " * Non-Converted Ops: 170, Total Ops 378, % non-converted = 44.97 %\n",
      " * 170 ARITH ops\n",
      "\n",
      "- arith.constant:  170 occurrences  (f32: 159, i32: 11)\n",
      "\n",
      "\n",
      "\n",
      "  (f32: 18)\n",
      "  (f32: 47)\n",
      "  (f32: 30)\n",
      "  (f32: 3)\n",
      "\n",
      "  (f32: 32)\n",
      "  (f32: 3)\n",
      "  (f32: 68)\n",
      "  (f32: 3)\n",
      "2024-10-11 15:09:01.735225: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 5.787 G  ops, equivalently 2.893 G  MACs\n",
      "fully_quantize: 0, inference_type: 6, input_inference_type: UINT8, output_inference_type: UINT8\n",
      "2024-10-11 15:09:06.065912: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 5.787 G  ops, equivalently 2.893 G  MACs\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/simple_mobilenetv4_conv_large_convtranspose_r320_coco/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/simple_mobilenetv4_conv_large_convtranspose_r320_coco/assets\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Preparing calibration data\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-10-11 15:09:23.317314: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:378] Ignored output_format.\n",
      "2024-10-11 15:09:23.317346: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:381] Ignored drop_control_dependency.\n",
      "2024-10-11 15:09:23.317556: I tensorflow/cc/saved_model/reader.cc:83] Reading SavedModel from: saved_model/simple_mobilenetv4_conv_large_convtranspose_r320_coco\n",
      "2024-10-11 15:09:23.358356: I tensorflow/cc/saved_model/reader.cc:51] Reading meta graph with tags { serve }\n",
      "2024-10-11 15:09:23.358388: I tensorflow/cc/saved_model/reader.cc:146] Reading SavedModel debug info (if present) from: saved_model/simple_mobilenetv4_conv_large_convtranspose_r320_coco\n",
      "2024-10-11 15:09:23.442203: I tensorflow/cc/saved_model/loader.cc:233] Restoring SavedModel bundle.\n",
      "2024-10-11 15:09:23.691298: I tensorflow/cc/saved_model/loader.cc:217] Running initialization op on SavedModel bundle at path: saved_model/simple_mobilenetv4_conv_large_convtranspose_r320_coco\n",
      "2024-10-11 15:09:23.932921: I tensorflow/cc/saved_model/loader.cc:316] SavedModel load for tags { serve }; Status: success: OK. Took 615366 microseconds.\n",
      "Summary on the non-converted ops:\n",
      "---------------------------------\n",
      " * Accepted dialects: tfl, builtin, func\n",
      " * Non-Converted Ops: 220, Total Ops 503, % non-converted = 43.74 %\n",
      " * 220 ARITH ops\n",
      "\n",
      "- arith.constant:  220 occurrences  (f32: 209, i32: 11)\n",
      "\n",
      "\n",
      "\n",
      "  (f32: 23)\n",
      "  (f32: 57)\n",
      "  (f32: 45)\n",
      "  (f32: 3)\n",
      "\n",
      "  (f32: 47)\n",
      "  (f32: 3)\n",
      "  (f32: 98)\n",
      "  (f32: 3)\n",
      "2024-10-11 15:09:25.748474: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 15.264 G  ops, equivalently 7.632 G  MACs\n",
      "fully_quantize: 0, inference_type: 6, input_inference_type: UINT8, output_inference_type: UINT8\n",
      "2024-10-11 15:09:37.183194: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 15.264 G  ops, equivalently 7.632 G  MACs\n"
     ]
    }
   ],
   "source": [
    "import os\n",
    "import sys\n",
    "\n",
    "# Add the parent directory of the current dir (i.e. root dir of this repo) to the system path\n",
    "# so that local packages (e.g. `models`) can be imported from this notebook.\n",
    "sys.path.append(os.path.dirname(os.getcwd()))\n",
    "\n",
    "from models.simple_baseline import MobilenetV4SimpleBaseline\n",
    "\n",
    "# Upsampling modes of the head to export; \"nearest\" is currently disabled.\n",
    "modes = [\n",
    "    \"bilinear\",\n",
    "    #  \"nearest\",\n",
    "    \"convtranspose\",\n",
    "]\n",
    "# Short size key -> backbone identifier (presumably timm model names — TODO confirm).\n",
    "backbone_dict = {\n",
    "    \"small\": \"mobilenetv4_conv_small.e2400_r224_in1k\",\n",
    "    \"medium\": \"mobilenetv4_conv_medium.e500_r256_in1k\",\n",
    "    \"large\": \"mobilenetv4_conv_large.e500_r256_in1k\",\n",
    "}\n",
    "image_size = (320, 320)  # input resolution used for export (also encoded in model_name)\n",
    "# Export every (mode, backbone) combination: torch -> ONNX -> TF SavedModel -> uint8 tflite.\n",
    "for mode in modes:\n",
    "    for backbone_key, backbone_value in backbone_dict.items():\n",
    "        # Freshly constructed model in float32 eval mode (80 classes, matching the\n",
    "        # \"_coco\" suffix in model_name).\n",
    "        model = (\n",
    "            MobilenetV4SimpleBaseline(backbone=backbone_value, n_classes=80, mode=mode)\n",
    "            .float()\n",
    "            .eval()\n",
    "        )\n",
    "\n",
    "        model_name = (\n",
    "            f\"simple_mobilenetv4_conv_{backbone_key}_{mode}_r{image_size[0]}_coco\"\n",
    "        )\n",
    "        # No checkpoint path: the converter uses the freshly initialized model above.\n",
    "        torch_model_path = None  # use initialized model\n",
    "        onnx_model_path = f\"saved_model/{model_name}/{model_name}.onnx\"\n",
    "        tf_folder_path = f\"saved_model/{model_name}\"\n",
    "        tflite_model_path = onnx_model_path.replace(\n",
    "            \".onnx\", \"_full_integer_quant_uint8.tflite\"\n",
    "        )\n",
    "        calib_data_path = \"calibdata.npy\"  # calibration samples for full-integer quantization\n",
    "\n",
    "        # NOTE(review): `Converter` is not imported in this cell — it must come from an\n",
    "        # earlier cell in this notebook; verify before running standalone.\n",
    "        converter = Converter(\n",
    "            model,\n",
    "            image_size,\n",
    "            torch_model_path,\n",
    "            onnx_model_path,\n",
    "            tf_folder_path,\n",
    "            tflite_model_path,\n",
    "            calib_data_path,\n",
    "            opset_version=13,\n",
    "        )\n",
    "        converter.convert()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## EfficientNet-FPN"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "WARNING:timm.models._builder:Unexpected keys (bn2.bias, bn2.num_batches_tracked, bn2.running_mean, bn2.running_var, bn2.weight, classifier.bias, classifier.weight, conv_head.weight) found while loading pretrained weights. This may be expected if model is being adapted.\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/decode_heads/decode_head.py:137: UserWarning: threshold is not defined for binary, and defaultsto 0.3\n",
      "  warnings.warn(\"threshold is not defined for binary, and defaults\" \"to 0.3\")\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/losses/cross_entropy_loss.py:250: UserWarning: Default ``avg_non_ignore`` is False, if you would like to ignore the certain label and average loss over non-ignore labels, which is the same with PyTorch official cross_entropy, set ``avg_non_ignore=True``.\n",
      "  warnings.warn(\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/utils/wrappers.py:48: TracerWarning: Converting a tensor to a Python integer might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!\n",
      "  size = [int(t * self.scale_factor) for t in x.shape[-2:]]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/fpn_efficientnet_b0_o4_r320/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/fpn_efficientnet_b0_o4_r320/assets\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Preparing calibration data\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-10-13 23:33:23.212183: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:378] Ignored output_format.\n",
      "2024-10-13 23:33:23.212220: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:381] Ignored drop_control_dependency.\n",
      "2024-10-13 23:33:23.212443: I tensorflow/cc/saved_model/reader.cc:83] Reading SavedModel from: saved_model/fpn_efficientnet_b0_o4_r320\n",
      "2024-10-13 23:33:23.288690: I tensorflow/cc/saved_model/reader.cc:51] Reading meta graph with tags { serve }\n",
      "2024-10-13 23:33:23.288723: I tensorflow/cc/saved_model/reader.cc:146] Reading SavedModel debug info (if present) from: saved_model/fpn_efficientnet_b0_o4_r320\n",
      "2024-10-13 23:33:23.305443: I tensorflow/cc/saved_model/loader.cc:233] Restoring SavedModel bundle.\n",
      "2024-10-13 23:33:23.386751: I tensorflow/cc/saved_model/loader.cc:217] Running initialization op on SavedModel bundle at path: saved_model/fpn_efficientnet_b0_o4_r320\n",
      "2024-10-13 23:33:23.467210: I tensorflow/cc/saved_model/loader.cc:316] SavedModel load for tags { serve }; Status: success: OK. Took 254768 microseconds.\n",
      "Summary on the non-converted ops:\n",
      "---------------------------------\n",
      " * Accepted dialects: tfl, builtin, func\n",
      " * Non-Converted Ops: 216, Total Ops 666, % non-converted = 32.43 %\n",
      " * 216 ARITH ops\n",
      "\n",
      "- arith.constant:  216 occurrences  (f32: 179, i32: 37)\n",
      "\n",
      "\n",
      "\n",
      "  (f32: 15)\n",
      "  (f32: 80)\n",
      "  (f32: 16)\n",
      "  (f32: 64)\n",
      "  (f32: 16)\n",
      "  (f32: 64)\n",
      "  (f32: 28)\n",
      "  (f32: 65)\n",
      "  (f32: 9)\n",
      "  (f32: 3)\n",
      "  (f32: 87)\n",
      "2024-10-13 23:33:23.936319: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 2.586 G  ops, equivalently 1.293 G  MACs\n",
      "fully_quantize: 0, inference_type: 6, input_inference_type: UINT8, output_inference_type: UINT8\n",
      "2024-10-13 23:33:30.153235: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 2.586 G  ops, equivalently 1.293 G  MACs\n",
      "WARNING:timm.models._builder:Unexpected keys (bn2.bias, bn2.num_batches_tracked, bn2.running_mean, bn2.running_var, bn2.weight, classifier.bias, classifier.weight, conv_head.weight) found while loading pretrained weights. This may be expected if model is being adapted.\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/decode_heads/decode_head.py:137: UserWarning: threshold is not defined for binary, and defaultsto 0.3\n",
      "  warnings.warn(\"threshold is not defined for binary, and defaults\" \"to 0.3\")\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/losses/cross_entropy_loss.py:250: UserWarning: Default ``avg_non_ignore`` is False, if you would like to ignore the certain label and average loss over non-ignore labels, which is the same with PyTorch official cross_entropy, set ``avg_non_ignore=True``.\n",
      "  warnings.warn(\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/utils/wrappers.py:48: TracerWarning: Converting a tensor to a Python integer might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!\n",
      "  size = [int(t * self.scale_factor) for t in x.shape[-2:]]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/fpn_efficientnet_b3_o4_r320/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/fpn_efficientnet_b3_o4_r320/assets\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Preparing calibration data\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-10-13 23:33:49.762810: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:378] Ignored output_format.\n",
      "2024-10-13 23:33:49.762846: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:381] Ignored drop_control_dependency.\n",
      "2024-10-13 23:33:49.763095: I tensorflow/cc/saved_model/reader.cc:83] Reading SavedModel from: saved_model/fpn_efficientnet_b3_o4_r320\n",
      "2024-10-13 23:33:50.016929: I tensorflow/cc/saved_model/reader.cc:51] Reading meta graph with tags { serve }\n",
      "2024-10-13 23:33:50.016968: I tensorflow/cc/saved_model/reader.cc:146] Reading SavedModel debug info (if present) from: saved_model/fpn_efficientnet_b3_o4_r320\n",
      "2024-10-13 23:33:50.055966: I tensorflow/cc/saved_model/loader.cc:233] Restoring SavedModel bundle.\n",
      "2024-10-13 23:33:50.249449: I tensorflow/cc/saved_model/loader.cc:217] Running initialization op on SavedModel bundle at path: saved_model/fpn_efficientnet_b3_o4_r320\n",
      "2024-10-13 23:33:50.413800: I tensorflow/cc/saved_model/loader.cc:316] SavedModel load for tags { serve }; Status: success: OK. Took 650706 microseconds.\n",
      "Summary on the non-converted ops:\n",
      "---------------------------------\n",
      " * Accepted dialects: tfl, builtin, func\n",
      " * Non-Converted Ops: 318, Total Ops 1005, % non-converted = 31.64 %\n",
      " * 318 ARITH ops\n",
      "\n",
      "- arith.constant:  318 occurrences  (f32: 277, i32: 41)\n",
      "\n",
      "\n",
      "\n",
      "  (f32: 25)\n",
      "  (f32: 119)\n",
      "  (f32: 26)\n",
      "  (f32: 103)\n",
      "  (f32: 26)\n",
      "  (f32: 103)\n",
      "  (f32: 38)\n",
      "  (f32: 105)\n",
      "  (f32: 9)\n",
      "  (f32: 3)\n",
      "  (f32: 127)\n",
      "2024-10-13 23:33:51.386622: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 7.250 G  ops, equivalently 3.625 G  MACs\n",
      "fully_quantize: 0, inference_type: 6, input_inference_type: UINT8, output_inference_type: UINT8\n",
      "2024-10-13 23:34:04.615376: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 7.250 G  ops, equivalently 3.625 G  MACs\n",
      "WARNING:timm.models._builder:Unexpected keys (bn2.bias, bn2.num_batches_tracked, bn2.running_mean, bn2.running_var, bn2.weight, classifier.bias, classifier.weight, conv_head.weight) found while loading pretrained weights. This may be expected if model is being adapted.\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/decode_heads/decode_head.py:137: UserWarning: threshold is not defined for binary, and defaultsto 0.3\n",
      "  warnings.warn(\"threshold is not defined for binary, and defaults\" \"to 0.3\")\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/losses/cross_entropy_loss.py:250: UserWarning: Default ``avg_non_ignore`` is False, if you would like to ignore the certain label and average loss over non-ignore labels, which is the same with PyTorch official cross_entropy, set ``avg_non_ignore=True``.\n",
      "  warnings.warn(\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/utils/wrappers.py:48: TracerWarning: Converting a tensor to a Python integer might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!\n",
      "  size = [int(t * self.scale_factor) for t in x.shape[-2:]]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/fpn_efficientnet_b5_o4_r320/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/fpn_efficientnet_b5_o4_r320/assets\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Preparing calibration data\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-10-13 23:34:35.429111: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:378] Ignored output_format.\n",
      "2024-10-13 23:34:35.429140: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:381] Ignored drop_control_dependency.\n",
      "2024-10-13 23:34:35.429367: I tensorflow/cc/saved_model/reader.cc:83] Reading SavedModel from: saved_model/fpn_efficientnet_b5_o4_r320\n",
      "2024-10-13 23:34:35.995165: I tensorflow/cc/saved_model/reader.cc:51] Reading meta graph with tags { serve }\n",
      "2024-10-13 23:34:35.995200: I tensorflow/cc/saved_model/reader.cc:146] Reading SavedModel debug info (if present) from: saved_model/fpn_efficientnet_b5_o4_r320\n",
      "2024-10-13 23:34:36.038971: I tensorflow/cc/saved_model/loader.cc:233] Restoring SavedModel bundle.\n",
      "2024-10-13 23:34:36.274018: I tensorflow/cc/saved_model/loader.cc:217] Running initialization op on SavedModel bundle at path: saved_model/fpn_efficientnet_b5_o4_r320\n",
      "2024-10-13 23:34:36.456801: I tensorflow/cc/saved_model/loader.cc:316] SavedModel load for tags { serve }; Status: success: OK. Took 1027436 microseconds.\n",
      "Summary on the non-converted ops:\n",
      "---------------------------------\n",
      " * Accepted dialects: tfl, builtin, func\n",
      " * Non-Converted Ops: 448, Total Ops 1444, % non-converted = 31.02 %\n",
      " * 448 ARITH ops\n",
      "\n",
      "- arith.constant:  448 occurrences  (f32: 405, i32: 43)\n",
      "\n",
      "\n",
      "\n",
      "  (f32: 38)\n",
      "  (f32: 170)\n",
      "  (f32: 39)\n",
      "  (f32: 154)\n",
      "  (f32: 39)\n",
      "  (f32: 154)\n",
      "  (f32: 51)\n",
      "  (f32: 157)\n",
      "  (f32: 9)\n",
      "  (f32: 3)\n",
      "  (f32: 179)\n",
      "2024-10-13 23:34:38.045563: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 15.373 G  ops, equivalently 7.686 G  MACs\n",
      "fully_quantize: 0, inference_type: 6, input_inference_type: UINT8, output_inference_type: UINT8\n",
      "2024-10-13 23:35:03.015619: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 15.373 G  ops, equivalently 7.686 G  MACs\n"
     ]
    }
   ],
   "source": [
    "from models.fpn import OCDFPN\n",
    "\n",
    "\n",
    "# Export an OCDFPN model for each EfficientNet backbone:\n",
    "# torch -> ONNX -> TF SavedModel -> full-integer uint8 tflite.\n",
    "image_size = (320, 320)\n",
    "backbone_dict = {\n",
    "    \"efficientnet_b0\": \"efficientnet_b0\",\n",
    "    \"efficientnet_b3\": \"efficientnet_b3\",\n",
    "    \"efficientnet_b5\": \"efficientnet_b5\",\n",
    "}\n",
    "n_classes = 1\n",
    "num_outs = 4\n",
    "out_channel_list = [32, 96, 128]\n",
    "\n",
    "# Each backbone is paired positionally with its FPN output-channel width.\n",
    "for (backbone_key, backbone_value), out_channel in zip(\n",
    "    backbone_dict.items(), out_channel_list\n",
    "):\n",
    "    model = OCDFPN(\n",
    "        backbone=backbone_value,\n",
    "        n_classes=n_classes,\n",
    "        num_outs=num_outs,\n",
    "        out_channel=out_channel,\n",
    "    )\n",
    "\n",
    "    # All artifact paths are derived from one name encoding backbone, outs and resolution.\n",
    "    model_name = f\"fpn_{backbone_key}_o{num_outs}_r{image_size[0]}\"\n",
    "    torch_model_path = None  # use initialized model\n",
    "    onnx_model_path = f\"saved_model/{model_name}/{model_name}.onnx\"\n",
    "    tf_folder_path = f\"saved_model/{model_name}\"\n",
    "    tflite_model_path = onnx_model_path.replace(\n",
    "        \".onnx\", \"_full_integer_quant_uint8.tflite\"\n",
    "    )\n",
    "    calib_data_path = \"calibdata.npy\"\n",
    "\n",
    "    # `Converter` comes from an earlier cell; it runs the whole export pipeline.\n",
    "    converter = Converter(\n",
    "        model,\n",
    "        image_size,\n",
    "        torch_model_path,\n",
    "        onnx_model_path,\n",
    "        tf_folder_path,\n",
    "        tflite_model_path,\n",
    "        calib_data_path,\n",
    "        opset_version=11,\n",
    "    )\n",
    "    converter.convert()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/chenxin/anaconda3/envs/ocd/lib/python3.10/site-packages/albumentations/__init__.py:13: UserWarning: A new version of Albumentations is available: 1.4.18 (you have 1.4.16). Upgrade using: pip install -U albumentations. To disable automatic update checks, set the environment variable NO_ALBUMENTATIONS_UPDATE to 1.\n",
      "  check_for_updates()\n",
      "Unexpected keys (bn2.bias, bn2.num_batches_tracked, bn2.running_mean, bn2.running_var, bn2.weight, classifier.bias, classifier.weight, conv_head.weight) found while loading pretrained weights. This may be expected if model is being adapted.\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/losses/cross_entropy_loss.py:250: UserWarning: Default ``avg_non_ignore`` is False, if you would like to ignore the certain label and average loss over non-ignore labels, which is the same with PyTorch official cross_entropy, set ``avg_non_ignore=True``.\n",
      "  warnings.warn(\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/utils/wrappers.py:48: TracerWarning: Converting a tensor to a Python integer might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!\n",
      "  size = [int(t * self.scale_factor) for t in x.shape[-2:]]\n",
      "2024-10-13 17:31:07.598527: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-13 17:31:07.601422: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-13 17:31:07.602814: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-13 17:31:07.645597: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-13 17:31:07.647877: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-13 17:31:07.649216: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-13 17:31:08.117941: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-13 17:31:08.119562: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-13 17:31:08.121027: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-13 17:31:08.122430: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1929] Created device /job:localhost/replica:0/task:0/device:GPU:0 with 49490 MB memory:  -> device: 0, name: NVIDIA A100 80GB PCIe, pci bus id: 0000:41:00.0, compute capability: 8.0\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:From /home/chenxin/anaconda3/envs/ocd/lib/python3.10/site-packages/tensorflow/python/util/dispatch.py:1260: resize_nearest_neighbor (from tensorflow.python.ops.image_ops_impl) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Use `tf.image.resize(...method=ResizeMethod.NEAREST_NEIGHBOR...)` instead.\n",
      "INFO:tensorflow:Assets written to: saved_model/fpn_efficientnet_b0_o4_r320_coco/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/fpn_efficientnet_b0_o4_r320_coco/assets\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Preparing calibration data\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-10-13 17:31:31.378432: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:378] Ignored output_format.\n",
      "2024-10-13 17:31:31.378489: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:381] Ignored drop_control_dependency.\n",
      "2024-10-13 17:31:31.379441: I tensorflow/cc/saved_model/reader.cc:83] Reading SavedModel from: saved_model/fpn_efficientnet_b0_o4_r320_coco\n",
      "2024-10-13 17:31:31.443989: I tensorflow/cc/saved_model/reader.cc:51] Reading meta graph with tags { serve }\n",
      "2024-10-13 17:31:31.444032: I tensorflow/cc/saved_model/reader.cc:146] Reading SavedModel debug info (if present) from: saved_model/fpn_efficientnet_b0_o4_r320_coco\n",
      "2024-10-13 17:31:31.503161: I tensorflow/compiler/mlir/mlir_graph_optimization_pass.cc:388] MLIR V1 optimization pass is not enabled\n",
      "2024-10-13 17:31:31.508059: I tensorflow/cc/saved_model/loader.cc:233] Restoring SavedModel bundle.\n",
      "2024-10-13 17:31:31.667610: I tensorflow/cc/saved_model/loader.cc:217] Running initialization op on SavedModel bundle at path: saved_model/fpn_efficientnet_b0_o4_r320_coco\n",
      "2024-10-13 17:31:31.797175: I tensorflow/cc/saved_model/loader.cc:316] SavedModel load for tags { serve }; Status: success: OK. Took 417738 microseconds.\n",
      "2024-10-13 17:31:31.999702: I tensorflow/compiler/mlir/tensorflow/utils/dump_mlir_util.cc:269] disabling MLIR crash reproducer, set env var `MLIR_CRASH_REPRODUCER_DIRECTORY` to enable.\n",
      "Summary on the non-converted ops:\n",
      "---------------------------------\n",
      " * Accepted dialects: tfl, builtin, func\n",
      " * Non-Converted Ops: 215, Total Ops 665, % non-converted = 32.33 %\n",
      " * 215 ARITH ops\n",
      "\n",
      "- arith.constant:  215 occurrences  (f32: 179, i32: 36)\n",
      "\n",
      "\n",
      "\n",
      "  (f32: 15)\n",
      "  (f32: 80)\n",
      "  (f32: 16)\n",
      "  (f32: 64)\n",
      "  (f32: 16)\n",
      "  (f32: 64)\n",
      "  (f32: 28)\n",
      "  (f32: 64)\n",
      "  (f32: 9)\n",
      "  (f32: 3)\n",
      "  (f32: 88)\n",
      "2024-10-13 17:31:32.769765: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 3.382 G  ops, equivalently 1.691 G  MACs\n",
      "fully_quantize: 0, inference_type: 6, input_inference_type: UINT8, output_inference_type: UINT8\n",
      "2024-10-13 17:31:40.796508: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 3.382 G  ops, equivalently 1.691 G  MACs\n",
      "WARNING:timm.models._builder:Unexpected keys (bn2.bias, bn2.num_batches_tracked, bn2.running_mean, bn2.running_var, bn2.weight, classifier.bias, classifier.weight, conv_head.weight) found while loading pretrained weights. This may be expected if model is being adapted.\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/losses/cross_entropy_loss.py:250: UserWarning: Default ``avg_non_ignore`` is False, if you would like to ignore the certain label and average loss over non-ignore labels, which is the same with PyTorch official cross_entropy, set ``avg_non_ignore=True``.\n",
      "  warnings.warn(\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/utils/wrappers.py:48: TracerWarning: Converting a tensor to a Python integer might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!\n",
      "  size = [int(t * self.scale_factor) for t in x.shape[-2:]]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/fpn_efficientnet_b3_o4_r320_coco/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/fpn_efficientnet_b3_o4_r320_coco/assets\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Preparing calibration data\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-10-13 17:32:29.954431: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:378] Ignored output_format.\n",
      "2024-10-13 17:32:29.954490: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:381] Ignored drop_control_dependency.\n",
      "2024-10-13 17:32:29.954814: I tensorflow/cc/saved_model/reader.cc:83] Reading SavedModel from: saved_model/fpn_efficientnet_b3_o4_r320_coco\n",
      "2024-10-13 17:32:29.980886: I tensorflow/cc/saved_model/reader.cc:51] Reading meta graph with tags { serve }\n",
      "2024-10-13 17:32:29.980936: I tensorflow/cc/saved_model/reader.cc:146] Reading SavedModel debug info (if present) from: saved_model/fpn_efficientnet_b3_o4_r320_coco\n",
      "2024-10-13 17:32:30.023301: I tensorflow/cc/saved_model/loader.cc:233] Restoring SavedModel bundle.\n",
      "2024-10-13 17:32:30.183412: I tensorflow/cc/saved_model/loader.cc:217] Running initialization op on SavedModel bundle at path: saved_model/fpn_efficientnet_b3_o4_r320_coco\n",
      "2024-10-13 17:32:30.328692: I tensorflow/cc/saved_model/loader.cc:316] SavedModel load for tags { serve }; Status: success: OK. Took 373882 microseconds.\n",
      "Summary on the non-converted ops:\n",
      "---------------------------------\n",
      " * Accepted dialects: tfl, builtin, func\n",
      " * Non-Converted Ops: 317, Total Ops 1004, % non-converted = 31.57 %\n",
      " * 317 ARITH ops\n",
      "\n",
      "- arith.constant:  317 occurrences  (f32: 277, i32: 40)\n",
      "\n",
      "\n",
      "\n",
      "  (f32: 25)\n",
      "  (f32: 119)\n",
      "  (f32: 26)\n",
      "  (f32: 103)\n",
      "  (f32: 26)\n",
      "  (f32: 103)\n",
      "  (f32: 38)\n",
      "  (f32: 104)\n",
      "  (f32: 9)\n",
      "  (f32: 3)\n",
      "  (f32: 128)\n",
      "2024-10-13 17:32:31.781335: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 8.533 G  ops, equivalently 4.267 G  MACs\n",
      "fully_quantize: 0, inference_type: 6, input_inference_type: UINT8, output_inference_type: UINT8\n",
      "2024-10-13 17:32:48.053286: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 8.533 G  ops, equivalently 4.267 G  MACs\n",
      "WARNING:timm.models._builder:Unexpected keys (bn2.bias, bn2.num_batches_tracked, bn2.running_mean, bn2.running_var, bn2.weight, classifier.bias, classifier.weight, conv_head.weight) found while loading pretrained weights. This may be expected if model is being adapted.\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/losses/cross_entropy_loss.py:250: UserWarning: Default ``avg_non_ignore`` is False, if you would like to ignore the certain label and average loss over non-ignore labels, which is the same with PyTorch official cross_entropy, set ``avg_non_ignore=True``.\n",
      "  warnings.warn(\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/utils/wrappers.py:48: TracerWarning: Converting a tensor to a Python integer might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!\n",
      "  size = [int(t * self.scale_factor) for t in x.shape[-2:]]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/fpn_efficientnet_b5_o4_r320_coco/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/fpn_efficientnet_b5_o4_r320_coco/assets\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Preparing calibration data\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-10-13 17:33:46.953652: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:378] Ignored output_format.\n",
      "2024-10-13 17:33:46.953691: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:381] Ignored drop_control_dependency.\n",
      "2024-10-13 17:33:46.953937: I tensorflow/cc/saved_model/reader.cc:83] Reading SavedModel from: saved_model/fpn_efficientnet_b5_o4_r320_coco\n",
      "2024-10-13 17:33:47.534490: I tensorflow/cc/saved_model/reader.cc:51] Reading meta graph with tags { serve }\n",
      "2024-10-13 17:33:47.534526: I tensorflow/cc/saved_model/reader.cc:146] Reading SavedModel debug info (if present) from: saved_model/fpn_efficientnet_b5_o4_r320_coco\n",
      "2024-10-13 17:33:47.651188: I tensorflow/cc/saved_model/loader.cc:233] Restoring SavedModel bundle.\n",
      "2024-10-13 17:33:47.966614: I tensorflow/cc/saved_model/loader.cc:217] Running initialization op on SavedModel bundle at path: saved_model/fpn_efficientnet_b5_o4_r320_coco\n",
      "2024-10-13 17:33:48.162542: I tensorflow/cc/saved_model/loader.cc:316] SavedModel load for tags { serve }; Status: success: OK. Took 1208606 microseconds.\n",
      "Summary on the non-converted ops:\n",
      "---------------------------------\n",
      " * Accepted dialects: tfl, builtin, func\n",
      " * Non-Converted Ops: 447, Total Ops 1443, % non-converted = 30.98 %\n",
      " * 447 ARITH ops\n",
      "\n",
      "- arith.constant:  447 occurrences  (f32: 405, i32: 42)\n",
      "\n",
      "\n",
      "\n",
      "  (f32: 38)\n",
      "  (f32: 170)\n",
      "  (f32: 39)\n",
      "  (f32: 154)\n",
      "  (f32: 39)\n",
      "  (f32: 154)\n",
      "  (f32: 51)\n",
      "  (f32: 156)\n",
      "  (f32: 9)\n",
      "  (f32: 3)\n",
      "  (f32: 180)\n",
      "2024-10-13 17:33:49.975380: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 16.931 G  ops, equivalently 8.465 G  MACs\n",
      "fully_quantize: 0, inference_type: 6, input_inference_type: UINT8, output_inference_type: UINT8\n",
      "2024-10-13 17:34:16.094985: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 16.931 G  ops, equivalently 8.465 G  MACs\n"
     ]
    }
   ],
   "source": [
    "from models.fpn import OCDFPN\n",
    "\n",
    "\n",
    "image_size = (320, 320)\n",
    "backbone_dict = {\n",
    "    \"efficientnet_b0\": \"efficientnet_b0\",\n",
    "    \"efficientnet_b3\": \"efficientnet_b3\",\n",
    "    \"efficientnet_b5\": \"efficientnet_b5\",\n",
    "}\n",
    "n_classes = 80\n",
    "num_outs = 4\n",
    "out_channel_list = [32, 96, 128]\n",
    "\n",
    "for backbone_key, out_channel in zip(backbone_dict.keys(), out_channel_list):\n",
    "    backbone_value = backbone_dict[backbone_key]\n",
    "    model = OCDFPN(\n",
    "        backbone=backbone_value,\n",
    "        n_classes=80,\n",
    "        num_outs=num_outs,\n",
    "        out_channel=out_channel,\n",
    "    )\n",
    "\n",
    "    model_name = f\"fpn_{backbone_key}_o{num_outs}_r{image_size[0]}_coco\"\n",
    "    torch_model_path = None  # use initialized model\n",
    "    onnx_model_path = f\"saved_model/{model_name}/{model_name}.onnx\"\n",
    "    tf_folder_path = f\"saved_model/{model_name}\"\n",
    "    tflite_model_path = onnx_model_path.replace(\n",
    "        \".onnx\", \"_full_integer_quant_uint8.tflite\"\n",
    "    )\n",
    "    calib_data_path = \"calibdata.npy\"\n",
    "\n",
    "    converter = Converter(\n",
    "        model,\n",
    "        image_size,\n",
    "        torch_model_path,\n",
    "        onnx_model_path,\n",
    "        tf_folder_path,\n",
    "        tflite_model_path,\n",
    "        calib_data_path,\n",
    "        opset_version=11,\n",
    "    )\n",
    "    converter.convert()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## EfficientNetV2FPN"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/chenxin/anaconda3/envs/ocd/lib/python3.10/site-packages/albumentations/__init__.py:13: UserWarning: A new version of Albumentations is available: 1.4.18 (you have 1.4.16). Upgrade using: pip install -U albumentations. To disable automatic update checks, set the environment variable NO_ALBUMENTATIONS_UPDATE to 1.\n",
      "  check_for_updates()\n",
      "Unexpected keys (bn2.bias, bn2.num_batches_tracked, bn2.running_mean, bn2.running_var, bn2.weight, classifier.bias, classifier.weight, conv_head.weight) found while loading pretrained weights. This may be expected if model is being adapted.\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/decode_heads/decode_head.py:137: UserWarning: threshold is not defined for binary, and defaultsto 0.3\n",
      "  warnings.warn(\"threshold is not defined for binary, and defaults\" \"to 0.3\")\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/losses/cross_entropy_loss.py:250: UserWarning: Default ``avg_non_ignore`` is False, if you would like to ignore the certain label and average loss over non-ignore labels, which is the same with PyTorch official cross_entropy, set ``avg_non_ignore=True``.\n",
      "  warnings.warn(\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/utils/wrappers.py:48: TracerWarning: Converting a tensor to a Python integer might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!\n",
      "  size = [int(t * self.scale_factor) for t in x.shape[-2:]]\n",
      "2024-10-19 23:00:27.171098: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-19 23:00:27.184268: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-19 23:00:27.185517: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-19 23:00:27.190922: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-19 23:00:27.192149: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-19 23:00:27.193335: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-19 23:00:27.433573: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-19 23:00:27.434924: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-19 23:00:27.436126: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-19 23:00:27.437293: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1929] Created device /job:localhost/replica:0/task:0/device:GPU:0 with 49124 MB memory:  -> device: 0, name: NVIDIA A100 80GB PCIe, pci bus id: 0000:01:00.0, compute capability: 8.0\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:From /home/chenxin/anaconda3/envs/ocd/lib/python3.10/site-packages/tensorflow/python/util/dispatch.py:1260: resize_nearest_neighbor (from tensorflow.python.ops.image_ops_impl) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Use `tf.image.resize(...method=ResizeMethod.NEAREST_NEIGHBOR...)` instead.\n",
      "INFO:tensorflow:Assets written to: saved_model/fpn_efficientnet_tiny_o4_r320/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/fpn_efficientnet_tiny_o4_r320/assets\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Preparing calibration data\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-10-19 23:00:49.426013: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:378] Ignored output_format.\n",
      "2024-10-19 23:00:49.426046: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:381] Ignored drop_control_dependency.\n",
      "2024-10-19 23:00:49.426766: I tensorflow/cc/saved_model/reader.cc:83] Reading SavedModel from: saved_model/fpn_efficientnet_tiny_o4_r320\n",
      "2024-10-19 23:00:49.448392: I tensorflow/cc/saved_model/reader.cc:51] Reading meta graph with tags { serve }\n",
      "2024-10-19 23:00:49.448426: I tensorflow/cc/saved_model/reader.cc:146] Reading SavedModel debug info (if present) from: saved_model/fpn_efficientnet_tiny_o4_r320\n",
      "2024-10-19 23:00:49.504753: I tensorflow/compiler/mlir/mlir_graph_optimization_pass.cc:388] MLIR V1 optimization pass is not enabled\n",
      "2024-10-19 23:00:49.511220: I tensorflow/cc/saved_model/loader.cc:233] Restoring SavedModel bundle.\n",
      "2024-10-19 23:00:49.705272: I tensorflow/cc/saved_model/loader.cc:217] Running initialization op on SavedModel bundle at path: saved_model/fpn_efficientnet_tiny_o4_r320\n",
      "2024-10-19 23:00:49.839443: I tensorflow/cc/saved_model/loader.cc:316] SavedModel load for tags { serve }; Status: success: OK. Took 412678 microseconds.\n",
      "2024-10-19 23:00:50.089661: I tensorflow/compiler/mlir/tensorflow/utils/dump_mlir_util.cc:269] disabling MLIR crash reproducer, set env var `MLIR_CRASH_REPRODUCER_DIRECTORY` to enable.\n",
      "Summary on the non-converted ops:\n",
      "---------------------------------\n",
      " * Accepted dialects: tfl, builtin, func\n",
      " * Non-Converted Ops: 373, Total Ops 1234, % non-converted = 30.23 %\n",
      " * 373 ARITH ops\n",
      "\n",
      "- arith.constant:  373 occurrences  (f32: 347, i32: 26)\n",
      "\n",
      "\n",
      "\n",
      "  (f32: 40)\n",
      "  (f32: 151)\n",
      "  (f32: 29)\n",
      "  (f32: 127)\n",
      "  (f32: 29)\n",
      "  (f32: 127)\n",
      "  (f32: 51)\n",
      "  (f32: 117)\n",
      "  (f32: 9)\n",
      "  (f32: 3)\n",
      "  (f32: 175)\n",
      "2024-10-19 23:00:50.790893: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 9.283 G  ops, equivalently 4.641 G  MACs\n",
      "fully_quantize: 0, inference_type: 6, input_inference_type: UINT8, output_inference_type: UINT8\n",
      "2024-10-19 23:01:01.254298: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 9.283 G  ops, equivalently 4.641 G  MACs\n",
      "WARNING:timm.models._builder:Unexpected keys (bn2.bias, bn2.num_batches_tracked, bn2.running_mean, bn2.running_var, bn2.weight, classifier.bias, classifier.weight, conv_head.weight) found while loading pretrained weights. This may be expected if model is being adapted.\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/decode_heads/decode_head.py:137: UserWarning: threshold is not defined for binary, and defaultsto 0.3\n",
      "  warnings.warn(\"threshold is not defined for binary, and defaults\" \"to 0.3\")\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/losses/cross_entropy_loss.py:250: UserWarning: Default ``avg_non_ignore`` is False, if you would like to ignore the certain label and average loss over non-ignore labels, which is the same with PyTorch official cross_entropy, set ``avg_non_ignore=True``.\n",
      "  warnings.warn(\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/utils/wrappers.py:48: TracerWarning: Converting a tensor to a Python integer might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!\n",
      "  size = [int(t * self.scale_factor) for t in x.shape[-2:]]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/fpn_efficientnet_small_o4_r320/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/fpn_efficientnet_small_o4_r320/assets\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Preparing calibration data\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-10-19 23:01:25.349137: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:378] Ignored output_format.\n",
      "2024-10-19 23:01:25.349181: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:381] Ignored drop_control_dependency.\n",
      "2024-10-19 23:01:25.349411: I tensorflow/cc/saved_model/reader.cc:83] Reading SavedModel from: saved_model/fpn_efficientnet_small_o4_r320\n",
      "2024-10-19 23:01:25.382225: I tensorflow/cc/saved_model/reader.cc:51] Reading meta graph with tags { serve }\n",
      "2024-10-19 23:01:25.382266: I tensorflow/cc/saved_model/reader.cc:146] Reading SavedModel debug info (if present) from: saved_model/fpn_efficientnet_small_o4_r320\n",
      "2024-10-19 23:01:25.486771: I tensorflow/cc/saved_model/loader.cc:233] Restoring SavedModel bundle.\n",
      "2024-10-19 23:01:25.727980: I tensorflow/cc/saved_model/loader.cc:217] Running initialization op on SavedModel bundle at path: saved_model/fpn_efficientnet_small_o4_r320\n",
      "2024-10-19 23:01:25.893781: I tensorflow/cc/saved_model/loader.cc:316] SavedModel load for tags { serve }; Status: success: OK. Took 544372 microseconds.\n",
      "Summary on the non-converted ops:\n",
      "---------------------------------\n",
      " * Accepted dialects: tfl, builtin, func\n",
      " * Non-Converted Ops: 387, Total Ops 1278, % non-converted = 30.28 %\n",
      " * 387 ARITH ops\n",
      "\n",
      "- arith.constant:  387 occurrences  (f32: 361, i32: 26)\n",
      "\n",
      "\n",
      "\n",
      "  (f32: 41)\n",
      "  (f32: 157)\n",
      "  (f32: 30)\n",
      "  (f32: 131)\n",
      "  (f32: 30)\n",
      "  (f32: 131)\n",
      "  (f32: 52)\n",
      "  (f32: 121)\n",
      "  (f32: 9)\n",
      "  (f32: 3)\n",
      "  (f32: 183)\n",
      "2024-10-19 23:01:27.265640: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 14.341 G  ops, equivalently 7.170 G  MACs\n",
      "fully_quantize: 0, inference_type: 6, input_inference_type: UINT8, output_inference_type: UINT8\n",
      "2024-10-19 23:01:41.841605: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 14.341 G  ops, equivalently 7.170 G  MACs\n"
     ]
    }
   ],
   "source": [
    "from models.fpn import OCDFPN\n",
    "\n",
    "\n",
    "image_size = (320, 320)\n",
    "backbone_dict = {\n",
    "    \"efficientnetv2_tiny\": \"efficientnetv2_rw_t\",\n",
    "    \"efficientnetv2_small\": \"efficientnetv2_rw_s\",\n",
    "}\n",
    "n_classes = 1\n",
    "num_outs = 4\n",
    "out_channel_list = [32, 64]\n",
    "\n",
    "for backbone_key, out_channel in zip(backbone_dict.keys(), out_channel_list):\n",
    "    backbone_value = backbone_dict[backbone_key]\n",
    "    model = OCDFPN(\n",
    "        backbone=backbone_value,\n",
    "        n_classes=n_classes,\n",
    "        num_outs=num_outs,\n",
    "        out_channel=out_channel,\n",
    "    )\n",
    "\n",
    "    model_name = f\"fpn_{backbone_key}_o{num_outs}_r{image_size[0]}\"\n",
    "    torch_model_path = None  # use initialized model\n",
    "    onnx_model_path = f\"saved_model/{model_name}/{model_name}.onnx\"\n",
    "    tf_folder_path = f\"saved_model/{model_name}\"\n",
    "    tflite_model_path = onnx_model_path.replace(\n",
    "        \".onnx\", \"_full_integer_quant_uint8.tflite\"\n",
    "    )\n",
    "    calib_data_path = \"calibdata.npy\"\n",
    "\n",
    "    converter = Converter(\n",
    "        model,\n",
    "        image_size,\n",
    "        torch_model_path,\n",
    "        onnx_model_path,\n",
    "        tf_folder_path,\n",
    "        tflite_model_path,\n",
    "        calib_data_path,\n",
    "        opset_version=11,\n",
    "    )\n",
    "    converter.convert()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "WARNING:timm.models._builder:Unexpected keys (bn2.bias, bn2.num_batches_tracked, bn2.running_mean, bn2.running_var, bn2.weight, classifier.bias, classifier.weight, conv_head.weight) found while loading pretrained weights. This may be expected if model is being adapted.\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/losses/cross_entropy_loss.py:250: UserWarning: Default ``avg_non_ignore`` is False, if you would like to ignore the certain label and average loss over non-ignore labels, which is the same with PyTorch official cross_entropy, set ``avg_non_ignore=True``.\n",
      "  warnings.warn(\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/utils/wrappers.py:48: TracerWarning: Converting a tensor to a Python integer might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!\n",
      "  size = [int(t * self.scale_factor) for t in x.shape[-2:]]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/fpn_efficientnet_tiny_o4_r320_coco/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/fpn_efficientnet_tiny_o4_r320_coco/assets\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Preparing calibration data\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-10-19 23:02:04.837935: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:378] Ignored output_format.\n",
      "2024-10-19 23:02:04.837980: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:381] Ignored drop_control_dependency.\n",
      "2024-10-19 23:02:04.838228: I tensorflow/cc/saved_model/reader.cc:83] Reading SavedModel from: saved_model/fpn_efficientnet_tiny_o4_r320_coco\n",
      "2024-10-19 23:02:04.862668: I tensorflow/cc/saved_model/reader.cc:51] Reading meta graph with tags { serve }\n",
      "2024-10-19 23:02:04.862789: I tensorflow/cc/saved_model/reader.cc:146] Reading SavedModel debug info (if present) from: saved_model/fpn_efficientnet_tiny_o4_r320_coco\n",
      "2024-10-19 23:02:04.895117: I tensorflow/cc/saved_model/loader.cc:233] Restoring SavedModel bundle.\n",
      "2024-10-19 23:02:05.046325: I tensorflow/cc/saved_model/loader.cc:217] Running initialization op on SavedModel bundle at path: saved_model/fpn_efficientnet_tiny_o4_r320_coco\n",
      "2024-10-19 23:02:05.209658: I tensorflow/cc/saved_model/loader.cc:316] SavedModel load for tags { serve }; Status: success: OK. Took 371431 microseconds.\n",
      "Summary on the non-converted ops:\n",
      "---------------------------------\n",
      " * Accepted dialects: tfl, builtin, func\n",
      " * Non-Converted Ops: 372, Total Ops 1233, % non-converted = 30.17 %\n",
      " * 372 ARITH ops\n",
      "\n",
      "- arith.constant:  372 occurrences  (f32: 347, i32: 25)\n",
      "\n",
      "\n",
      "\n",
      "  (f32: 40)\n",
      "  (f32: 151)\n",
      "  (f32: 29)\n",
      "  (f32: 127)\n",
      "  (f32: 29)\n",
      "  (f32: 127)\n",
      "  (f32: 51)\n",
      "  (f32: 116)\n",
      "  (f32: 9)\n",
      "  (f32: 3)\n",
      "  (f32: 176)\n",
      "2024-10-19 23:02:06.261016: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 10.018 G  ops, equivalently 5.009 G  MACs\n",
      "fully_quantize: 0, inference_type: 6, input_inference_type: UINT8, output_inference_type: UINT8\n",
      "2024-10-19 23:02:16.788992: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 10.018 G  ops, equivalently 5.009 G  MACs\n",
      "WARNING:timm.models._builder:Unexpected keys (bn2.bias, bn2.num_batches_tracked, bn2.running_mean, bn2.running_var, bn2.weight, classifier.bias, classifier.weight, conv_head.weight) found while loading pretrained weights. This may be expected if model is being adapted.\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/losses/cross_entropy_loss.py:250: UserWarning: Default ``avg_non_ignore`` is False, if you would like to ignore the certain label and average loss over non-ignore labels, which is the same with PyTorch official cross_entropy, set ``avg_non_ignore=True``.\n",
      "  warnings.warn(\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/utils/wrappers.py:48: TracerWarning: Converting a tensor to a Python integer might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!\n",
      "  size = [int(t * self.scale_factor) for t in x.shape[-2:]]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/fpn_efficientnet_small_o4_r320_coco/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/fpn_efficientnet_small_o4_r320_coco/assets\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Preparing calibration data\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-10-19 23:02:41.541022: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:378] Ignored output_format.\n",
      "2024-10-19 23:02:41.541057: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:381] Ignored drop_control_dependency.\n",
      "2024-10-19 23:02:41.541271: I tensorflow/cc/saved_model/reader.cc:83] Reading SavedModel from: saved_model/fpn_efficientnet_small_o4_r320_coco\n",
      "2024-10-19 23:02:42.031305: I tensorflow/cc/saved_model/reader.cc:51] Reading meta graph with tags { serve }\n",
      "2024-10-19 23:02:42.031345: I tensorflow/cc/saved_model/reader.cc:146] Reading SavedModel debug info (if present) from: saved_model/fpn_efficientnet_small_o4_r320_coco\n",
      "2024-10-19 23:02:42.070241: I tensorflow/cc/saved_model/loader.cc:233] Restoring SavedModel bundle.\n",
      "2024-10-19 23:02:42.239817: I tensorflow/cc/saved_model/loader.cc:217] Running initialization op on SavedModel bundle at path: saved_model/fpn_efficientnet_small_o4_r320_coco\n",
      "2024-10-19 23:02:42.395305: I tensorflow/cc/saved_model/loader.cc:316] SavedModel load for tags { serve }; Status: success: OK. Took 854035 microseconds.\n",
      "Summary on the non-converted ops:\n",
      "---------------------------------\n",
      " * Accepted dialects: tfl, builtin, func\n",
      " * Non-Converted Ops: 386, Total Ops 1277, % non-converted = 30.23 %\n",
      " * 386 ARITH ops\n",
      "\n",
      "- arith.constant:  386 occurrences  (f32: 361, i32: 25)\n",
      "\n",
      "\n",
      "\n",
      "  (f32: 41)\n",
      "  (f32: 157)\n",
      "  (f32: 30)\n",
      "  (f32: 131)\n",
      "  (f32: 30)\n",
      "  (f32: 131)\n",
      "  (f32: 52)\n",
      "  (f32: 120)\n",
      "  (f32: 9)\n",
      "  (f32: 3)\n",
      "  (f32: 184)\n",
      "2024-10-19 23:02:43.672164: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 15.351 G  ops, equivalently 7.675 G  MACs\n",
      "fully_quantize: 0, inference_type: 6, input_inference_type: UINT8, output_inference_type: UINT8\n",
      "2024-10-19 23:02:58.251675: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 15.351 G  ops, equivalently 7.675 G  MACs\n"
     ]
    }
   ],
   "source": [
    "from models.fpn import OCDFPN\n",
    "\n",
    "\n",
    "image_size = (320, 320)\n",
    "backbone_dict = {\n",
    "    \"efficientnetv2_tiny\": \"efficientnetv2_rw_t\",\n",
    "    \"efficientnetv2_small\": \"efficientnetv2_rw_s\",\n",
    "}\n",
    "n_classes = 80\n",
    "num_outs = 4\n",
    "out_channel_list = [32, 64]\n",
    "\n",
    "for backbone_key, out_channel in zip(backbone_dict.keys(), out_channel_list):\n",
    "    backbone_value = backbone_dict[backbone_key]\n",
    "    model = OCDFPN(\n",
    "        backbone=backbone_value,\n",
    "        n_classes=80,\n",
    "        num_outs=num_outs,\n",
    "        out_channel=out_channel,\n",
    "    )\n",
    "\n",
    "    model_name = f\"fpn_{backbone_key}_o{num_outs}_r{image_size[0]}_coco\"\n",
    "    torch_model_path = None  # use initialized model\n",
    "    onnx_model_path = f\"saved_model/{model_name}/{model_name}.onnx\"\n",
    "    tf_folder_path = f\"saved_model/{model_name}\"\n",
    "    tflite_model_path = onnx_model_path.replace(\n",
    "        \".onnx\", \"_full_integer_quant_uint8.tflite\"\n",
    "    )\n",
    "    calib_data_path = \"calibdata.npy\"\n",
    "\n",
    "    converter = Converter(\n",
    "        model,\n",
    "        image_size,\n",
    "        torch_model_path,\n",
    "        onnx_model_path,\n",
    "        tf_folder_path,\n",
    "        tflite_model_path,\n",
    "        calib_data_path,\n",
    "        opset_version=11,\n",
    "    )\n",
    "    converter.convert()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## RepVGGFPN"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/decode_heads/decode_head.py:137: UserWarning: threshold is not defined for binary, and defaultsto 0.3\n",
      "  warnings.warn(\"threshold is not defined for binary, and defaults\" \"to 0.3\")\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/losses/cross_entropy_loss.py:250: UserWarning: Default ``avg_non_ignore`` is False, if you would like to ignore the certain label and average loss over non-ignore labels, which is the same with PyTorch official cross_entropy, set ``avg_non_ignore=True``.\n",
      "  warnings.warn(\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/utils/wrappers.py:48: TracerWarning: Converting a tensor to a Python integer might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!\n",
      "  size = [int(t * self.scale_factor) for t in x.shape[-2:]]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/fpn_repvgg_a0_o4_r320/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/fpn_repvgg_a0_o4_r320/assets\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Preparing calibration data\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-10-13 23:35:16.760460: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:378] Ignored output_format.\n",
      "2024-10-13 23:35:16.760501: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:381] Ignored drop_control_dependency.\n",
      "2024-10-13 23:35:16.760766: I tensorflow/cc/saved_model/reader.cc:83] Reading SavedModel from: saved_model/fpn_repvgg_a0_o4_r320\n",
      "2024-10-13 23:35:16.778258: I tensorflow/cc/saved_model/reader.cc:51] Reading meta graph with tags { serve }\n",
      "2024-10-13 23:35:16.778295: I tensorflow/cc/saved_model/reader.cc:146] Reading SavedModel debug info (if present) from: saved_model/fpn_repvgg_a0_o4_r320\n",
      "2024-10-13 23:35:16.805561: I tensorflow/cc/saved_model/loader.cc:233] Restoring SavedModel bundle.\n",
      "2024-10-13 23:35:16.966405: I tensorflow/cc/saved_model/loader.cc:217] Running initialization op on SavedModel bundle at path: saved_model/fpn_repvgg_a0_o4_r320\n",
      "2024-10-13 23:35:17.098856: I tensorflow/cc/saved_model/loader.cc:316] SavedModel load for tags { serve }; Status: success: OK. Took 338090 microseconds.\n",
      "Summary on the non-converted ops:\n",
      "---------------------------------\n",
      " * Accepted dialects: tfl, builtin, func\n",
      " * Non-Converted Ops: 148, Total Ops 419, % non-converted = 35.32 %\n",
      " * 148 ARITH ops\n",
      "\n",
      "- arith.constant:  148 occurrences  (f32: 141, i32: 7)\n",
      "\n",
      "\n",
      "\n",
      "  (f32: 62)\n",
      "  (f32: 60)\n",
      "  (f32: 17)\n",
      "  (f32: 33)\n",
      "  (f32: 1)\n",
      "  (f32: 9)\n",
      "  (f32: 3)\n",
      "  (f32: 83)\n",
      "2024-10-13 23:35:17.688051: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 6.771 G  ops, equivalently 3.386 G  MACs\n",
      "fully_quantize: 0, inference_type: 6, input_inference_type: UINT8, output_inference_type: UINT8\n",
      "2024-10-13 23:35:21.555798: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 6.771 G  ops, equivalently 3.386 G  MACs\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/decode_heads/decode_head.py:137: UserWarning: threshold is not defined for binary, and defaultsto 0.3\n",
      "  warnings.warn(\"threshold is not defined for binary, and defaults\" \"to 0.3\")\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/losses/cross_entropy_loss.py:250: UserWarning: Default ``avg_non_ignore`` is False, if you would like to ignore the certain label and average loss over non-ignore labels, which is the same with PyTorch official cross_entropy, set ``avg_non_ignore=True``.\n",
      "  warnings.warn(\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/utils/wrappers.py:48: TracerWarning: Converting a tensor to a Python integer might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!\n",
      "  size = [int(t * self.scale_factor) for t in x.shape[-2:]]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/fpn_repvgg_a1_o4_r320/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/fpn_repvgg_a1_o4_r320/assets\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Preparing calibration data\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-10-13 23:35:35.271245: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:378] Ignored output_format.\n",
      "2024-10-13 23:35:35.271277: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:381] Ignored drop_control_dependency.\n",
      "2024-10-13 23:35:35.271480: I tensorflow/cc/saved_model/reader.cc:83] Reading SavedModel from: saved_model/fpn_repvgg_a1_o4_r320\n",
      "2024-10-13 23:35:35.289447: I tensorflow/cc/saved_model/reader.cc:51] Reading meta graph with tags { serve }\n",
      "2024-10-13 23:35:35.289478: I tensorflow/cc/saved_model/reader.cc:146] Reading SavedModel debug info (if present) from: saved_model/fpn_repvgg_a1_o4_r320\n",
      "2024-10-13 23:35:35.311101: I tensorflow/cc/saved_model/loader.cc:233] Restoring SavedModel bundle.\n",
      "2024-10-13 23:35:35.397874: I tensorflow/cc/saved_model/loader.cc:217] Running initialization op on SavedModel bundle at path: saved_model/fpn_repvgg_a1_o4_r320\n",
      "2024-10-13 23:35:35.525259: I tensorflow/cc/saved_model/loader.cc:316] SavedModel load for tags { serve }; Status: success: OK. Took 253780 microseconds.\n",
      "Summary on the non-converted ops:\n",
      "---------------------------------\n",
      " * Accepted dialects: tfl, builtin, func\n",
      " * Non-Converted Ops: 148, Total Ops 419, % non-converted = 35.32 %\n",
      " * 148 ARITH ops\n",
      "\n",
      "- arith.constant:  148 occurrences  (f32: 141, i32: 7)\n",
      "\n",
      "\n",
      "\n",
      "  (f32: 62)\n",
      "  (f32: 60)\n",
      "  (f32: 17)\n",
      "  (f32: 33)\n",
      "  (f32: 1)\n",
      "  (f32: 9)\n",
      "  (f32: 3)\n",
      "  (f32: 83)\n",
      "2024-10-13 23:35:36.183701: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 13.520 G  ops, equivalently 6.760 G  MACs\n",
      "fully_quantize: 0, inference_type: 6, input_inference_type: UINT8, output_inference_type: UINT8\n",
      "2024-10-13 23:35:42.083806: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 13.520 G  ops, equivalently 6.760 G  MACs\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/decode_heads/decode_head.py:137: UserWarning: threshold is not defined for binary, and defaultsto 0.3\n",
      "  warnings.warn(\"threshold is not defined for binary, and defaults\" \"to 0.3\")\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/losses/cross_entropy_loss.py:250: UserWarning: Default ``avg_non_ignore`` is False, if you would like to ignore the certain label and average loss over non-ignore labels, which is the same with PyTorch official cross_entropy, set ``avg_non_ignore=True``.\n",
      "  warnings.warn(\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/utils/wrappers.py:48: TracerWarning: Converting a tensor to a Python integer might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!\n",
      "  size = [int(t * self.scale_factor) for t in x.shape[-2:]]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/fpn_repvgg_a2_o4_r320/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/fpn_repvgg_a2_o4_r320/assets\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Preparing calibration data\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-10-13 23:35:59.417773: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:378] Ignored output_format.\n",
      "2024-10-13 23:35:59.417809: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:381] Ignored drop_control_dependency.\n",
      "2024-10-13 23:35:59.418016: I tensorflow/cc/saved_model/reader.cc:83] Reading SavedModel from: saved_model/fpn_repvgg_a2_o4_r320\n",
      "2024-10-13 23:35:59.456387: I tensorflow/cc/saved_model/reader.cc:51] Reading meta graph with tags { serve }\n",
      "2024-10-13 23:35:59.456424: I tensorflow/cc/saved_model/reader.cc:146] Reading SavedModel debug info (if present) from: saved_model/fpn_repvgg_a2_o4_r320\n",
      "2024-10-13 23:35:59.549843: I tensorflow/cc/saved_model/loader.cc:233] Restoring SavedModel bundle.\n",
      "2024-10-13 23:35:59.885643: I tensorflow/cc/saved_model/loader.cc:217] Running initialization op on SavedModel bundle at path: saved_model/fpn_repvgg_a2_o4_r320\n",
      "2024-10-13 23:36:00.229174: I tensorflow/cc/saved_model/loader.cc:316] SavedModel load for tags { serve }; Status: success: OK. Took 811155 microseconds.\n",
      "Summary on the non-converted ops:\n",
      "---------------------------------\n",
      " * Accepted dialects: tfl, builtin, func\n",
      " * Non-Converted Ops: 148, Total Ops 419, % non-converted = 35.32 %\n",
      " * 148 ARITH ops\n",
      "\n",
      "- arith.constant:  148 occurrences  (f32: 141, i32: 7)\n",
      "\n",
      "\n",
      "\n",
      "  (f32: 62)\n",
      "  (f32: 60)\n",
      "  (f32: 17)\n",
      "  (f32: 33)\n",
      "  (f32: 1)\n",
      "  (f32: 9)\n",
      "  (f32: 3)\n",
      "  (f32: 83)\n",
      "2024-10-13 23:36:01.938122: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 27.572 G  ops, equivalently 13.786 G  MACs\n",
      "fully_quantize: 0, inference_type: 6, input_inference_type: UINT8, output_inference_type: UINT8\n",
      "2024-10-13 23:36:12.009518: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 27.572 G  ops, equivalently 13.786 G  MACs\n"
     ]
    }
   ],
   "source": [
    "from models.fpn import OCDFPN\n",
    "\n",
    "\n",
    "image_size = (320, 320)\n",
    "backbone_dict = {\n",
    "    \"repvgg_a0\": \"repvgg_a0\",\n",
    "    \"repvgg_a1\": \"repvgg_a1\",\n",
    "    \"repvgg_a2\": \"repvgg_a2\",\n",
    "}\n",
    "n_classes = 1\n",
    "num_outs = 4\n",
    "out_channel_list = [32, 96, 128]\n",
    "\n",
    "for backbone_key, out_channel in zip(backbone_dict.keys(), out_channel_list):\n",
    "    backbone_value = backbone_dict[backbone_key]\n",
    "    model = OCDFPN(\n",
    "        backbone=backbone_value,\n",
    "        n_classes=n_classes,\n",
    "        num_outs=num_outs,\n",
    "        out_channel=out_channel,\n",
    "    )\n",
    "\n",
    "    model_name = f\"fpn_{backbone_key}_o{num_outs}_r{image_size[0]}\"\n",
    "    torch_model_path = None  # use initialized model\n",
    "    onnx_model_path = f\"saved_model/{model_name}/{model_name}.onnx\"\n",
    "    tf_folder_path = f\"saved_model/{model_name}\"\n",
    "    tflite_model_path = onnx_model_path.replace(\n",
    "        \".onnx\", \"_full_integer_quant_uint8.tflite\"\n",
    "    )\n",
    "    calib_data_path = \"calibdata.npy\"\n",
    "\n",
    "    converter = Converter(\n",
    "        model,\n",
    "        image_size,\n",
    "        torch_model_path,\n",
    "        onnx_model_path,\n",
    "        tf_folder_path,\n",
    "        tflite_model_path,\n",
    "        calib_data_path,\n",
    "        opset_version=11,\n",
    "    )\n",
    "    converter.convert()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/decode_heads/decode_head.py:137: UserWarning: threshold is not defined for binary, and defaultsto 0.3\n",
      "  warnings.warn(\"threshold is not defined for binary, and defaults\" \"to 0.3\")\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/losses/cross_entropy_loss.py:250: UserWarning: Default ``avg_non_ignore`` is False, if you would like to ignore the certain label and average loss over non-ignore labels, which is the same with PyTorch official cross_entropy, set ``avg_non_ignore=True``.\n",
      "  warnings.warn(\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/utils/wrappers.py:48: TracerWarning: Converting a tensor to a Python integer might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!\n",
      "  size = [int(t * self.scale_factor) for t in x.shape[-2:]]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/fpn_repvgg_b1_o4_r320/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/fpn_repvgg_b1_o4_r320/assets\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Preparing calibration data\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-10-17 15:58:10.004792: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:378] Ignored output_format.\n",
      "2024-10-17 15:58:10.004844: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:381] Ignored drop_control_dependency.\n",
      "2024-10-17 15:58:10.005116: I tensorflow/cc/saved_model/reader.cc:83] Reading SavedModel from: saved_model/fpn_repvgg_b1_o4_r320\n",
      "2024-10-17 15:58:10.075413: I tensorflow/cc/saved_model/reader.cc:51] Reading meta graph with tags { serve }\n",
      "2024-10-17 15:58:10.075484: I tensorflow/cc/saved_model/reader.cc:146] Reading SavedModel debug info (if present) from: saved_model/fpn_repvgg_b1_o4_r320\n",
      "2024-10-17 15:58:10.268231: I tensorflow/cc/saved_model/loader.cc:233] Restoring SavedModel bundle.\n",
      "2024-10-17 15:58:10.666239: I tensorflow/cc/saved_model/loader.cc:217] Running initialization op on SavedModel bundle at path: saved_model/fpn_repvgg_b1_o4_r320\n",
      "2024-10-17 15:58:11.072057: I tensorflow/cc/saved_model/loader.cc:316] SavedModel load for tags { serve }; Status: success: OK. Took 1066944 microseconds.\n",
      "Summary on the non-converted ops:\n",
      "---------------------------------\n",
      " * Accepted dialects: tfl, builtin, func\n",
      " * Non-Converted Ops: 184, Total Ops 515, % non-converted = 35.73 %\n",
      " * 184 ARITH ops\n",
      "\n",
      "- arith.constant:  184 occurrences  (f32: 177, i32: 7)\n",
      "\n",
      "\n",
      "\n",
      "  (f32: 80)\n",
      "  (f32: 72)\n",
      "  (f32: 23)\n",
      "  (f32: 39)\n",
      "  (f32: 1)\n",
      "  (f32: 9)\n",
      "  (f32: 3)\n",
      "  (f32: 101)\n",
      "2024-10-17 15:58:13.657390: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 58.082 G  ops, equivalently 29.041 G  MACs\n",
      "fully_quantize: 0, inference_type: 6, input_inference_type: UINT8, output_inference_type: UINT8\n",
      "2024-10-17 15:58:32.119506: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 58.082 G  ops, equivalently 29.041 G  MACs\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/decode_heads/decode_head.py:137: UserWarning: threshold is not defined for binary, and defaultsto 0.3\n",
      "  warnings.warn(\"threshold is not defined for binary, and defaults\" \"to 0.3\")\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/losses/cross_entropy_loss.py:250: UserWarning: Default ``avg_non_ignore`` is False, if you would like to ignore the certain label and average loss over non-ignore labels, which is the same with PyTorch official cross_entropy, set ``avg_non_ignore=True``.\n",
      "  warnings.warn(\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/utils/wrappers.py:48: TracerWarning: Converting a tensor to a Python integer might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!\n",
      "  size = [int(t * self.scale_factor) for t in x.shape[-2:]]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/fpn_repvgg_b1g4_o4_r320/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/fpn_repvgg_b1g4_o4_r320/assets\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Preparing calibration data\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-10-17 15:58:52.104430: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:378] Ignored output_format.\n",
      "2024-10-17 15:58:52.104464: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:381] Ignored drop_control_dependency.\n",
      "2024-10-17 15:58:52.104669: I tensorflow/cc/saved_model/reader.cc:83] Reading SavedModel from: saved_model/fpn_repvgg_b1g4_o4_r320\n",
      "2024-10-17 15:58:52.146106: I tensorflow/cc/saved_model/reader.cc:51] Reading meta graph with tags { serve }\n",
      "2024-10-17 15:58:52.146139: I tensorflow/cc/saved_model/reader.cc:146] Reading SavedModel debug info (if present) from: saved_model/fpn_repvgg_b1g4_o4_r320\n",
      "2024-10-17 15:58:52.207123: I tensorflow/cc/saved_model/loader.cc:233] Restoring SavedModel bundle.\n",
      "2024-10-17 15:58:52.471375: I tensorflow/cc/saved_model/loader.cc:217] Running initialization op on SavedModel bundle at path: saved_model/fpn_repvgg_b1g4_o4_r320\n",
      "2024-10-17 15:58:52.613087: I tensorflow/cc/saved_model/loader.cc:316] SavedModel load for tags { serve }; Status: success: OK. Took 508421 microseconds.\n",
      "Summary on the non-converted ops:\n",
      "---------------------------------\n",
      " * Accepted dialects: tfl, builtin, func\n",
      " * Non-Converted Ops: 264, Total Ops 751, % non-converted = 35.15 %\n",
      " * 264 ARITH ops\n",
      "\n",
      "- arith.constant:  264 occurrences  (f32: 256, i32: 8)\n",
      "\n",
      "\n",
      "\n",
      "  (f32: 106)\n",
      "  (f32: 26)\n",
      "  (f32: 150)\n",
      "  (f32: 23)\n",
      "  (f32: 39)\n",
      "  (f32: 1)\n",
      "  (f32: 9)\n",
      "  (f32: 3)\n",
      "  (f32: 26)\n",
      "  (f32: 101)\n",
      "2024-10-17 15:58:54.320601: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 37.644 G  ops, equivalently 18.822 G  MACs\n",
      "fully_quantize: 0, inference_type: 6, input_inference_type: UINT8, output_inference_type: UINT8\n",
      "2024-10-17 15:59:10.895059: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 37.644 G  ops, equivalently 18.822 G  MACs\n"
     ]
    }
   ],
   "source": [
    "from models.fpn import OCDFPN\n",
    "\n",
    "\n",
    "image_size = (320, 320)\n",
    "backbone_dict = {\n",
    "    \"repvgg_b1\": \"repvgg_b1\",\n",
    "    \"repvgg_b1g4\": \"repvgg_b1g4\",\n",
    "}\n",
    "n_classes = 1\n",
    "num_outs = 4\n",
    "out_channel_list = [128, 128]\n",
    "\n",
    "for backbone_key, out_channel in zip(backbone_dict.keys(), out_channel_list):\n",
    "    backbone_value = backbone_dict[backbone_key]\n",
    "    model = OCDFPN(\n",
    "        backbone=backbone_value,\n",
    "        n_classes=n_classes,\n",
    "        num_outs=num_outs,\n",
    "        out_channel=out_channel,\n",
    "    )\n",
    "\n",
    "    model_name = f\"fpn_{backbone_key}_o{num_outs}_r{image_size[0]}\"\n",
    "    torch_model_path = None  # use initialized model\n",
    "    onnx_model_path = f\"saved_model/{model_name}/{model_name}.onnx\"\n",
    "    tf_folder_path = f\"saved_model/{model_name}\"\n",
    "    tflite_model_path = onnx_model_path.replace(\n",
    "        \".onnx\", \"_full_integer_quant_uint8.tflite\"\n",
    "    )\n",
    "    calib_data_path = \"calibdata.npy\"\n",
    "\n",
    "    converter = Converter(\n",
    "        model,\n",
    "        image_size,\n",
    "        torch_model_path,\n",
    "        onnx_model_path,\n",
    "        tf_folder_path,\n",
    "        tflite_model_path,\n",
    "        calib_data_path,\n",
    "        opset_version=11,\n",
    "    )\n",
    "    converter.convert()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/chenxin/anaconda3/envs/ocd/lib/python3.10/site-packages/albumentations/__init__.py:13: UserWarning: A new version of Albumentations is available: 1.4.18 (you have 1.4.16). Upgrade using: pip install -U albumentations. To disable automatic update checks, set the environment variable NO_ALBUMENTATIONS_UPDATE to 1.\n",
      "  check_for_updates()\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/losses/cross_entropy_loss.py:250: UserWarning: Default ``avg_non_ignore`` is False, if you would like to ignore the certain label and average loss over non-ignore labels, which is the same with PyTorch official cross_entropy, set ``avg_non_ignore=True``.\n",
      "  warnings.warn(\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/utils/wrappers.py:48: TracerWarning: Converting a tensor to a Python integer might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!\n",
      "  size = [int(t * self.scale_factor) for t in x.shape[-2:]]\n",
      "2024-10-13 18:30:53.337418: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-13 18:30:53.340248: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-13 18:30:53.341675: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-13 18:30:53.349478: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-13 18:30:53.351828: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-13 18:30:53.353575: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-13 18:30:53.729618: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-13 18:30:53.730821: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-13 18:30:53.731822: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-13 18:30:53.732782: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1929] Created device /job:localhost/replica:0/task:0/device:GPU:0 with 53111 MB memory:  -> device: 0, name: NVIDIA A100 80GB PCIe, pci bus id: 0000:41:00.0, compute capability: 8.0\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:From /home/chenxin/anaconda3/envs/ocd/lib/python3.10/site-packages/tensorflow/python/util/dispatch.py:1260: resize_nearest_neighbor (from tensorflow.python.ops.image_ops_impl) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Use `tf.image.resize(...method=ResizeMethod.NEAREST_NEIGHBOR...)` instead.\n",
      "INFO:tensorflow:Assets written to: saved_model/fpn_repvgg_a0_o4_r320_coco/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/fpn_repvgg_a0_o4_r320_coco/assets\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Preparing calibration data\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-10-13 18:31:11.043979: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:378] Ignored output_format.\n",
      "2024-10-13 18:31:11.044013: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:381] Ignored drop_control_dependency.\n",
      "2024-10-13 18:31:11.044635: I tensorflow/cc/saved_model/reader.cc:83] Reading SavedModel from: saved_model/fpn_repvgg_a0_o4_r320_coco\n",
      "2024-10-13 18:31:11.056510: I tensorflow/cc/saved_model/reader.cc:51] Reading meta graph with tags { serve }\n",
      "2024-10-13 18:31:11.056538: I tensorflow/cc/saved_model/reader.cc:146] Reading SavedModel debug info (if present) from: saved_model/fpn_repvgg_a0_o4_r320_coco\n",
      "2024-10-13 18:31:11.073716: I tensorflow/compiler/mlir/mlir_graph_optimization_pass.cc:388] MLIR V1 optimization pass is not enabled\n",
      "2024-10-13 18:31:11.077367: I tensorflow/cc/saved_model/loader.cc:233] Restoring SavedModel bundle.\n",
      "2024-10-13 18:31:11.197657: I tensorflow/cc/saved_model/loader.cc:217] Running initialization op on SavedModel bundle at path: saved_model/fpn_repvgg_a0_o4_r320_coco\n",
      "2024-10-13 18:31:11.271668: I tensorflow/cc/saved_model/loader.cc:316] SavedModel load for tags { serve }; Status: success: OK. Took 227034 microseconds.\n",
      "2024-10-13 18:31:11.412979: I tensorflow/compiler/mlir/tensorflow/utils/dump_mlir_util.cc:269] disabling MLIR crash reproducer, set env var `MLIR_CRASH_REPRODUCER_DIRECTORY` to enable.\n",
      "Summary on the non-converted ops:\n",
      "---------------------------------\n",
      " * Accepted dialects: tfl, builtin, func\n",
      " * Non-Converted Ops: 147, Total Ops 418, % non-converted = 35.17 %\n",
      " * 147 ARITH ops\n",
      "\n",
      "- arith.constant:  147 occurrences  (f32: 141, i32: 6)\n",
      "\n",
      "\n",
      "\n",
      "  (f32: 62)\n",
      "  (f32: 60)\n",
      "  (f32: 17)\n",
      "  (f32: 33)\n",
      "  (f32: 9)\n",
      "  (f32: 3)\n",
      "  (f32: 84)\n",
      "2024-10-13 18:31:11.780232: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 7.276 G  ops, equivalently 3.638 G  MACs\n",
      "fully_quantize: 0, inference_type: 6, input_inference_type: UINT8, output_inference_type: UINT8\n",
      "2024-10-13 18:31:15.851847: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 7.276 G  ops, equivalently 3.638 G  MACs\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/losses/cross_entropy_loss.py:250: UserWarning: Default ``avg_non_ignore`` is False, if you would like to ignore the certain label and average loss over non-ignore labels, which is the same with PyTorch official cross_entropy, set ``avg_non_ignore=True``.\n",
      "  warnings.warn(\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/utils/wrappers.py:48: TracerWarning: Converting a tensor to a Python integer might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!\n",
      "  size = [int(t * self.scale_factor) for t in x.shape[-2:]]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/fpn_repvgg_a1_o4_r320_coco/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/fpn_repvgg_a1_o4_r320_coco/assets\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Preparing calibration data\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-10-13 18:31:30.204222: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:378] Ignored output_format.\n",
      "2024-10-13 18:31:30.204259: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:381] Ignored drop_control_dependency.\n",
      "2024-10-13 18:31:30.204485: I tensorflow/cc/saved_model/reader.cc:83] Reading SavedModel from: saved_model/fpn_repvgg_a1_o4_r320_coco\n",
      "2024-10-13 18:31:30.440892: I tensorflow/cc/saved_model/reader.cc:51] Reading meta graph with tags { serve }\n",
      "2024-10-13 18:31:30.440925: I tensorflow/cc/saved_model/reader.cc:146] Reading SavedModel debug info (if present) from: saved_model/fpn_repvgg_a1_o4_r320_coco\n",
      "2024-10-13 18:31:30.467037: I tensorflow/cc/saved_model/loader.cc:233] Restoring SavedModel bundle.\n",
      "2024-10-13 18:31:30.603480: I tensorflow/cc/saved_model/loader.cc:217] Running initialization op on SavedModel bundle at path: saved_model/fpn_repvgg_a1_o4_r320_coco\n",
      "2024-10-13 18:31:30.733153: I tensorflow/cc/saved_model/loader.cc:316] SavedModel load for tags { serve }; Status: success: OK. Took 528671 microseconds.\n",
      "Summary on the non-converted ops:\n",
      "---------------------------------\n",
      " * Accepted dialects: tfl, builtin, func\n",
      " * Non-Converted Ops: 147, Total Ops 418, % non-converted = 35.17 %\n",
      " * 147 ARITH ops\n",
      "\n",
      "- arith.constant:  147 occurrences  (f32: 141, i32: 6)\n",
      "\n",
      "\n",
      "\n",
      "  (f32: 62)\n",
      "  (f32: 60)\n",
      "  (f32: 17)\n",
      "  (f32: 33)\n",
      "  (f32: 9)\n",
      "  (f32: 3)\n",
      "  (f32: 84)\n",
      "2024-10-13 18:31:31.578099: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 13.986 G  ops, equivalently 6.993 G  MACs\n",
      "fully_quantize: 0, inference_type: 6, input_inference_type: UINT8, output_inference_type: UINT8\n",
      "2024-10-13 18:31:37.848912: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 13.986 G  ops, equivalently 6.993 G  MACs\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/losses/cross_entropy_loss.py:250: UserWarning: Default ``avg_non_ignore`` is False, if you would like to ignore the certain label and average loss over non-ignore labels, which is the same with PyTorch official cross_entropy, set ``avg_non_ignore=True``.\n",
      "  warnings.warn(\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/utils/wrappers.py:48: TracerWarning: Converting a tensor to a Python integer might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!\n",
      "  size = [int(t * self.scale_factor) for t in x.shape[-2:]]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/fpn_repvgg_a2_o4_r320_coco/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/fpn_repvgg_a2_o4_r320_coco/assets\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Preparing calibration data\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-10-13 18:31:58.162678: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:378] Ignored output_format.\n",
      "2024-10-13 18:31:58.162713: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:381] Ignored drop_control_dependency.\n",
      "2024-10-13 18:31:58.162932: I tensorflow/cc/saved_model/reader.cc:83] Reading SavedModel from: saved_model/fpn_repvgg_a2_o4_r320_coco\n",
      "2024-10-13 18:31:58.730216: I tensorflow/cc/saved_model/reader.cc:51] Reading meta graph with tags { serve }\n",
      "2024-10-13 18:31:58.730254: I tensorflow/cc/saved_model/reader.cc:146] Reading SavedModel debug info (if present) from: saved_model/fpn_repvgg_a2_o4_r320_coco\n",
      "2024-10-13 18:31:58.808900: I tensorflow/cc/saved_model/loader.cc:233] Restoring SavedModel bundle.\n",
      "2024-10-13 18:31:58.986533: I tensorflow/cc/saved_model/loader.cc:217] Running initialization op on SavedModel bundle at path: saved_model/fpn_repvgg_a2_o4_r320_coco\n",
      "2024-10-13 18:31:59.075237: I tensorflow/cc/saved_model/loader.cc:316] SavedModel load for tags { serve }; Status: success: OK. Took 912305 microseconds.\n",
      "Summary on the non-converted ops:\n",
      "---------------------------------\n",
      " * Accepted dialects: tfl, builtin, func\n",
      " * Non-Converted Ops: 147, Total Ops 418, % non-converted = 35.17 %\n",
      " * 147 ARITH ops\n",
      "\n",
      "- arith.constant:  147 occurrences  (f32: 141, i32: 6)\n",
      "\n",
      "\n",
      "\n",
      "  (f32: 62)\n",
      "  (f32: 60)\n",
      "  (f32: 17)\n",
      "  (f32: 33)\n",
      "  (f32: 9)\n",
      "  (f32: 3)\n",
      "  (f32: 84)\n",
      "2024-10-13 18:32:00.260973: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 28.117 G  ops, equivalently 14.059 G  MACs\n",
      "fully_quantize: 0, inference_type: 6, input_inference_type: UINT8, output_inference_type: UINT8\n",
      "2024-10-13 18:32:10.421457: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 28.117 G  ops, equivalently 14.059 G  MACs\n"
     ]
    }
   ],
   "source": [
    "from models.fpn import OCDFPN\n",
    "\n",
    "\n",
    "image_size = (320, 320)\n",
    "backbone_dict = {\n",
    "    \"repvgg_a0\": \"repvgg_a0\",\n",
    "    \"repvgg_a1\": \"repvgg_a1\",\n",
    "    \"repvgg_a2\": \"repvgg_a2\",\n",
    "}\n",
    "n_classes = 80\n",
    "num_outs = 4\n",
    "out_channel_list = [32, 96, 128]\n",
    "\n",
    "for backbone_key, out_channel in zip(backbone_dict.keys(), out_channel_list):\n",
    "    backbone_value = backbone_dict[backbone_key]\n",
    "    model = OCDFPN(\n",
    "        backbone=backbone_value,\n",
    "        n_classes=n_classes,\n",
    "        num_outs=num_outs,\n",
    "        out_channel=out_channel,\n",
    "    )\n",
    "\n",
    "    model_name = f\"fpn_{backbone_key}_o{num_outs}_r{image_size[0]}_coco\"\n",
    "    torch_model_path = None  # use initialized model\n",
    "    onnx_model_path = f\"saved_model/{model_name}/{model_name}.onnx\"\n",
    "    tf_folder_path = f\"saved_model/{model_name}\"\n",
    "    tflite_model_path = onnx_model_path.replace(\n",
    "        \".onnx\", \"_full_integer_quant_uint8.tflite\"\n",
    "    )\n",
    "    calib_data_path = \"calibdata.npy\"\n",
    "\n",
    "    converter = Converter(\n",
    "        model,\n",
    "        image_size,\n",
    "        torch_model_path,\n",
    "        onnx_model_path,\n",
    "        tf_folder_path,\n",
    "        tflite_model_path,\n",
    "        calib_data_path,\n",
    "        opset_version=11,\n",
    "    )\n",
    "    converter.convert()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## ConvNextFPN"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/chenxin/anaconda3/envs/ocd/lib/python3.10/site-packages/albumentations/__init__.py:13: UserWarning: A new version of Albumentations is available: 1.4.18 (you have 1.4.16). Upgrade using: pip install -U albumentations. To disable automatic update checks, set the environment variable NO_ALBUMENTATIONS_UPDATE to 1.\n",
      "  check_for_updates()\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/losses/cross_entropy_loss.py:250: UserWarning: Default ``avg_non_ignore`` is False, if you would like to ignore the certain label and average loss over non-ignore labels, which is the same with PyTorch official cross_entropy, set ``avg_non_ignore=True``.\n",
      "  warnings.warn(\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/utils/wrappers.py:48: TracerWarning: Converting a tensor to a Python integer might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!\n",
      "  size = [int(t * self.scale_factor) for t in x.shape[-2:]]\n",
      "2024-10-13 14:18:45.104907: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-13 14:18:45.108953: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-13 14:18:45.111452: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-13 14:18:45.118633: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-13 14:18:45.121758: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-13 14:18:45.123292: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-13 14:18:45.570781: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-13 14:18:45.574378: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-13 14:18:45.575365: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-13 14:18:45.576323: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1929] Created device /job:localhost/replica:0/task:0/device:GPU:0 with 33447 MB memory:  -> device: 0, name: NVIDIA A100 80GB PCIe, pci bus id: 0000:41:00.0, compute capability: 8.0\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:From /home/chenxin/anaconda3/envs/ocd/lib/python3.10/site-packages/tensorflow/python/util/dispatch.py:1260: resize_nearest_neighbor (from tensorflow.python.ops.image_ops_impl) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Use `tf.image.resize(...method=ResizeMethod.NEAREST_NEIGHBOR...)` instead.\n",
      "INFO:tensorflow:Assets written to: saved_model/fpn_convnext_tiny_o4_r320_coco/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/fpn_convnext_tiny_o4_r320_coco/assets\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Preparing calibration data\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-10-13 14:19:11.717160: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:378] Ignored output_format.\n",
      "2024-10-13 14:19:11.717192: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:381] Ignored drop_control_dependency.\n",
      "2024-10-13 14:19:11.717808: I tensorflow/cc/saved_model/reader.cc:83] Reading SavedModel from: saved_model/fpn_convnext_tiny_o4_r320_coco\n",
      "2024-10-13 14:19:12.389673: I tensorflow/cc/saved_model/reader.cc:51] Reading meta graph with tags { serve }\n",
      "2024-10-13 14:19:12.389706: I tensorflow/cc/saved_model/reader.cc:146] Reading SavedModel debug info (if present) from: saved_model/fpn_convnext_tiny_o4_r320_coco\n",
      "2024-10-13 14:19:12.484070: I tensorflow/compiler/mlir/mlir_graph_optimization_pass.cc:388] MLIR V1 optimization pass is not enabled\n",
      "2024-10-13 14:19:12.487977: I tensorflow/cc/saved_model/loader.cc:233] Restoring SavedModel bundle.\n",
      "2024-10-13 14:19:12.706005: I tensorflow/cc/saved_model/loader.cc:217] Running initialization op on SavedModel bundle at path: saved_model/fpn_convnext_tiny_o4_r320_coco\n",
      "2024-10-13 14:19:12.796812: I tensorflow/cc/saved_model/loader.cc:316] SavedModel load for tags { serve }; Status: success: OK. Took 1079005 microseconds.\n",
      "2024-10-13 14:19:13.075074: I tensorflow/compiler/mlir/tensorflow/utils/dump_mlir_util.cc:269] disabling MLIR crash reproducer, set env var `MLIR_CRASH_REPRODUCER_DIRECTORY` to enable.\n",
      "Summary on the non-converted ops:\n",
      "---------------------------------\n",
      " * Accepted dialects: tfl, builtin, func\n",
      " * Non-Converted Ops: 225, Total Ops 669, % non-converted = 33.63 %\n",
      " * 207 ARITH ops, 18 TF ops\n",
      "\n",
      "- arith.constant:  207 occurrences  (f32: 199, i32: 8)\n",
      "\n",
      "\n",
      "\n",
      "- tf.Erf:   18 occurrences  (f32: 18)\n",
      "  (f32: 68)\n",
      "  (f32: 20)\n",
      "  (f32: 18)\n",
      "  (f32: 54)\n",
      "  (f32: 44)\n",
      "  (f32: 84)\n",
      "  (f32: 29)\n",
      "  (f32: 9)\n",
      "  (f32: 3)\n",
      "  (f32: 22)\n",
      "  (f32: 22)\n",
      "  (f32: 68)\n",
      "loc(callsite(callsite(fused[\"Erf:\", \"onnx_tf_prefix_/backbone/stages_0/blocks/blocks.0/mlp/act/Erf@__inference___call___2706\"] at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall@__inference_signature_wrapper_3106\"]) at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall\"])): error: 'tf.Erf' op is neither a custom op nor a flex op\n",
      "loc(callsite(callsite(fused[\"Erf:\", \"onnx_tf_prefix_/backbone/stages_0/blocks/blocks.1/mlp/act/Erf@__inference___call___2706\"] at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall@__inference_signature_wrapper_3106\"]) at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall\"])): error: 'tf.Erf' op is neither a custom op nor a flex op\n",
      "loc(callsite(callsite(fused[\"Erf:\", \"onnx_tf_prefix_/backbone/stages_0/blocks/blocks.2/mlp/act/Erf@__inference___call___2706\"] at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall@__inference_signature_wrapper_3106\"]) at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall\"])): error: 'tf.Erf' op is neither a custom op nor a flex op\n",
      "loc(callsite(callsite(fused[\"Erf:\", \"onnx_tf_prefix_/backbone/stages_1/blocks/blocks.0/mlp/act/Erf@__inference___call___2706\"] at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall@__inference_signature_wrapper_3106\"]) at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall\"])): error: 'tf.Erf' op is neither a custom op nor a flex op\n",
      "loc(callsite(callsite(fused[\"Erf:\", \"onnx_tf_prefix_/backbone/stages_1/blocks/blocks.1/mlp/act/Erf@__inference___call___2706\"] at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall@__inference_signature_wrapper_3106\"]) at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall\"])): error: 'tf.Erf' op is neither a custom op nor a flex op\n",
      "loc(callsite(callsite(fused[\"Erf:\", \"onnx_tf_prefix_/backbone/stages_1/blocks/blocks.2/mlp/act/Erf@__inference___call___2706\"] at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall@__inference_signature_wrapper_3106\"]) at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall\"])): error: 'tf.Erf' op is neither a custom op nor a flex op\n",
      "loc(callsite(callsite(fused[\"Erf:\", \"onnx_tf_prefix_/backbone/stages_2/blocks/blocks.0/mlp/act/Erf@__inference___call___2706\"] at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall@__inference_signature_wrapper_3106\"]) at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall\"])): error: 'tf.Erf' op is neither a custom op nor a flex op\n",
      "loc(callsite(callsite(fused[\"Erf:\", \"onnx_tf_prefix_/backbone/stages_2/blocks/blocks.1/mlp/act/Erf@__inference___call___2706\"] at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall@__inference_signature_wrapper_3106\"]) at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall\"])): error: 'tf.Erf' op is neither a custom op nor a flex op\n",
      "loc(callsite(callsite(fused[\"Erf:\", \"onnx_tf_prefix_/backbone/stages_2/blocks/blocks.2/mlp/act/Erf@__inference___call___2706\"] at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall@__inference_signature_wrapper_3106\"]) at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall\"])): error: 'tf.Erf' op is neither a custom op nor a flex op\n",
      "loc(callsite(callsite(fused[\"Erf:\", \"onnx_tf_prefix_/backbone/stages_2/blocks/blocks.3/mlp/act/Erf@__inference___call___2706\"] at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall@__inference_signature_wrapper_3106\"]) at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall\"])): error: 'tf.Erf' op is neither a custom op nor a flex op\n",
      "loc(callsite(callsite(fused[\"Erf:\", \"onnx_tf_prefix_/backbone/stages_2/blocks/blocks.4/mlp/act/Erf@__inference___call___2706\"] at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall@__inference_signature_wrapper_3106\"]) at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall\"])): error: 'tf.Erf' op is neither a custom op nor a flex op\n",
      "loc(callsite(callsite(fused[\"Erf:\", \"onnx_tf_prefix_/backbone/stages_2/blocks/blocks.5/mlp/act/Erf@__inference___call___2706\"] at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall@__inference_signature_wrapper_3106\"]) at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall\"])): error: 'tf.Erf' op is neither a custom op nor a flex op\n",
      "loc(callsite(callsite(fused[\"Erf:\", \"onnx_tf_prefix_/backbone/stages_2/blocks/blocks.6/mlp/act/Erf@__inference___call___2706\"] at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall@__inference_signature_wrapper_3106\"]) at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall\"])): error: 'tf.Erf' op is neither a custom op nor a flex op\n",
      "loc(callsite(callsite(fused[\"Erf:\", \"onnx_tf_prefix_/backbone/stages_2/blocks/blocks.7/mlp/act/Erf@__inference___call___2706\"] at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall@__inference_signature_wrapper_3106\"]) at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall\"])): error: 'tf.Erf' op is neither a custom op nor a flex op\n",
      "loc(callsite(callsite(fused[\"Erf:\", \"onnx_tf_prefix_/backbone/stages_2/blocks/blocks.8/mlp/act/Erf@__inference___call___2706\"] at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall@__inference_signature_wrapper_3106\"]) at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall\"])): error: 'tf.Erf' op is neither a custom op nor a flex op\n",
      "loc(callsite(callsite(fused[\"Erf:\", \"onnx_tf_prefix_/backbone/stages_3/blocks/blocks.0/mlp/act/Erf@__inference___call___2706\"] at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall@__inference_signature_wrapper_3106\"]) at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall\"])): error: 'tf.Erf' op is neither a custom op nor a flex op\n",
      "loc(callsite(callsite(fused[\"Erf:\", \"onnx_tf_prefix_/backbone/stages_3/blocks/blocks.1/mlp/act/Erf@__inference___call___2706\"] at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall@__inference_signature_wrapper_3106\"]) at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall\"])): error: 'tf.Erf' op is neither a custom op nor a flex op\n",
      "loc(callsite(callsite(fused[\"Erf:\", \"onnx_tf_prefix_/backbone/stages_3/blocks/blocks.2/mlp/act/Erf@__inference___call___2706\"] at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall@__inference_signature_wrapper_3106\"]) at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall\"])): error: 'tf.Erf' op is neither a custom op nor a flex op\n",
      "error: failed while converting: 'main': \n",
      "Some ops are not supported by the native TFLite runtime, you can enable TF kernels fallback using TF Select. See instructions: https://www.tensorflow.org/lite/guide/ops_select \n",
      "TF Select ops: Erf\n",
      "Details:\n",
      "\ttf.Erf(tensor<1x10x10x3072xf32>) -> (tensor<1x10x10x3072xf32>) : {device = \"\"}\n",
      "\ttf.Erf(tensor<1x20x20x1536xf32>) -> (tensor<1x20x20x1536xf32>) : {device = \"\"}\n",
      "\ttf.Erf(tensor<1x40x40x768xf32>) -> (tensor<1x40x40x768xf32>) : {device = \"\"}\n",
      "\ttf.Erf(tensor<1x80x80x384xf32>) -> (tensor<1x80x80x384xf32>) : {device = \"\"}\n",
      "\n"
     ]
    },
    {
     "ename": "ConverterError",
     "evalue": "Could not translate MLIR to FlatBuffer. UNKNOWN: <unknown>:0: error: loc(callsite(callsite(fused[\"Erf:\", \"onnx_tf_prefix_/backbone/stages_0/blocks/blocks.0/mlp/act/Erf@__inference___call___2706\"] at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall@__inference_signature_wrapper_3106\"]) at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall\"])): 'tf.Erf' op is neither a custom op nor a flex op\n<unknown>:0: note: loc(fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall\"]): called from\n<unknown>:0: note: loc(callsite(callsite(fused[\"Erf:\", \"onnx_tf_prefix_/backbone/stages_0/blocks/blocks.0/mlp/act/Erf@__inference___call___2706\"] at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall@__inference_signature_wrapper_3106\"]) at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall\"])): Error code: ERROR_NEEDS_FLEX_OPS\n<unknown>:0: error: loc(callsite(callsite(fused[\"Erf:\", \"onnx_tf_prefix_/backbone/stages_0/blocks/blocks.1/mlp/act/Erf@__inference___call___2706\"] at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall@__inference_signature_wrapper_3106\"]) at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall\"])): 'tf.Erf' op is neither a custom op nor a flex op\n<unknown>:0: note: loc(fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall\"]): called from\n<unknown>:0: note: loc(callsite(callsite(fused[\"Erf:\", \"onnx_tf_prefix_/backbone/stages_0/blocks/blocks.1/mlp/act/Erf@__inference___call___2706\"] at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall@__inference_signature_wrapper_3106\"]) at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall\"])): Error code: ERROR_NEEDS_FLEX_OPS\n<unknown>:0: error: loc(callsite(callsite(fused[\"Erf:\", \"onnx_tf_prefix_/backbone/stages_0/blocks/blocks.2/mlp/act/Erf@__inference___call___2706\"] at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall@__inference_signature_wrapper_3106\"]) at 
fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall\"])): 'tf.Erf' op is neither a custom op nor a flex op\n<unknown>:0: note: loc(fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall\"]): called from\n<unknown>:0: note: loc(callsite(callsite(fused[\"Erf:\", \"onnx_tf_prefix_/backbone/stages_0/blocks/blocks.2/mlp/act/Erf@__inference___call___2706\"] at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall@__inference_signature_wrapper_3106\"]) at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall\"])): Error code: ERROR_NEEDS_FLEX_OPS\n<unknown>:0: error: loc(callsite(callsite(fused[\"Erf:\", \"onnx_tf_prefix_/backbone/stages_1/blocks/blocks.0/mlp/act/Erf@__inference___call___2706\"] at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall@__inference_signature_wrapper_3106\"]) at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall\"])): 'tf.Erf' op is neither a custom op nor a flex op\n<unknown>:0: note: loc(fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall\"]): called from\n<unknown>:0: note: loc(callsite(callsite(fused[\"Erf:\", \"onnx_tf_prefix_/backbone/stages_1/blocks/blocks.0/mlp/act/Erf@__inference___call___2706\"] at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall@__inference_signature_wrapper_3106\"]) at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall\"])): Error code: ERROR_NEEDS_FLEX_OPS\n<unknown>:0: error: loc(callsite(callsite(fused[\"Erf:\", \"onnx_tf_prefix_/backbone/stages_1/blocks/blocks.1/mlp/act/Erf@__inference___call___2706\"] at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall@__inference_signature_wrapper_3106\"]) at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall\"])): 'tf.Erf' op is neither a custom op nor a flex op\n<unknown>:0: note: loc(fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall\"]): called from\n<unknown>:0: note: loc(callsite(callsite(fused[\"Erf:\", 
\"onnx_tf_prefix_/backbone/stages_1/blocks/blocks.1/mlp/act/Erf@__inference___call___2706\"] at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall@__inference_signature_wrapper_3106\"]) at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall\"])): Error code: ERROR_NEEDS_FLEX_OPS\n<unknown>:0: error: loc(callsite(callsite(fused[\"Erf:\", \"onnx_tf_prefix_/backbone/stages_1/blocks/blocks.2/mlp/act/Erf@__inference___call___2706\"] at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall@__inference_signature_wrapper_3106\"]) at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall\"])): 'tf.Erf' op is neither a custom op nor a flex op\n<unknown>:0: note: loc(fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall\"]): called from\n<unknown>:0: note: loc(callsite(callsite(fused[\"Erf:\", \"onnx_tf_prefix_/backbone/stages_1/blocks/blocks.2/mlp/act/Erf@__inference___call___2706\"] at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall@__inference_signature_wrapper_3106\"]) at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall\"])): Error code: ERROR_NEEDS_FLEX_OPS\n<unknown>:0: error: loc(callsite(callsite(fused[\"Erf:\", \"onnx_tf_prefix_/backbone/stages_2/blocks/blocks.0/mlp/act/Erf@__inference___call___2706\"] at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall@__inference_signature_wrapper_3106\"]) at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall\"])): 'tf.Erf' op is neither a custom op nor a flex op\n<unknown>:0: note: loc(fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall\"]): called from\n<unknown>:0: note: loc(callsite(callsite(fused[\"Erf:\", \"onnx_tf_prefix_/backbone/stages_2/blocks/blocks.0/mlp/act/Erf@__inference___call___2706\"] at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall@__inference_signature_wrapper_3106\"]) at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall\"])): Error code: ERROR_NEEDS_FLEX_OPS\n<unknown>:0: error: 
loc(callsite(callsite(fused[\"Erf:\", \"onnx_tf_prefix_/backbone/stages_2/blocks/blocks.1/mlp/act/Erf@__inference___call___2706\"] at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall@__inference_signature_wrapper_3106\"]) at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall\"])): 'tf.Erf' op is neither a custom op nor a flex op\n<unknown>:0: note: loc(fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall\"]): called from\n<unknown>:0: note: loc(callsite(callsite(fused[\"Erf:\", \"onnx_tf_prefix_/backbone/stages_2/blocks/blocks.1/mlp/act/Erf@__inference___call___2706\"] at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall@__inference_signature_wrapper_3106\"]) at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall\"])): Error code: ERROR_NEEDS_FLEX_OPS\n<unknown>:0: error: loc(callsite(callsite(fused[\"Erf:\", \"onnx_tf_prefix_/backbone/stages_2/blocks/blocks.2/mlp/act/Erf@__inference___call___2706\"] at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall@__inference_signature_wrapper_3106\"]) at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall\"])): 'tf.Erf' op is neither a custom op nor a flex op\n<unknown>:0: note: loc(fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall\"]): called from\n<unknown>:0: note: loc(callsite(callsite(fused[\"Erf:\", \"onnx_tf_prefix_/backbone/stages_2/blocks/blocks.2/mlp/act/Erf@__inference___call___2706\"] at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall@__inference_signature_wrapper_3106\"]) at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall\"])): Error code: ERROR_NEEDS_FLEX_OPS\n<unknown>:0: error: loc(callsite(callsite(fused[\"Erf:\", \"onnx_tf_prefix_/backbone/stages_2/blocks/blocks.3/mlp/act/Erf@__inference___call___2706\"] at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall@__inference_signature_wrapper_3106\"]) at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall\"])): 'tf.Erf' op is 
neither a custom op nor a flex op\n<unknown>:0: note: loc(fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall\"]): called from\n<unknown>:0: note: loc(callsite(callsite(fused[\"Erf:\", \"onnx_tf_prefix_/backbone/stages_2/blocks/blocks.3/mlp/act/Erf@__inference___call___2706\"] at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall@__inference_signature_wrapper_3106\"]) at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall\"])): Error code: ERROR_NEEDS_FLEX_OPS\n<unknown>:0: error: loc(callsite(callsite(fused[\"Erf:\", \"onnx_tf_prefix_/backbone/stages_2/blocks/blocks.4/mlp/act/Erf@__inference___call___2706\"] at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall@__inference_signature_wrapper_3106\"]) at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall\"])): 'tf.Erf' op is neither a custom op nor a flex op\n<unknown>:0: note: loc(fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall\"]): called from\n<unknown>:0: note: loc(callsite(callsite(fused[\"Erf:\", \"onnx_tf_prefix_/backbone/stages_2/blocks/blocks.4/mlp/act/Erf@__inference___call___2706\"] at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall@__inference_signature_wrapper_3106\"]) at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall\"])): Error code: ERROR_NEEDS_FLEX_OPS\n<unknown>:0: error: loc(callsite(callsite(fused[\"Erf:\", \"onnx_tf_prefix_/backbone/stages_2/blocks/blocks.5/mlp/act/Erf@__inference___call___2706\"] at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall@__inference_signature_wrapper_3106\"]) at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall\"])): 'tf.Erf' op is neither a custom op nor a flex op\n<unknown>:0: note: loc(fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall\"]): called from\n<unknown>:0: note: loc(callsite(callsite(fused[\"Erf:\", \"onnx_tf_prefix_/backbone/stages_2/blocks/blocks.5/mlp/act/Erf@__inference___call___2706\"] at 
fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall@__inference_signature_wrapper_3106\"]) at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall\"])): Error code: ERROR_NEEDS_FLEX_OPS\n<unknown>:0: error: loc(callsite(callsite(fused[\"Erf:\", \"onnx_tf_prefix_/backbone/stages_2/blocks/blocks.6/mlp/act/Erf@__inference___call___2706\"] at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall@__inference_signature_wrapper_3106\"]) at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall\"])): 'tf.Erf' op is neither a custom op nor a flex op\n<unknown>:0: note: loc(fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall\"]): called from\n<unknown>:0: note: loc(callsite(callsite(fused[\"Erf:\", \"onnx_tf_prefix_/backbone/stages_2/blocks/blocks.6/mlp/act/Erf@__inference___call___2706\"] at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall@__inference_signature_wrapper_3106\"]) at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall\"])): Error code: ERROR_NEEDS_FLEX_OPS\n<unknown>:0: error: loc(callsite(callsite(fused[\"Erf:\", \"onnx_tf_prefix_/backbone/stages_2/blocks/blocks.7/mlp/act/Erf@__inference___call___2706\"] at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall@__inference_signature_wrapper_3106\"]) at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall\"])): 'tf.Erf' op is neither a custom op nor a flex op\n<unknown>:0: note: loc(fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall\"]): called from\n<unknown>:0: note: loc(callsite(callsite(fused[\"Erf:\", \"onnx_tf_prefix_/backbone/stages_2/blocks/blocks.7/mlp/act/Erf@__inference___call___2706\"] at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall@__inference_signature_wrapper_3106\"]) at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall\"])): Error code: ERROR_NEEDS_FLEX_OPS\n<unknown>:0: error: loc(callsite(callsite(fused[\"Erf:\", 
\"onnx_tf_prefix_/backbone/stages_2/blocks/blocks.8/mlp/act/Erf@__inference___call___2706\"] at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall@__inference_signature_wrapper_3106\"]) at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall\"])): 'tf.Erf' op is neither a custom op nor a flex op\n<unknown>:0: note: loc(fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall\"]): called from\n<unknown>:0: note: loc(callsite(callsite(fused[\"Erf:\", \"onnx_tf_prefix_/backbone/stages_2/blocks/blocks.8/mlp/act/Erf@__inference___call___2706\"] at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall@__inference_signature_wrapper_3106\"]) at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall\"])): Error code: ERROR_NEEDS_FLEX_OPS\n<unknown>:0: error: loc(callsite(callsite(fused[\"Erf:\", \"onnx_tf_prefix_/backbone/stages_3/blocks/blocks.0/mlp/act/Erf@__inference___call___2706\"] at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall@__inference_signature_wrapper_3106\"]) at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall\"])): 'tf.Erf' op is neither a custom op nor a flex op\n<unknown>:0: note: loc(fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall\"]): called from\n<unknown>:0: note: loc(callsite(callsite(fused[\"Erf:\", \"onnx_tf_prefix_/backbone/stages_3/blocks/blocks.0/mlp/act/Erf@__inference___call___2706\"] at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall@__inference_signature_wrapper_3106\"]) at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall\"])): Error code: ERROR_NEEDS_FLEX_OPS\n<unknown>:0: error: loc(callsite(callsite(fused[\"Erf:\", \"onnx_tf_prefix_/backbone/stages_3/blocks/blocks.1/mlp/act/Erf@__inference___call___2706\"] at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall@__inference_signature_wrapper_3106\"]) at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall\"])): 'tf.Erf' op is neither a custom op nor a flex 
op\n<unknown>:0: note: loc(fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall\"]): called from\n<unknown>:0: note: loc(callsite(callsite(fused[\"Erf:\", \"onnx_tf_prefix_/backbone/stages_3/blocks/blocks.1/mlp/act/Erf@__inference___call___2706\"] at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall@__inference_signature_wrapper_3106\"]) at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall\"])): Error code: ERROR_NEEDS_FLEX_OPS\n<unknown>:0: error: loc(callsite(callsite(fused[\"Erf:\", \"onnx_tf_prefix_/backbone/stages_3/blocks/blocks.2/mlp/act/Erf@__inference___call___2706\"] at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall@__inference_signature_wrapper_3106\"]) at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall\"])): 'tf.Erf' op is neither a custom op nor a flex op\n<unknown>:0: note: loc(fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall\"]): called from\n<unknown>:0: note: loc(callsite(callsite(fused[\"Erf:\", \"onnx_tf_prefix_/backbone/stages_3/blocks/blocks.2/mlp/act/Erf@__inference___call___2706\"] at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall@__inference_signature_wrapper_3106\"]) at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall\"])): Error code: ERROR_NEEDS_FLEX_OPS\n<unknown>:0: error: failed while converting: 'main': \nSome ops are not supported by the native TFLite runtime, you can enable TF kernels fallback using TF Select. See instructions: https://www.tensorflow.org/lite/guide/ops_select \nTF Select ops: Erf\nDetails:\n\ttf.Erf(tensor<1x10x10x3072xf32>) -> (tensor<1x10x10x3072xf32>) : {device = \"\"}\n\ttf.Erf(tensor<1x20x20x1536xf32>) -> (tensor<1x20x20x1536xf32>) : {device = \"\"}\n\ttf.Erf(tensor<1x40x40x768xf32>) -> (tensor<1x40x40x768xf32>) : {device = \"\"}\n\ttf.Erf(tensor<1x80x80x384xf32>) -> (tensor<1x80x80x384xf32>) : {device = \"\"}\n\n",
     "output_type": "error",
     "traceback": [
      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[0;31mConverterError\u001b[0m                            Traceback (most recent call last)",
      "Cell \u001b[0;32mIn[2], line 40\u001b[0m\n\u001b[1;32m     28\u001b[0m calib_data_path \u001b[38;5;241m=\u001b[39m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mcalibdata.npy\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m     30\u001b[0m converter \u001b[38;5;241m=\u001b[39m Converter(\n\u001b[1;32m     31\u001b[0m     model,\n\u001b[1;32m     32\u001b[0m     image_size,\n\u001b[0;32m   (...)\u001b[0m\n\u001b[1;32m     38\u001b[0m     opset_version\u001b[38;5;241m=\u001b[39m\u001b[38;5;241m11\u001b[39m,\n\u001b[1;32m     39\u001b[0m )\n\u001b[0;32m---> 40\u001b[0m \u001b[43mconverter\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mconvert\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\n",
      "File \u001b[0;32m/mnt/ssd2/xxx/repo/object-centroid-detection/conversion/converter.py:99\u001b[0m, in \u001b[0;36mConverter.convert\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m     97\u001b[0m converter\u001b[38;5;241m.\u001b[39minference_input_type \u001b[38;5;241m=\u001b[39m tf\u001b[38;5;241m.\u001b[39muint8\n\u001b[1;32m     98\u001b[0m converter\u001b[38;5;241m.\u001b[39minference_output_type \u001b[38;5;241m=\u001b[39m tf\u001b[38;5;241m.\u001b[39muint8\n\u001b[0;32m---> 99\u001b[0m tflite_model_quant \u001b[38;5;241m=\u001b[39m \u001b[43mconverter\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mconvert\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m    100\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mtflite_model_path\u001b[38;5;241m.\u001b[39mparent\u001b[38;5;241m.\u001b[39mmkdir(parents\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mTrue\u001b[39;00m, exist_ok\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mTrue\u001b[39;00m)\n\u001b[1;32m    101\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mtflite_model_path\u001b[38;5;241m.\u001b[39mwrite_bytes(tflite_model_quant)\n",
      "File \u001b[0;32m~/anaconda3/envs/ocd/lib/python3.10/site-packages/tensorflow/lite/python/lite.py:1139\u001b[0m, in \u001b[0;36m_export_metrics.<locals>.wrapper\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m   1136\u001b[0m \u001b[38;5;129m@functools\u001b[39m\u001b[38;5;241m.\u001b[39mwraps(convert_func)\n\u001b[1;32m   1137\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mwrapper\u001b[39m(\u001b[38;5;28mself\u001b[39m, \u001b[38;5;241m*\u001b[39margs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs):\n\u001b[1;32m   1138\u001b[0m   \u001b[38;5;66;03m# pylint: disable=protected-access\u001b[39;00m\n\u001b[0;32m-> 1139\u001b[0m   \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_convert_and_export_metrics\u001b[49m\u001b[43m(\u001b[49m\u001b[43mconvert_func\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n",
      "File \u001b[0;32m~/anaconda3/envs/ocd/lib/python3.10/site-packages/tensorflow/lite/python/lite.py:1093\u001b[0m, in \u001b[0;36mTFLiteConverterBase._convert_and_export_metrics\u001b[0;34m(self, convert_func, *args, **kwargs)\u001b[0m\n\u001b[1;32m   1091\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_save_conversion_params_metric()\n\u001b[1;32m   1092\u001b[0m start_time \u001b[38;5;241m=\u001b[39m time\u001b[38;5;241m.\u001b[39mprocess_time()\n\u001b[0;32m-> 1093\u001b[0m result \u001b[38;5;241m=\u001b[39m \u001b[43mconvert_func\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m   1094\u001b[0m elapsed_time_ms \u001b[38;5;241m=\u001b[39m (time\u001b[38;5;241m.\u001b[39mprocess_time() \u001b[38;5;241m-\u001b[39m start_time) \u001b[38;5;241m*\u001b[39m \u001b[38;5;241m1000\u001b[39m\n\u001b[1;32m   1095\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m result:\n",
      "File \u001b[0;32m~/anaconda3/envs/ocd/lib/python3.10/site-packages/tensorflow/lite/python/lite.py:1465\u001b[0m, in \u001b[0;36mTFLiteSavedModelConverterV2.convert\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m   1459\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[1;32m   1460\u001b[0m   \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_debug_info \u001b[38;5;241m=\u001b[39m _get_debug_info(\n\u001b[1;32m   1461\u001b[0m       _convert_debug_info_func(\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_trackable_obj\u001b[38;5;241m.\u001b[39mgraph_debug_info),\n\u001b[1;32m   1462\u001b[0m       graph_def,\n\u001b[1;32m   1463\u001b[0m   )\n\u001b[0;32m-> 1465\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_convert_from_saved_model\u001b[49m\u001b[43m(\u001b[49m\u001b[43mgraph_def\u001b[49m\u001b[43m)\u001b[49m\n",
      "File \u001b[0;32m~/anaconda3/envs/ocd/lib/python3.10/site-packages/tensorflow/lite/python/lite.py:1331\u001b[0m, in \u001b[0;36mTFLiteConverterBaseV2._convert_from_saved_model\u001b[0;34m(self, graph_def)\u001b[0m\n\u001b[1;32m   1328\u001b[0m converter_kwargs\u001b[38;5;241m.\u001b[39mupdate(\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_get_base_converter_args())\n\u001b[1;32m   1329\u001b[0m converter_kwargs\u001b[38;5;241m.\u001b[39mupdate(quant_mode\u001b[38;5;241m.\u001b[39mconverter_flags())\n\u001b[0;32m-> 1331\u001b[0m result \u001b[38;5;241m=\u001b[39m \u001b[43m_convert_saved_model\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mconverter_kwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m   1332\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_optimize_tflite_model(\n\u001b[1;32m   1333\u001b[0m     result, quant_mode, quant_io\u001b[38;5;241m=\u001b[39m\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mexperimental_new_quantizer\n\u001b[1;32m   1334\u001b[0m )\n",
      "File \u001b[0;32m~/anaconda3/envs/ocd/lib/python3.10/site-packages/tensorflow/lite/python/convert_phase.py:212\u001b[0m, in \u001b[0;36mconvert_phase.<locals>.actual_decorator.<locals>.wrapper\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m    210\u001b[0m   \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[1;32m    211\u001b[0m     report_error_message(\u001b[38;5;28mstr\u001b[39m(converter_error))\n\u001b[0;32m--> 212\u001b[0m   \u001b[38;5;28;01mraise\u001b[39;00m converter_error \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m  \u001b[38;5;66;03m# Re-throws the exception.\u001b[39;00m\n\u001b[1;32m    213\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m \u001b[38;5;167;01mException\u001b[39;00m \u001b[38;5;28;01mas\u001b[39;00m error:\n\u001b[1;32m    214\u001b[0m   report_error_message(\u001b[38;5;28mstr\u001b[39m(error))\n",
      "File \u001b[0;32m~/anaconda3/envs/ocd/lib/python3.10/site-packages/tensorflow/lite/python/convert_phase.py:205\u001b[0m, in \u001b[0;36mconvert_phase.<locals>.actual_decorator.<locals>.wrapper\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m    202\u001b[0m \u001b[38;5;129m@functools\u001b[39m\u001b[38;5;241m.\u001b[39mwraps(func)\n\u001b[1;32m    203\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mwrapper\u001b[39m(\u001b[38;5;241m*\u001b[39margs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs):\n\u001b[1;32m    204\u001b[0m   \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[0;32m--> 205\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mfunc\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m    206\u001b[0m   \u001b[38;5;28;01mexcept\u001b[39;00m ConverterError \u001b[38;5;28;01mas\u001b[39;00m converter_error:\n\u001b[1;32m    207\u001b[0m     \u001b[38;5;28;01mif\u001b[39;00m converter_error\u001b[38;5;241m.\u001b[39merrors:\n",
      "File \u001b[0;32m~/anaconda3/envs/ocd/lib/python3.10/site-packages/tensorflow/lite/python/convert.py:1001\u001b[0m, in \u001b[0;36mconvert_saved_model\u001b[0;34m(**kwargs)\u001b[0m\n\u001b[1;32m    999\u001b[0m model_flags \u001b[38;5;241m=\u001b[39m build_model_flags(\u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs)\n\u001b[1;32m   1000\u001b[0m conversion_flags \u001b[38;5;241m=\u001b[39m build_conversion_flags(\u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs)\n\u001b[0;32m-> 1001\u001b[0m data \u001b[38;5;241m=\u001b[39m \u001b[43mconvert\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m   1002\u001b[0m \u001b[43m    \u001b[49m\u001b[43mmodel_flags\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m   1003\u001b[0m \u001b[43m    \u001b[49m\u001b[43mconversion_flags\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m   1004\u001b[0m \u001b[43m    \u001b[49m\u001b[43minput_data_str\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43;01mNone\u001b[39;49;00m\u001b[43m,\u001b[49m\n\u001b[1;32m   1005\u001b[0m \u001b[43m    \u001b[49m\u001b[43mdebug_info_str\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43;01mNone\u001b[39;49;00m\u001b[43m,\u001b[49m\n\u001b[1;32m   1006\u001b[0m \u001b[43m    \u001b[49m\u001b[43menable_mlir_converter\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43;01mTrue\u001b[39;49;00m\u001b[43m,\u001b[49m\n\u001b[1;32m   1007\u001b[0m \u001b[43m\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m   1008\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m data\n",
      "File \u001b[0;32m~/anaconda3/envs/ocd/lib/python3.10/site-packages/tensorflow/lite/python/convert.py:366\u001b[0m, in \u001b[0;36mconvert\u001b[0;34m(model_flags, conversion_flags, input_data_str, debug_info_str, enable_mlir_converter)\u001b[0m\n\u001b[1;32m    358\u001b[0m         conversion_flags\u001b[38;5;241m.\u001b[39mguarantee_all_funcs_one_use \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mTrue\u001b[39;00m\n\u001b[1;32m    359\u001b[0m         \u001b[38;5;28;01mreturn\u001b[39;00m convert(\n\u001b[1;32m    360\u001b[0m             model_flags,\n\u001b[1;32m    361\u001b[0m             conversion_flags,\n\u001b[0;32m   (...)\u001b[0m\n\u001b[1;32m    364\u001b[0m             enable_mlir_converter,\n\u001b[1;32m    365\u001b[0m         )\n\u001b[0;32m--> 366\u001b[0m     \u001b[38;5;28;01mraise\u001b[39;00m converter_error\n\u001b[1;32m    368\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m _run_deprecated_conversion_binary(\n\u001b[1;32m    369\u001b[0m     model_flags\u001b[38;5;241m.\u001b[39mSerializeToString(),\n\u001b[1;32m    370\u001b[0m     conversion_flags\u001b[38;5;241m.\u001b[39mSerializeToString(),\n\u001b[1;32m    371\u001b[0m     input_data_str,\n\u001b[1;32m    372\u001b[0m     debug_info_str,\n\u001b[1;32m    373\u001b[0m )\n",
      "\u001b[0;31mConverterError\u001b[0m: Could not translate MLIR to FlatBuffer. UNKNOWN: <unknown>:0: error: loc(callsite(callsite(fused[\"Erf:\", \"onnx_tf_prefix_/backbone/stages_0/blocks/blocks.0/mlp/act/Erf@__inference___call___2706\"] at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall@__inference_signature_wrapper_3106\"]) at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall\"])): 'tf.Erf' op is neither a custom op nor a flex op\n<unknown>:0: note: loc(fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall\"]): called from\n<unknown>:0: note: loc(callsite(callsite(fused[\"Erf:\", \"onnx_tf_prefix_/backbone/stages_0/blocks/blocks.0/mlp/act/Erf@__inference___call___2706\"] at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall@__inference_signature_wrapper_3106\"]) at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall\"])): Error code: ERROR_NEEDS_FLEX_OPS\n<unknown>:0: error: loc(callsite(callsite(fused[\"Erf:\", \"onnx_tf_prefix_/backbone/stages_0/blocks/blocks.1/mlp/act/Erf@__inference___call___2706\"] at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall@__inference_signature_wrapper_3106\"]) at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall\"])): 'tf.Erf' op is neither a custom op nor a flex op\n<unknown>:0: note: loc(fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall\"]): called from\n<unknown>:0: note: loc(callsite(callsite(fused[\"Erf:\", \"onnx_tf_prefix_/backbone/stages_0/blocks/blocks.1/mlp/act/Erf@__inference___call___2706\"] at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall@__inference_signature_wrapper_3106\"]) at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall\"])): Error code: ERROR_NEEDS_FLEX_OPS\n<unknown>:0: error: loc(callsite(callsite(fused[\"Erf:\", \"onnx_tf_prefix_/backbone/stages_0/blocks/blocks.2/mlp/act/Erf@__inference___call___2706\"] at fused[\"StatefulPartitionedCall:\", 
\"StatefulPartitionedCall@__inference_signature_wrapper_3106\"]) at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall\"])): 'tf.Erf' op is neither a custom op nor a flex op\n<unknown>:0: note: loc(fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall\"]): called from\n<unknown>:0: note: loc(callsite(callsite(fused[\"Erf:\", \"onnx_tf_prefix_/backbone/stages_0/blocks/blocks.2/mlp/act/Erf@__inference___call___2706\"] at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall@__inference_signature_wrapper_3106\"]) at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall\"])): Error code: ERROR_NEEDS_FLEX_OPS\n<unknown>:0: error: loc(callsite(callsite(fused[\"Erf:\", \"onnx_tf_prefix_/backbone/stages_1/blocks/blocks.0/mlp/act/Erf@__inference___call___2706\"] at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall@__inference_signature_wrapper_3106\"]) at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall\"])): 'tf.Erf' op is neither a custom op nor a flex op\n<unknown>:0: note: loc(fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall\"]): called from\n<unknown>:0: note: loc(callsite(callsite(fused[\"Erf:\", \"onnx_tf_prefix_/backbone/stages_1/blocks/blocks.0/mlp/act/Erf@__inference___call___2706\"] at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall@__inference_signature_wrapper_3106\"]) at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall\"])): Error code: ERROR_NEEDS_FLEX_OPS\n<unknown>:0: error: loc(callsite(callsite(fused[\"Erf:\", \"onnx_tf_prefix_/backbone/stages_1/blocks/blocks.1/mlp/act/Erf@__inference___call___2706\"] at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall@__inference_signature_wrapper_3106\"]) at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall\"])): 'tf.Erf' op is neither a custom op nor a flex op\n<unknown>:0: note: loc(fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall\"]): called from\n<unknown>:0: note: 
loc(callsite(callsite(fused[\"Erf:\", \"onnx_tf_prefix_/backbone/stages_1/blocks/blocks.1/mlp/act/Erf@__inference___call___2706\"] at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall@__inference_signature_wrapper_3106\"]) at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall\"])): Error code: ERROR_NEEDS_FLEX_OPS\n<unknown>:0: error: loc(callsite(callsite(fused[\"Erf:\", \"onnx_tf_prefix_/backbone/stages_1/blocks/blocks.2/mlp/act/Erf@__inference___call___2706\"] at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall@__inference_signature_wrapper_3106\"]) at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall\"])): 'tf.Erf' op is neither a custom op nor a flex op\n<unknown>:0: note: loc(fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall\"]): called from\n<unknown>:0: note: loc(callsite(callsite(fused[\"Erf:\", \"onnx_tf_prefix_/backbone/stages_1/blocks/blocks.2/mlp/act/Erf@__inference___call___2706\"] at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall@__inference_signature_wrapper_3106\"]) at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall\"])): Error code: ERROR_NEEDS_FLEX_OPS\n<unknown>:0: error: loc(callsite(callsite(fused[\"Erf:\", \"onnx_tf_prefix_/backbone/stages_2/blocks/blocks.0/mlp/act/Erf@__inference___call___2706\"] at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall@__inference_signature_wrapper_3106\"]) at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall\"])): 'tf.Erf' op is neither a custom op nor a flex op\n<unknown>:0: note: loc(fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall\"]): called from\n<unknown>:0: note: loc(callsite(callsite(fused[\"Erf:\", \"onnx_tf_prefix_/backbone/stages_2/blocks/blocks.0/mlp/act/Erf@__inference___call___2706\"] at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall@__inference_signature_wrapper_3106\"]) at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall\"])): Error code: 
ERROR_NEEDS_FLEX_OPS\n<unknown>:0: error: loc(callsite(callsite(fused[\"Erf:\", \"onnx_tf_prefix_/backbone/stages_2/blocks/blocks.1/mlp/act/Erf@__inference___call___2706\"] at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall@__inference_signature_wrapper_3106\"]) at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall\"])): 'tf.Erf' op is neither a custom op nor a flex op\n<unknown>:0: note: loc(fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall\"]): called from\n<unknown>:0: note: loc(callsite(callsite(fused[\"Erf:\", \"onnx_tf_prefix_/backbone/stages_2/blocks/blocks.1/mlp/act/Erf@__inference___call___2706\"] at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall@__inference_signature_wrapper_3106\"]) at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall\"])): Error code: ERROR_NEEDS_FLEX_OPS\n<unknown>:0: error: loc(callsite(callsite(fused[\"Erf:\", \"onnx_tf_prefix_/backbone/stages_2/blocks/blocks.2/mlp/act/Erf@__inference___call___2706\"] at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall@__inference_signature_wrapper_3106\"]) at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall\"])): 'tf.Erf' op is neither a custom op nor a flex op\n<unknown>:0: note: loc(fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall\"]): called from\n<unknown>:0: note: loc(callsite(callsite(fused[\"Erf:\", \"onnx_tf_prefix_/backbone/stages_2/blocks/blocks.2/mlp/act/Erf@__inference___call___2706\"] at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall@__inference_signature_wrapper_3106\"]) at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall\"])): Error code: ERROR_NEEDS_FLEX_OPS\n<unknown>:0: error: loc(callsite(callsite(fused[\"Erf:\", \"onnx_tf_prefix_/backbone/stages_2/blocks/blocks.3/mlp/act/Erf@__inference___call___2706\"] at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall@__inference_signature_wrapper_3106\"]) at fused[\"StatefulPartitionedCall:\", 
\"StatefulPartitionedCall\"])): 'tf.Erf' op is neither a custom op nor a flex op\n<unknown>:0: note: loc(fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall\"]): called from\n<unknown>:0: note: loc(callsite(callsite(fused[\"Erf:\", \"onnx_tf_prefix_/backbone/stages_2/blocks/blocks.3/mlp/act/Erf@__inference___call___2706\"] at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall@__inference_signature_wrapper_3106\"]) at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall\"])): Error code: ERROR_NEEDS_FLEX_OPS\n<unknown>:0: error: loc(callsite(callsite(fused[\"Erf:\", \"onnx_tf_prefix_/backbone/stages_2/blocks/blocks.4/mlp/act/Erf@__inference___call___2706\"] at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall@__inference_signature_wrapper_3106\"]) at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall\"])): 'tf.Erf' op is neither a custom op nor a flex op\n<unknown>:0: note: loc(fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall\"]): called from\n<unknown>:0: note: loc(callsite(callsite(fused[\"Erf:\", \"onnx_tf_prefix_/backbone/stages_2/blocks/blocks.4/mlp/act/Erf@__inference___call___2706\"] at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall@__inference_signature_wrapper_3106\"]) at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall\"])): Error code: ERROR_NEEDS_FLEX_OPS\n<unknown>:0: error: loc(callsite(callsite(fused[\"Erf:\", \"onnx_tf_prefix_/backbone/stages_2/blocks/blocks.5/mlp/act/Erf@__inference___call___2706\"] at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall@__inference_signature_wrapper_3106\"]) at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall\"])): 'tf.Erf' op is neither a custom op nor a flex op\n<unknown>:0: note: loc(fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall\"]): called from\n<unknown>:0: note: loc(callsite(callsite(fused[\"Erf:\", 
\"onnx_tf_prefix_/backbone/stages_2/blocks/blocks.5/mlp/act/Erf@__inference___call___2706\"] at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall@__inference_signature_wrapper_3106\"]) at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall\"])): Error code: ERROR_NEEDS_FLEX_OPS\n<unknown>:0: error: loc(callsite(callsite(fused[\"Erf:\", \"onnx_tf_prefix_/backbone/stages_2/blocks/blocks.6/mlp/act/Erf@__inference___call___2706\"] at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall@__inference_signature_wrapper_3106\"]) at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall\"])): 'tf.Erf' op is neither a custom op nor a flex op\n<unknown>:0: note: loc(fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall\"]): called from\n<unknown>:0: note: loc(callsite(callsite(fused[\"Erf:\", \"onnx_tf_prefix_/backbone/stages_2/blocks/blocks.6/mlp/act/Erf@__inference___call___2706\"] at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall@__inference_signature_wrapper_3106\"]) at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall\"])): Error code: ERROR_NEEDS_FLEX_OPS\n<unknown>:0: error: loc(callsite(callsite(fused[\"Erf:\", \"onnx_tf_prefix_/backbone/stages_2/blocks/blocks.7/mlp/act/Erf@__inference___call___2706\"] at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall@__inference_signature_wrapper_3106\"]) at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall\"])): 'tf.Erf' op is neither a custom op nor a flex op\n<unknown>:0: note: loc(fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall\"]): called from\n<unknown>:0: note: loc(callsite(callsite(fused[\"Erf:\", \"onnx_tf_prefix_/backbone/stages_2/blocks/blocks.7/mlp/act/Erf@__inference___call___2706\"] at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall@__inference_signature_wrapper_3106\"]) at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall\"])): Error code: ERROR_NEEDS_FLEX_OPS\n<unknown>:0: error: 
loc(callsite(callsite(fused[\"Erf:\", \"onnx_tf_prefix_/backbone/stages_2/blocks/blocks.8/mlp/act/Erf@__inference___call___2706\"] at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall@__inference_signature_wrapper_3106\"]) at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall\"])): 'tf.Erf' op is neither a custom op nor a flex op\n<unknown>:0: note: loc(fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall\"]): called from\n<unknown>:0: note: loc(callsite(callsite(fused[\"Erf:\", \"onnx_tf_prefix_/backbone/stages_2/blocks/blocks.8/mlp/act/Erf@__inference___call___2706\"] at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall@__inference_signature_wrapper_3106\"]) at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall\"])): Error code: ERROR_NEEDS_FLEX_OPS\n<unknown>:0: error: loc(callsite(callsite(fused[\"Erf:\", \"onnx_tf_prefix_/backbone/stages_3/blocks/blocks.0/mlp/act/Erf@__inference___call___2706\"] at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall@__inference_signature_wrapper_3106\"]) at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall\"])): 'tf.Erf' op is neither a custom op nor a flex op\n<unknown>:0: note: loc(fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall\"]): called from\n<unknown>:0: note: loc(callsite(callsite(fused[\"Erf:\", \"onnx_tf_prefix_/backbone/stages_3/blocks/blocks.0/mlp/act/Erf@__inference___call___2706\"] at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall@__inference_signature_wrapper_3106\"]) at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall\"])): Error code: ERROR_NEEDS_FLEX_OPS\n<unknown>:0: error: loc(callsite(callsite(fused[\"Erf:\", \"onnx_tf_prefix_/backbone/stages_3/blocks/blocks.1/mlp/act/Erf@__inference___call___2706\"] at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall@__inference_signature_wrapper_3106\"]) at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall\"])): 'tf.Erf' op is 
neither a custom op nor a flex op\n<unknown>:0: note: loc(fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall\"]): called from\n<unknown>:0: note: loc(callsite(callsite(fused[\"Erf:\", \"onnx_tf_prefix_/backbone/stages_3/blocks/blocks.1/mlp/act/Erf@__inference___call___2706\"] at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall@__inference_signature_wrapper_3106\"]) at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall\"])): Error code: ERROR_NEEDS_FLEX_OPS\n<unknown>:0: error: loc(callsite(callsite(fused[\"Erf:\", \"onnx_tf_prefix_/backbone/stages_3/blocks/blocks.2/mlp/act/Erf@__inference___call___2706\"] at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall@__inference_signature_wrapper_3106\"]) at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall\"])): 'tf.Erf' op is neither a custom op nor a flex op\n<unknown>:0: note: loc(fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall\"]): called from\n<unknown>:0: note: loc(callsite(callsite(fused[\"Erf:\", \"onnx_tf_prefix_/backbone/stages_3/blocks/blocks.2/mlp/act/Erf@__inference___call___2706\"] at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall@__inference_signature_wrapper_3106\"]) at fused[\"StatefulPartitionedCall:\", \"StatefulPartitionedCall\"])): Error code: ERROR_NEEDS_FLEX_OPS\n<unknown>:0: error: failed while converting: 'main': \nSome ops are not supported by the native TFLite runtime, you can enable TF kernels fallback using TF Select. See instructions: https://www.tensorflow.org/lite/guide/ops_select \nTF Select ops: Erf\nDetails:\n\ttf.Erf(tensor<1x10x10x3072xf32>) -> (tensor<1x10x10x3072xf32>) : {device = \"\"}\n\ttf.Erf(tensor<1x20x20x1536xf32>) -> (tensor<1x20x20x1536xf32>) : {device = \"\"}\n\ttf.Erf(tensor<1x40x40x768xf32>) -> (tensor<1x40x40x768xf32>) : {device = \"\"}\n\ttf.Erf(tensor<1x80x80x384xf32>) -> (tensor<1x80x80x384xf32>) : {device = \"\"}\n\n"
     ]
    }
   ],
   "source": [
    "from models.fpn import OCDFPN\n",
    "\n",
    "\n",
    "image_size = (320, 320)\n",
    "backbone_dict = {\n",
    "    \"convnext_tiny\": \"convnext_tiny\",\n",
    "}\n",
    "n_classes = 80\n",
    "num_outs = 4\n",
    "out_channel_list = [128]\n",
    "\n",
    "for backbone_key, out_channel in zip(backbone_dict.keys(), out_channel_list):\n",
    "    backbone_value = backbone_dict[backbone_key]\n",
    "    model = OCDFPN(\n",
    "        backbone=backbone_value,\n",
    "        n_classes=n_classes,\n",
    "        num_outs=num_outs,\n",
    "        out_channel=out_channel,\n",
    "    )\n",
    "\n",
    "    model_name = f\"fpn_{backbone_key}_o{num_outs}_r{image_size[0]}_coco\"\n",
    "    torch_model_path = None  # use initialized model\n",
    "    onnx_model_path = f\"saved_model/{model_name}/{model_name}.onnx\"\n",
    "    tf_folder_path = f\"saved_model/{model_name}\"\n",
    "    tflite_model_path = onnx_model_path.replace(\n",
    "        \".onnx\", \"_full_integer_quant_uint8.tflite\"\n",
    "    )\n",
    "    calib_data_path = \"calibdata.npy\"\n",
    "\n",
    "    converter = Converter(\n",
    "        model,\n",
    "        image_size,\n",
    "        torch_model_path,\n",
    "        onnx_model_path,\n",
    "        tf_folder_path,\n",
    "        tflite_model_path,\n",
    "        calib_data_path,\n",
    "        opset_version=11,\n",
    "    )\n",
    "    converter.convert()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## MobilenetOneFPN"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/decode_heads/decode_head.py:137: UserWarning: threshold is not defined for binary, and defaultsto 0.3\n",
      "  warnings.warn(\"threshold is not defined for binary, and defaults\" \"to 0.3\")\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/losses/cross_entropy_loss.py:250: UserWarning: Default ``avg_non_ignore`` is False, if you would like to ignore the certain label and average loss over non-ignore labels, which is the same with PyTorch official cross_entropy, set ``avg_non_ignore=True``.\n",
      "  warnings.warn(\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/utils/wrappers.py:48: TracerWarning: Converting a tensor to a Python integer might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!\n",
      "  size = [int(t * self.scale_factor) for t in x.shape[-2:]]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/fpn_mobileone_s0_o4_r320/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/fpn_mobileone_s0_o4_r320/assets\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Preparing calibration data\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-10-13 23:36:44.845476: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:378] Ignored output_format.\n",
      "2024-10-13 23:36:44.845514: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:381] Ignored drop_control_dependency.\n",
      "2024-10-13 23:36:44.845716: I tensorflow/cc/saved_model/reader.cc:83] Reading SavedModel from: saved_model/fpn_mobileone_s0_o4_r320\n",
      "2024-10-13 23:36:44.861036: I tensorflow/cc/saved_model/reader.cc:51] Reading meta graph with tags { serve }\n",
      "2024-10-13 23:36:44.861067: I tensorflow/cc/saved_model/reader.cc:146] Reading SavedModel debug info (if present) from: saved_model/fpn_mobileone_s0_o4_r320\n",
      "2024-10-13 23:36:44.894459: I tensorflow/cc/saved_model/loader.cc:233] Restoring SavedModel bundle.\n",
      "2024-10-13 23:36:45.076792: I tensorflow/cc/saved_model/loader.cc:217] Running initialization op on SavedModel bundle at path: saved_model/fpn_mobileone_s0_o4_r320\n",
      "2024-10-13 23:36:45.268624: I tensorflow/cc/saved_model/loader.cc:316] SavedModel load for tags { serve }; Status: success: OK. Took 422909 microseconds.\n",
      "Summary on the non-converted ops:\n",
      "---------------------------------\n",
      " * Accepted dialects: tfl, builtin, func\n",
      " * Non-Converted Ops: 478, Total Ops 1112, % non-converted = 42.99 %\n",
      " * 478 ARITH ops\n",
      "\n",
      "- arith.constant:  478 occurrences  (f32: 471, i32: 7)\n",
      "\n",
      "\n",
      "\n",
      "  (f32: 224)\n",
      "  (f32: 102)\n",
      "  (f32: 105)\n",
      "  (f32: 35)\n",
      "  (f32: 33)\n",
      "  (f32: 1)\n",
      "  (f32: 9)\n",
      "  (f32: 3)\n",
      "  (f32: 119)\n",
      "2024-10-13 23:36:46.108329: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 6.197 G  ops, equivalently 3.098 G  MACs\n",
      "fully_quantize: 0, inference_type: 6, input_inference_type: UINT8, output_inference_type: UINT8\n",
      "2024-10-13 23:36:55.026913: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 6.197 G  ops, equivalently 3.098 G  MACs\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/decode_heads/decode_head.py:137: UserWarning: threshold is not defined for binary, and defaultsto 0.3\n",
      "  warnings.warn(\"threshold is not defined for binary, and defaults\" \"to 0.3\")\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/losses/cross_entropy_loss.py:250: UserWarning: Default ``avg_non_ignore`` is False, if you would like to ignore the certain label and average loss over non-ignore labels, which is the same with PyTorch official cross_entropy, set ``avg_non_ignore=True``.\n",
      "  warnings.warn(\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/utils/wrappers.py:48: TracerWarning: Converting a tensor to a Python integer might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!\n",
      "  size = [int(t * self.scale_factor) for t in x.shape[-2:]]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/fpn_mobileone_s4_o4_r320/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/fpn_mobileone_s4_o4_r320/assets\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Preparing calibration data\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-10-13 23:37:12.416910: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:378] Ignored output_format.\n",
      "2024-10-13 23:37:12.416949: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:381] Ignored drop_control_dependency.\n",
      "2024-10-13 23:37:12.417192: I tensorflow/cc/saved_model/reader.cc:83] Reading SavedModel from: saved_model/fpn_mobileone_s4_o4_r320\n",
      "2024-10-13 23:37:12.636382: I tensorflow/cc/saved_model/reader.cc:51] Reading meta graph with tags { serve }\n",
      "2024-10-13 23:37:12.636426: I tensorflow/cc/saved_model/reader.cc:146] Reading SavedModel debug info (if present) from: saved_model/fpn_mobileone_s4_o4_r320\n",
      "2024-10-13 23:37:12.672693: I tensorflow/cc/saved_model/loader.cc:233] Restoring SavedModel bundle.\n",
      "2024-10-13 23:37:12.806648: I tensorflow/cc/saved_model/loader.cc:217] Running initialization op on SavedModel bundle at path: saved_model/fpn_mobileone_s4_o4_r320\n",
      "2024-10-13 23:37:12.940098: I tensorflow/cc/saved_model/loader.cc:316] SavedModel load for tags { serve }; Status: success: OK. Took 522906 microseconds.\n",
      "Summary on the non-converted ops:\n",
      "---------------------------------\n",
      " * Accepted dialects: tfl, builtin, func\n",
      " * Non-Converted Ops: 277, Total Ops 742, % non-converted = 37.33 %\n",
      " * 277 ARITH ops\n",
      "\n",
      "- arith.constant:  277 occurrences  (f32: 265, i32: 12)\n",
      "\n",
      "\n",
      "\n",
      "  (f32: 96)\n",
      "  (f32: 63)\n",
      "  (f32: 42)\n",
      "  (f32: 12)\n",
      "  (f32: 12)\n",
      "  (f32: 46)\n",
      "  (f32: 33)\n",
      "  (f32: 25)\n",
      "  (f32: 9)\n",
      "  (f32: 3)\n",
      "  (f32: 121)\n",
      "2024-10-13 23:37:13.869853: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 16.963 G  ops, equivalently 8.481 G  MACs\n",
      "fully_quantize: 0, inference_type: 6, input_inference_type: UINT8, output_inference_type: UINT8\n",
      "2024-10-13 23:37:29.384497: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 16.963 G  ops, equivalently 8.481 G  MACs\n"
     ]
    }
   ],
   "source": [
    "from models.fpn import OCDFPN\n",
    "\n",
    "\n",
    "image_size = (320, 320)\n",
    "backbone_dict = {\n",
    "    \"mobileone_s0\": \"mobileone_s0\",\n",
    "    \"mobileone_s4\": \"mobileone_s4\",\n",
    "}\n",
    "n_classes = 1\n",
    "num_outs = 4\n",
    "out_channel_list = [80, 128]\n",
    "\n",
    "for backbone_key, out_channel in zip(backbone_dict.keys(), out_channel_list):\n",
    "    backbone_value = backbone_dict[backbone_key]\n",
    "    model = OCDFPN(\n",
    "        backbone=backbone_value,\n",
    "        n_classes=n_classes,\n",
    "        num_outs=num_outs,\n",
    "        out_channel=out_channel,\n",
    "    )\n",
    "\n",
    "    model_name = f\"fpn_{backbone_key}_o{num_outs}_r{image_size[0]}\"\n",
    "    torch_model_path = None  # use initialized model\n",
    "    onnx_model_path = f\"saved_model/{model_name}/{model_name}.onnx\"\n",
    "    tf_folder_path = f\"saved_model/{model_name}\"\n",
    "    tflite_model_path = onnx_model_path.replace(\n",
    "        \".onnx\", \"_full_integer_quant_uint8.tflite\"\n",
    "    )\n",
    "    calib_data_path = \"calibdata.npy\"\n",
    "\n",
    "    converter = Converter(\n",
    "        model,\n",
    "        image_size,\n",
    "        torch_model_path,\n",
    "        onnx_model_path,\n",
    "        tf_folder_path,\n",
    "        tflite_model_path,\n",
    "        calib_data_path,\n",
    "        opset_version=11,\n",
    "    )\n",
    "    converter.convert()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/losses/cross_entropy_loss.py:250: UserWarning: Default ``avg_non_ignore`` is False, if you would like to ignore the certain label and average loss over non-ignore labels, which is the same with PyTorch official cross_entropy, set ``avg_non_ignore=True``.\n",
      "  warnings.warn(\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/utils/wrappers.py:48: TracerWarning: Converting a tensor to a Python integer might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!\n",
      "  size = [int(t * self.scale_factor) for t in x.shape[-2:]]\n",
      "INFO:absl:Function `__call__` contains input name(s) x, y, tensor with unsupported characters which will be renamed to transpose_642_x, add_190_y, reshape_239_tensor in the SavedModel.\n",
      "INFO:absl:Found untraced functions such as gen_tensor_dict while saving (showing 1 of 1). These functions will not be directly callable after loading.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/fpn_mobileone_s0_o4_r320_coco/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/fpn_mobileone_s0_o4_r320_coco/assets\n",
      "INFO:absl:Writing fingerprint to saved_model/fpn_mobileone_s0_o4_r320_coco/fingerprint.pb\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Using existing calibration data\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-10-13 14:10:48.242177: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:378] Ignored output_format.\n",
      "2024-10-13 14:10:48.242222: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:381] Ignored drop_control_dependency.\n",
      "2024-10-13 14:10:48.242480: I tensorflow/cc/saved_model/reader.cc:83] Reading SavedModel from: saved_model/fpn_mobileone_s0_o4_r320_coco\n",
      "2024-10-13 14:10:48.375768: I tensorflow/cc/saved_model/reader.cc:51] Reading meta graph with tags { serve }\n",
      "2024-10-13 14:10:48.375803: I tensorflow/cc/saved_model/reader.cc:146] Reading SavedModel debug info (if present) from: saved_model/fpn_mobileone_s0_o4_r320_coco\n",
      "2024-10-13 14:10:48.406996: I tensorflow/cc/saved_model/loader.cc:233] Restoring SavedModel bundle.\n",
      "2024-10-13 14:10:48.578007: I tensorflow/cc/saved_model/loader.cc:217] Running initialization op on SavedModel bundle at path: saved_model/fpn_mobileone_s0_o4_r320_coco\n",
      "2024-10-13 14:10:48.755166: I tensorflow/cc/saved_model/loader.cc:316] SavedModel load for tags { serve }; Status: success: OK. Took 512687 microseconds.\n",
      "Summary on the non-converted ops:\n",
      "---------------------------------\n",
      " * Accepted dialects: tfl, builtin, func\n",
      " * Non-Converted Ops: 476, Total Ops 1110, % non-converted = 42.88 %\n",
      " * 476 ARITH ops\n",
      "\n",
      "- arith.constant:  476 occurrences  (f32: 470, i32: 6)\n",
      "\n",
      "\n",
      "\n",
      "  (f32: 224)\n",
      "  (f32: 102)\n",
      "  (f32: 105)\n",
      "  (f32: 35)\n",
      "  (f32: 33)\n",
      "  (f32: 9)\n",
      "  (f32: 3)\n",
      "  (f32: 120)\n",
      "2024-10-13 14:10:49.711153: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 6.937 G  ops, equivalently 3.468 G  MACs\n",
      "fully_quantize: 0, inference_type: 6, input_inference_type: UINT8, output_inference_type: UINT8\n",
      "2024-10-13 14:10:58.886806: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 6.937 G  ops, equivalently 3.468 G  MACs\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/losses/cross_entropy_loss.py:250: UserWarning: Default ``avg_non_ignore`` is False, if you would like to ignore the certain label and average loss over non-ignore labels, which is the same with PyTorch official cross_entropy, set ``avg_non_ignore=True``.\n",
      "  warnings.warn(\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/utils/wrappers.py:48: TracerWarning: Converting a tensor to a Python integer might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!\n",
      "  size = [int(t * self.scale_factor) for t in x.shape[-2:]]\n",
      "INFO:absl:Function `__call__` contains input name(s) x, y, tensor with unsupported characters which will be renamed to transpose_336_x, add_88_y, reshape_175_tensor in the SavedModel.\n",
      "INFO:absl:Found untraced functions such as gen_tensor_dict while saving (showing 1 of 1). These functions will not be directly callable after loading.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/fpn_mobileone_s4_o4_r320_coco/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/fpn_mobileone_s4_o4_r320_coco/assets\n",
      "INFO:absl:Writing fingerprint to saved_model/fpn_mobileone_s4_o4_r320_coco/fingerprint.pb\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Preparing calibration data\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-10-13 14:11:27.621693: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:378] Ignored output_format.\n",
      "2024-10-13 14:11:27.621729: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:381] Ignored drop_control_dependency.\n",
      "2024-10-13 14:11:27.621952: I tensorflow/cc/saved_model/reader.cc:83] Reading SavedModel from: saved_model/fpn_mobileone_s4_o4_r320_coco\n",
      "2024-10-13 14:11:27.932223: I tensorflow/cc/saved_model/reader.cc:51] Reading meta graph with tags { serve }\n",
      "2024-10-13 14:11:27.932299: I tensorflow/cc/saved_model/reader.cc:146] Reading SavedModel debug info (if present) from: saved_model/fpn_mobileone_s4_o4_r320_coco\n",
      "2024-10-13 14:11:27.964343: I tensorflow/cc/saved_model/loader.cc:233] Restoring SavedModel bundle.\n",
      "2024-10-13 14:11:28.127106: I tensorflow/cc/saved_model/loader.cc:217] Running initialization op on SavedModel bundle at path: saved_model/fpn_mobileone_s4_o4_r320_coco\n",
      "2024-10-13 14:11:28.257799: I tensorflow/cc/saved_model/loader.cc:316] SavedModel load for tags { serve }; Status: success: OK. Took 635848 microseconds.\n",
      "Summary on the non-converted ops:\n",
      "---------------------------------\n",
      " * Accepted dialects: tfl, builtin, func\n",
      " * Non-Converted Ops: 276, Total Ops 741, % non-converted = 37.25 %\n",
      " * 276 ARITH ops\n",
      "\n",
      "- arith.constant:  276 occurrences  (f32: 265, i32: 11)\n",
      "\n",
      "\n",
      "\n",
      "  (f32: 96)\n",
      "  (f32: 63)\n",
      "  (f32: 42)\n",
      "  (f32: 12)\n",
      "  (f32: 12)\n",
      "  (f32: 46)\n",
      "  (f32: 33)\n",
      "  (f32: 24)\n",
      "  (f32: 9)\n",
      "  (f32: 3)\n",
      "  (f32: 122)\n",
      "2024-10-13 14:11:29.262507: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 17.508 G  ops, equivalently 8.754 G  MACs\n",
      "fully_quantize: 0, inference_type: 6, input_inference_type: UINT8, output_inference_type: UINT8\n",
      "2024-10-13 14:11:46.248675: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 17.508 G  ops, equivalently 8.754 G  MACs\n"
     ]
    }
   ],
   "source": [
    "from models.fpn import OCDFPN\n",
    "\n",
    "\n",
    "image_size = (320, 320)\n",
    "backbone_dict = {\n",
    "    \"mobileone_s0\": \"mobileone_s0\",\n",
    "    \"mobileone_s4\": \"mobileone_s4\",\n",
    "}\n",
    "n_classes = 80\n",
    "num_outs = 4\n",
    "out_channel_list = [80, 128]\n",
    "\n",
    "for backbone_key, out_channel in zip(backbone_dict.keys(), out_channel_list):\n",
    "    backbone_value = backbone_dict[backbone_key]\n",
    "    model = OCDFPN(\n",
    "        backbone=backbone_value,\n",
    "        n_classes=n_classes,\n",
    "        num_outs=num_outs,\n",
    "        out_channel=out_channel,\n",
    "    )\n",
    "\n",
    "    model_name = f\"fpn_{backbone_key}_o{num_outs}_r{image_size[0]}_coco\"\n",
    "    torch_model_path = None  # use initialized model\n",
    "    onnx_model_path = f\"saved_model/{model_name}/{model_name}.onnx\"\n",
    "    tf_folder_path = f\"saved_model/{model_name}\"\n",
    "    tflite_model_path = onnx_model_path.replace(\n",
    "        \".onnx\", \"_full_integer_quant_uint8.tflite\"\n",
    "    )\n",
    "    calib_data_path = \"calibdata.npy\"\n",
    "\n",
    "    converter = Converter(\n",
    "        model,\n",
    "        image_size,\n",
    "        torch_model_path,\n",
    "        onnx_model_path,\n",
    "        tf_folder_path,\n",
    "        tflite_model_path,\n",
    "        calib_data_path,\n",
    "        opset_version=11,\n",
    "    )\n",
    "    converter.convert()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## MobilenetV2FPN\n",
    "\n",
    "by transformers api"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/decode_heads/decode_head.py:137: UserWarning: threshold is not defined for binary, and defaultsto 0.3\n",
      "  warnings.warn(\"threshold is not defined for binary, and defaults\" \"to 0.3\")\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/losses/cross_entropy_loss.py:250: UserWarning: Default ``avg_non_ignore`` is False, if you would like to ignore the certain label and average loss over non-ignore labels, which is the same with PyTorch official cross_entropy, set ``avg_non_ignore=True``.\n",
      "  warnings.warn(\n",
      "/home/chenxin/anaconda3/envs/ocd/lib/python3.10/site-packages/transformers/models/mobilenet_v2/modeling_mobilenet_v2.py:248: TracerWarning: Converting a tensor to a Python integer might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!\n",
      "  in_height = int(features.shape[-2])\n",
      "/home/chenxin/anaconda3/envs/ocd/lib/python3.10/site-packages/transformers/models/mobilenet_v2/modeling_mobilenet_v2.py:249: TracerWarning: Converting a tensor to a Python integer might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!\n",
      "  in_width = int(features.shape[-1])\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/utils/wrappers.py:48: TracerWarning: Converting a tensor to a Python integer might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!\n",
      "  size = [int(t * self.scale_factor) for t in x.shape[-2:]]\n",
      "/home/chenxin/anaconda3/envs/ocd/lib/python3.10/site-packages/torch/onnx/_internal/jit_utils.py:307: UserWarning: Constant folding - Only steps=1 can be constant folded for opset >= 10 onnx::Slice op. Constant folding not applied. (Triggered internally at ../torch/csrc/jit/passes/onnx/constant_fold.cpp:179.)\n",
      "  _C._jit_pass_onnx_node_shape_type_inference(node, params_dict, opset_version)\n",
      "/home/chenxin/anaconda3/envs/ocd/lib/python3.10/site-packages/torch/onnx/utils.py:702: UserWarning: Constant folding - Only steps=1 can be constant folded for opset >= 10 onnx::Slice op. Constant folding not applied. (Triggered internally at ../torch/csrc/jit/passes/onnx/constant_fold.cpp:179.)\n",
      "  _C._jit_pass_onnx_graph_shape_type_inference(\n",
      "/home/chenxin/anaconda3/envs/ocd/lib/python3.10/site-packages/torch/onnx/utils.py:1209: UserWarning: Constant folding - Only steps=1 can be constant folded for opset >= 10 onnx::Slice op. Constant folding not applied. (Triggered internally at ../torch/csrc/jit/passes/onnx/constant_fold.cpp:179.)\n",
      "  _C._jit_pass_onnx_graph_shape_type_inference(\n",
      "2024-10-17 15:52:03.800168: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-17 15:52:03.802910: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-17 15:52:03.804968: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-17 15:52:03.811641: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-17 15:52:03.813883: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-17 15:52:03.816112: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-17 15:52:05.195403: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-17 15:52:05.197209: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-17 15:52:05.199039: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-17 15:52:05.200679: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1929] Created device /job:localhost/replica:0/task:0/device:GPU:0 with 23634 MB memory:  -> device: 0, name: NVIDIA A100 80GB PCIe, pci bus id: 0000:c1:00.0, compute capability: 8.0\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:From /home/chenxin/anaconda3/envs/ocd/lib/python3.10/site-packages/tensorflow/python/util/dispatch.py:1260: resize_nearest_neighbor (from tensorflow.python.ops.image_ops_impl) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Use `tf.image.resize(...method=ResizeMethod.NEAREST_NEIGHBOR...)` instead.\n",
      "INFO:tensorflow:Assets written to: saved_model/fpn_mobilenetv2_075_o4_r320/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/fpn_mobilenetv2_075_o4_r320/assets\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Preparing calibration data\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-10-17 15:52:40.514885: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:378] Ignored output_format.\n",
      "2024-10-17 15:52:40.514914: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:381] Ignored drop_control_dependency.\n",
      "2024-10-17 15:52:40.515531: I tensorflow/cc/saved_model/reader.cc:83] Reading SavedModel from: saved_model/fpn_mobilenetv2_075_o4_r320\n",
      "2024-10-17 15:52:40.561516: I tensorflow/cc/saved_model/reader.cc:51] Reading meta graph with tags { serve }\n",
      "2024-10-17 15:52:40.561540: I tensorflow/cc/saved_model/reader.cc:146] Reading SavedModel debug info (if present) from: saved_model/fpn_mobilenetv2_075_o4_r320\n",
      "2024-10-17 15:52:40.622117: I tensorflow/compiler/mlir/mlir_graph_optimization_pass.cc:388] MLIR V1 optimization pass is not enabled\n",
      "2024-10-17 15:52:40.632996: I tensorflow/cc/saved_model/loader.cc:233] Restoring SavedModel bundle.\n",
      "2024-10-17 15:52:40.945312: I tensorflow/cc/saved_model/loader.cc:217] Running initialization op on SavedModel bundle at path: saved_model/fpn_mobilenetv2_075_o4_r320\n",
      "2024-10-17 15:52:41.235109: I tensorflow/cc/saved_model/loader.cc:316] SavedModel load for tags { serve }; Status: success: OK. Took 719580 microseconds.\n",
      "2024-10-17 15:52:41.589741: I tensorflow/compiler/mlir/tensorflow/utils/dump_mlir_util.cc:269] disabling MLIR crash reproducer, set env var `MLIR_CRASH_REPRODUCER_DIRECTORY` to enable.\n",
      "Summary on the non-converted ops:\n",
      "---------------------------------\n",
      " * Accepted dialects: tfl, builtin, func\n",
      " * Non-Converted Ops: 89, Total Ops 274, % non-converted = 32.48 %\n",
      " * 89 ARITH ops\n",
      "\n",
      "- arith.constant:   89 occurrences  (f32: 81, i32: 8)\n",
      "\n",
      "\n",
      "\n",
      "  (f32: 16)\n",
      "  (f32: 50)\n",
      "  (f32: 17)\n",
      "  (f32: 11)\n",
      "  (f32: 18)\n",
      "  (f32: 1)\n",
      "  (f32: 9)\n",
      "  (f32: 3)\n",
      "  (f32: 57)\n",
      "2024-10-17 15:52:41.992633: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 1.732 G  ops, equivalently 0.866 G  MACs\n",
      "fully_quantize: 0, inference_type: 6, input_inference_type: UINT8, output_inference_type: UINT8\n",
      "2024-10-17 15:52:44.887808: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 1.732 G  ops, equivalently 0.866 G  MACs\n"
     ]
    }
   ],
   "source": [
    "from models.fpn import MobileNetV2FPN\n",
    "\n",
    "# Export configuration: input resolution (H, W), head size, FPN levels.\n",
    "image_size = (320, 320)\n",
    "n_classes = 1\n",
    "num_outs = 4\n",
    "# backbone key -> MobileNetV2 width multiplier\n",
    "width_multiplier_dict = {\n",
    "    \"mobilenetv2_075\": 0.75,\n",
    "}\n",
    "# FPN output channels, one entry per backbone key above\n",
    "out_channel_list = [\n",
    "    64,\n",
    "]\n",
    "\n",
    "for (backbone_key, width_multiplier), out_channel in zip(\n",
    "    width_multiplier_dict.items(), out_channel_list\n",
    "):\n",
    "    model = MobileNetV2FPN(\n",
    "        width_multiplier=width_multiplier,\n",
    "        n_classes=n_classes,\n",
    "        num_outs=num_outs,\n",
    "        out_channel=out_channel,\n",
    "    )\n",
    "\n",
    "    # All export artefact paths are derived from a single model name.\n",
    "    model_name = f\"fpn_{backbone_key}_o{num_outs}_r{image_size[0]}\"\n",
    "    torch_model_path = None  # use initialized model\n",
    "    onnx_model_path = f\"saved_model/{model_name}/{model_name}.onnx\"\n",
    "    tf_folder_path = f\"saved_model/{model_name}\"\n",
    "    tflite_model_path = onnx_model_path.replace(\n",
    "        \".onnx\", \"_full_integer_quant_uint8.tflite\"\n",
    "    )\n",
    "    calib_data_path = \"calibdata.npy\"\n",
    "\n",
    "    # Pipeline: torch -> ONNX -> TF SavedModel -> full-integer uint8 TFLite.\n",
    "    converter = Converter(\n",
    "        model,\n",
    "        image_size,\n",
    "        torch_model_path,\n",
    "        onnx_model_path,\n",
    "        tf_folder_path,\n",
    "        tflite_model_path,\n",
    "        calib_data_path,\n",
    "        opset_version=11,\n",
    "    )\n",
    "    converter.convert()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## MobileNetV1/V2/V3 FPN\n",
    "\n",
    "FPN models built on MobileNetV1/V2/V3 backbones obtained via the timm API."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/chenxin/anaconda3/envs/ocd/lib/python3.10/site-packages/albumentations/__init__.py:13: UserWarning: A new version of Albumentations is available: 1.4.18 (you have 1.4.16). Upgrade using: pip install -U albumentations. To disable automatic update checks, set the environment variable NO_ALBUMENTATIONS_UPDATE to 1.\n",
      "  check_for_updates()\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/decode_heads/decode_head.py:137: UserWarning: threshold is not defined for binary, and defaultsto 0.3\n",
      "  warnings.warn(\"threshold is not defined for binary, and defaults\" \"to 0.3\")\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/losses/cross_entropy_loss.py:250: UserWarning: Default ``avg_non_ignore`` is False, if you would like to ignore the certain label and average loss over non-ignore labels, which is the same with PyTorch official cross_entropy, set ``avg_non_ignore=True``.\n",
      "  warnings.warn(\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/utils/wrappers.py:48: TracerWarning: Converting a tensor to a Python integer might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!\n",
      "  size = [int(t * self.scale_factor) for t in x.shape[-2:]]\n",
      "2024-10-17 21:03:23.587047: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-17 21:03:23.591195: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-17 21:03:23.592694: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-17 21:03:23.599172: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-17 21:03:23.601124: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-17 21:03:23.602746: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-17 21:03:24.206457: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-17 21:03:24.215304: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-17 21:03:24.219934: I external/local_xla/xla/stream_executor/cuda/cuda_executor.cc:901] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero. See more at https://github.com/torvalds/linux/blob/v6.0/Documentation/ABI/testing/sysfs-bus-pci#L344-L355\n",
      "2024-10-17 21:03:24.230467: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1929] Created device /job:localhost/replica:0/task:0/device:GPU:0 with 22594 MB memory:  -> device: 0, name: NVIDIA A100 80GB PCIe, pci bus id: 0000:41:00.0, compute capability: 8.0\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:From /home/chenxin/anaconda3/envs/ocd/lib/python3.10/site-packages/tensorflow/python/util/dispatch.py:1260: resize_nearest_neighbor (from tensorflow.python.ops.image_ops_impl) is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "Use `tf.image.resize(...method=ResizeMethod.NEAREST_NEIGHBOR...)` instead.\n",
      "INFO:tensorflow:Assets written to: saved_model/fpn_mobilenetv3_large_075_o4_r320/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/fpn_mobilenetv3_large_075_o4_r320/assets\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Preparing calibration data\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-10-17 21:03:40.109609: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:378] Ignored output_format.\n",
      "2024-10-17 21:03:40.109641: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:381] Ignored drop_control_dependency.\n",
      "2024-10-17 21:03:40.110246: I tensorflow/cc/saved_model/reader.cc:83] Reading SavedModel from: saved_model/fpn_mobilenetv3_large_075_o4_r320\n",
      "2024-10-17 21:03:40.116550: I tensorflow/cc/saved_model/reader.cc:51] Reading meta graph with tags { serve }\n",
      "2024-10-17 21:03:40.116571: I tensorflow/cc/saved_model/reader.cc:146] Reading SavedModel debug info (if present) from: saved_model/fpn_mobilenetv3_large_075_o4_r320\n",
      "2024-10-17 21:03:40.134835: I tensorflow/compiler/mlir/mlir_graph_optimization_pass.cc:388] MLIR V1 optimization pass is not enabled\n",
      "2024-10-17 21:03:40.138488: I tensorflow/cc/saved_model/loader.cc:233] Restoring SavedModel bundle.\n",
      "2024-10-17 21:03:40.236384: I tensorflow/cc/saved_model/loader.cc:217] Running initialization op on SavedModel bundle at path: saved_model/fpn_mobilenetv3_large_075_o4_r320\n",
      "2024-10-17 21:03:40.307747: I tensorflow/cc/saved_model/loader.cc:316] SavedModel load for tags { serve }; Status: success: OK. Took 197504 microseconds.\n",
      "2024-10-17 21:03:40.415570: I tensorflow/compiler/mlir/tensorflow/utils/dump_mlir_util.cc:269] disabling MLIR crash reproducer, set env var `MLIR_CRASH_REPRODUCER_DIRECTORY` to enable.\n",
      "Summary on the non-converted ops:\n",
      "---------------------------------\n",
      " * Accepted dialects: tfl, builtin, func\n",
      " * Non-Converted Ops: 122, Total Ops 487, % non-converted = 25.05 %\n",
      " * 122 ARITH ops\n",
      "\n",
      "- arith.constant:  122 occurrences  (f32: 103, i32: 19)\n",
      "\n",
      "\n",
      "\n",
      "  (f32: 36)\n",
      "  (f32: 63)\n",
      "  (f32: 15)\n",
      "  (f32: 8)\n",
      "  (f32: 28)\n",
      "  (f32: 48)\n",
      "  (f32: 27)\n",
      "  (f32: 28)\n",
      "  (f32: 17)\n",
      "  (f32: 9)\n",
      "  (f32: 3)\n",
      "  (f32: 80)\n",
      "2024-10-17 21:03:40.704229: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 1.480 G  ops, equivalently 0.740 G  MACs\n",
      "fully_quantize: 0, inference_type: 6, input_inference_type: UINT8, output_inference_type: UINT8\n",
      "2024-10-17 21:03:44.126235: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 1.480 G  ops, equivalently 0.740 G  MACs\n"
     ]
    }
   ],
   "source": [
    "from models.fpn import OCDFPN\n",
    "\n",
    "\n",
    "# Export configuration: input resolution (H, W), head size, FPN levels.\n",
    "image_size = (320, 320)\n",
    "n_classes = 1\n",
    "num_outs = 4\n",
    "# backbone key -> timm backbone identifier; uncomment entries to export more variants\n",
    "backbone_dict = {\n",
    "    # \"mobilenetv1\": \"mobilenetv1_100\",\n",
    "    # \"mobilenetv2_050\": \"mobilenetv2_050\",\n",
    "    # \"mobilenetv2_075\": \"mobilenetv2_075\",\n",
    "    # \"mobilenetv2_100\": \"mobilenetv2_100\",\n",
    "    # \"mobilenetv2_140\": \"mobilenetv2_140\",\n",
    "    # \"mobilenetv3_small_100\": \"mobilenetv3_small_100\",\n",
    "    \"mobilenetv3_large_075\": \"mobilenetv3_large_075\",\n",
    "    # \"mobilenetv3_large_100\": \"mobilenetv3_large_100\",\n",
    "}\n",
    "# FPN output channels, one entry per (uncommented) backbone above\n",
    "out_channel_list = [\n",
    "    # # 16,\n",
    "    # 64,\n",
    "    # 64,\n",
    "    # 64,\n",
    "    # 128,\n",
    "    # 64,\n",
    "    64,\n",
    "    # 128,\n",
    "]\n",
    "\n",
    "for (backbone_key, backbone_value), out_channel in zip(\n",
    "    backbone_dict.items(), out_channel_list\n",
    "):\n",
    "    model = OCDFPN(\n",
    "        backbone=backbone_value,\n",
    "        n_classes=n_classes,\n",
    "        num_outs=num_outs,\n",
    "        out_channel=out_channel,\n",
    "    )\n",
    "\n",
    "    # All export artefact paths are derived from a single model name.\n",
    "    model_name = f\"fpn_{backbone_key}_o{num_outs}_r{image_size[0]}\"\n",
    "    torch_model_path = None  # use initialized model\n",
    "    onnx_model_path = f\"saved_model/{model_name}/{model_name}.onnx\"\n",
    "    tf_folder_path = f\"saved_model/{model_name}\"\n",
    "    tflite_model_path = onnx_model_path.replace(\n",
    "        \".onnx\", \"_full_integer_quant_uint8.tflite\"\n",
    "    )\n",
    "    calib_data_path = \"calibdata.npy\"\n",
    "\n",
    "    # Pipeline: torch -> ONNX -> TF SavedModel -> full-integer uint8 TFLite.\n",
    "    converter = Converter(\n",
    "        model,\n",
    "        image_size,\n",
    "        torch_model_path,\n",
    "        onnx_model_path,\n",
    "        tf_folder_path,\n",
    "        tflite_model_path,\n",
    "        calib_data_path,\n",
    "        opset_version=11,\n",
    "    )\n",
    "    converter.convert()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "WARNING:timm.models._builder:Unexpected keys (bn2.bias, bn2.num_batches_tracked, bn2.running_mean, bn2.running_var, bn2.weight, classifier.bias, classifier.weight, conv_head.weight) found while loading pretrained weights. This may be expected if model is being adapted.\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/losses/cross_entropy_loss.py:250: UserWarning: Default ``avg_non_ignore`` is False, if you would like to ignore the certain label and average loss over non-ignore labels, which is the same with PyTorch official cross_entropy, set ``avg_non_ignore=True``.\n",
      "  warnings.warn(\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/utils/wrappers.py:48: TracerWarning: Converting a tensor to a Python integer might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!\n",
      "  size = [int(t * self.scale_factor) for t in x.shape[-2:]]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/fpn_mobilenetv2_050_o4_r320_coco/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/fpn_mobilenetv2_050_o4_r320_coco/assets\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Preparing calibration data\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-10-16 00:24:01.626613: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:378] Ignored output_format.\n",
      "2024-10-16 00:24:01.626674: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:381] Ignored drop_control_dependency.\n",
      "2024-10-16 00:24:01.626944: I tensorflow/cc/saved_model/reader.cc:83] Reading SavedModel from: saved_model/fpn_mobilenetv2_050_o4_r320_coco\n",
      "2024-10-16 00:24:01.633900: I tensorflow/cc/saved_model/reader.cc:51] Reading meta graph with tags { serve }\n",
      "2024-10-16 00:24:01.633940: I tensorflow/cc/saved_model/reader.cc:146] Reading SavedModel debug info (if present) from: saved_model/fpn_mobilenetv2_050_o4_r320_coco\n",
      "2024-10-16 00:24:01.646263: I tensorflow/cc/saved_model/loader.cc:233] Restoring SavedModel bundle.\n",
      "2024-10-16 00:24:01.716484: I tensorflow/cc/saved_model/loader.cc:217] Running initialization op on SavedModel bundle at path: saved_model/fpn_mobilenetv2_050_o4_r320_coco\n",
      "2024-10-16 00:24:01.788604: I tensorflow/cc/saved_model/loader.cc:316] SavedModel load for tags { serve }; Status: success: OK. Took 161664 microseconds.\n",
      "Summary on the non-converted ops:\n",
      "---------------------------------\n",
      " * Accepted dialects: tfl, builtin, func\n",
      " * Non-Converted Ops: 127, Total Ops 312, % non-converted = 40.71 %\n",
      " * 127 ARITH ops\n",
      "\n",
      "- arith.constant:  127 occurrences  (f32: 121, i32: 6)\n",
      "\n",
      "\n",
      "\n",
      "  (f32: 16)\n",
      "  (f32: 50)\n",
      "  (f32: 17)\n",
      "  (f32: 29)\n",
      "  (f32: 9)\n",
      "  (f32: 3)\n",
      "  (f32: 58)\n",
      "2024-10-16 00:24:02.103326: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 2.290 G  ops, equivalently 1.145 G  MACs\n",
      "fully_quantize: 0, inference_type: 6, input_inference_type: UINT8, output_inference_type: UINT8\n",
      "2024-10-16 00:24:04.439692: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 2.290 G  ops, equivalently 1.145 G  MACs\n",
      "WARNING:timm.models._builder:Unexpected keys (bn2.bias, bn2.num_batches_tracked, bn2.running_mean, bn2.running_var, bn2.weight, classifier.bias, classifier.weight, conv_head.weight) found while loading pretrained weights. This may be expected if model is being adapted.\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/losses/cross_entropy_loss.py:250: UserWarning: Default ``avg_non_ignore`` is False, if you would like to ignore the certain label and average loss over non-ignore labels, which is the same with PyTorch official cross_entropy, set ``avg_non_ignore=True``.\n",
      "  warnings.warn(\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/utils/wrappers.py:48: TracerWarning: Converting a tensor to a Python integer might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!\n",
      "  size = [int(t * self.scale_factor) for t in x.shape[-2:]]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/fpn_mobilenetv2_100_o4_r320_coco/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/fpn_mobilenetv2_100_o4_r320_coco/assets\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Preparing calibration data\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-10-16 00:24:13.047465: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:378] Ignored output_format.\n",
      "2024-10-16 00:24:13.047500: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:381] Ignored drop_control_dependency.\n",
      "2024-10-16 00:24:13.047689: I tensorflow/cc/saved_model/reader.cc:83] Reading SavedModel from: saved_model/fpn_mobilenetv2_100_o4_r320_coco\n",
      "2024-10-16 00:24:13.054041: I tensorflow/cc/saved_model/reader.cc:51] Reading meta graph with tags { serve }\n",
      "2024-10-16 00:24:13.054068: I tensorflow/cc/saved_model/reader.cc:146] Reading SavedModel debug info (if present) from: saved_model/fpn_mobilenetv2_100_o4_r320_coco\n",
      "2024-10-16 00:24:13.066348: I tensorflow/cc/saved_model/loader.cc:233] Restoring SavedModel bundle.\n",
      "2024-10-16 00:24:13.139143: I tensorflow/cc/saved_model/loader.cc:217] Running initialization op on SavedModel bundle at path: saved_model/fpn_mobilenetv2_100_o4_r320_coco\n",
      "2024-10-16 00:24:13.223022: I tensorflow/cc/saved_model/loader.cc:316] SavedModel load for tags { serve }; Status: success: OK. Took 175335 microseconds.\n",
      "Summary on the non-converted ops:\n",
      "---------------------------------\n",
      " * Accepted dialects: tfl, builtin, func\n",
      " * Non-Converted Ops: 127, Total Ops 312, % non-converted = 40.71 %\n",
      " * 127 ARITH ops\n",
      "\n",
      "- arith.constant:  127 occurrences  (f32: 121, i32: 6)\n",
      "\n",
      "\n",
      "\n",
      "  (f32: 16)\n",
      "  (f32: 50)\n",
      "  (f32: 17)\n",
      "  (f32: 29)\n",
      "  (f32: 9)\n",
      "  (f32: 3)\n",
      "  (f32: 58)\n",
      "2024-10-16 00:24:13.537618: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 3.101 G  ops, equivalently 1.551 G  MACs\n",
      "fully_quantize: 0, inference_type: 6, input_inference_type: UINT8, output_inference_type: UINT8\n",
      "2024-10-16 00:24:17.019421: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 3.101 G  ops, equivalently 1.551 G  MACs\n",
      "WARNING:timm.models._builder:Unexpected keys (bn2.bias, bn2.num_batches_tracked, bn2.running_mean, bn2.running_var, bn2.weight, classifier.bias, classifier.weight, conv_head.weight) found while loading pretrained weights. This may be expected if model is being adapted.\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/losses/cross_entropy_loss.py:250: UserWarning: Default ``avg_non_ignore`` is False, if you would like to ignore the certain label and average loss over non-ignore labels, which is the same with PyTorch official cross_entropy, set ``avg_non_ignore=True``.\n",
      "  warnings.warn(\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/utils/wrappers.py:48: TracerWarning: Converting a tensor to a Python integer might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!\n",
      "  size = [int(t * self.scale_factor) for t in x.shape[-2:]]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/fpn_mobilenetv2_140_o4_r320_coco/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/fpn_mobilenetv2_140_o4_r320_coco/assets\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Preparing calibration data\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-10-16 00:24:26.663052: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:378] Ignored output_format.\n",
      "2024-10-16 00:24:26.663085: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:381] Ignored drop_control_dependency.\n",
      "2024-10-16 00:24:26.663282: I tensorflow/cc/saved_model/reader.cc:83] Reading SavedModel from: saved_model/fpn_mobilenetv2_140_o4_r320_coco\n",
      "2024-10-16 00:24:26.673097: I tensorflow/cc/saved_model/reader.cc:51] Reading meta graph with tags { serve }\n",
      "2024-10-16 00:24:26.673118: I tensorflow/cc/saved_model/reader.cc:146] Reading SavedModel debug info (if present) from: saved_model/fpn_mobilenetv2_140_o4_r320_coco\n",
      "2024-10-16 00:24:26.687445: I tensorflow/cc/saved_model/loader.cc:233] Restoring SavedModel bundle.\n",
      "2024-10-16 00:24:26.756855: I tensorflow/cc/saved_model/loader.cc:217] Running initialization op on SavedModel bundle at path: saved_model/fpn_mobilenetv2_140_o4_r320_coco\n",
      "2024-10-16 00:24:26.825231: I tensorflow/cc/saved_model/loader.cc:316] SavedModel load for tags { serve }; Status: success: OK. Took 161950 microseconds.\n",
      "Summary on the non-converted ops:\n",
      "---------------------------------\n",
      " * Accepted dialects: tfl, builtin, func\n",
      " * Non-Converted Ops: 127, Total Ops 312, % non-converted = 40.71 %\n",
      " * 127 ARITH ops\n",
      "\n",
      "- arith.constant:  127 occurrences  (f32: 121, i32: 6)\n",
      "\n",
      "\n",
      "\n",
      "  (f32: 16)\n",
      "  (f32: 50)\n",
      "  (f32: 17)\n",
      "  (f32: 29)\n",
      "  (f32: 9)\n",
      "  (f32: 3)\n",
      "  (f32: 58)\n",
      "2024-10-16 00:24:27.191162: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 6.902 G  ops, equivalently 3.451 G  MACs\n",
      "fully_quantize: 0, inference_type: 6, input_inference_type: UINT8, output_inference_type: UINT8\n",
      "2024-10-16 00:24:32.507588: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 6.902 G  ops, equivalently 3.451 G  MACs\n",
      "WARNING:timm.models._builder:Unexpected keys (classifier.bias, classifier.weight, conv_head.bias, conv_head.weight) found while loading pretrained weights. This may be expected if model is being adapted.\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/losses/cross_entropy_loss.py:250: UserWarning: Default ``avg_non_ignore`` is False, if you would like to ignore the certain label and average loss over non-ignore labels, which is the same with PyTorch official cross_entropy, set ``avg_non_ignore=True``.\n",
      "  warnings.warn(\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/utils/wrappers.py:48: TracerWarning: Converting a tensor to a Python integer might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!\n",
      "  size = [int(t * self.scale_factor) for t in x.shape[-2:]]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/fpn_mobilenetv3_small_100_o4_r320_coco/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/fpn_mobilenetv3_small_100_o4_r320_coco/assets\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Preparing calibration data\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-10-16 00:24:40.293655: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:378] Ignored output_format.\n",
      "2024-10-16 00:24:40.293688: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:381] Ignored drop_control_dependency.\n",
      "2024-10-16 00:24:40.293875: I tensorflow/cc/saved_model/reader.cc:83] Reading SavedModel from: saved_model/fpn_mobilenetv3_small_100_o4_r320_coco\n",
      "2024-10-16 00:24:40.299672: I tensorflow/cc/saved_model/reader.cc:51] Reading meta graph with tags { serve }\n",
      "2024-10-16 00:24:40.299701: I tensorflow/cc/saved_model/reader.cc:146] Reading SavedModel debug info (if present) from: saved_model/fpn_mobilenetv3_small_100_o4_r320_coco\n",
      "2024-10-16 00:24:40.311870: I tensorflow/cc/saved_model/loader.cc:233] Restoring SavedModel bundle.\n",
      "2024-10-16 00:24:40.380834: I tensorflow/cc/saved_model/loader.cc:217] Running initialization op on SavedModel bundle at path: saved_model/fpn_mobilenetv3_small_100_o4_r320_coco\n",
      "2024-10-16 00:24:40.449545: I tensorflow/cc/saved_model/loader.cc:316] SavedModel load for tags { serve }; Status: success: OK. Took 155672 microseconds.\n",
      "Summary on the non-converted ops:\n",
      "---------------------------------\n",
      " * Accepted dialects: tfl, builtin, func\n",
      " * Non-Converted Ops: 148, Total Ops 476, % non-converted = 31.09 %\n",
      " * 148 ARITH ops\n",
      "\n",
      "- arith.constant:  148 occurrences  (f32: 126, i32: 22)\n",
      "\n",
      "\n",
      "\n",
      "  (f32: 30)\n",
      "  (f32: 57)\n",
      "  (f32: 11)\n",
      "  (f32: 9)\n",
      "  (f32: 27)\n",
      "  (f32: 45)\n",
      "  (f32: 23)\n",
      "  (f32: 27)\n",
      "  (f32: 18)\n",
      "  (f32: 9)\n",
      "  (f32: 3)\n",
      "  (f32: 66)\n",
      "2024-10-16 00:24:40.761896: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 2.171 G  ops, equivalently 1.085 G  MACs\n",
      "fully_quantize: 0, inference_type: 6, input_inference_type: UINT8, output_inference_type: UINT8\n",
      "2024-10-16 00:24:43.097108: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 2.171 G  ops, equivalently 1.085 G  MACs\n",
      "WARNING:timm.models._builder:Unexpected keys (classifier.bias, classifier.weight, conv_head.bias, conv_head.weight) found while loading pretrained weights. This may be expected if model is being adapted.\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/losses/cross_entropy_loss.py:250: UserWarning: Default ``avg_non_ignore`` is False, if you would like to ignore the certain label and average loss over non-ignore labels, which is the same with PyTorch official cross_entropy, set ``avg_non_ignore=True``.\n",
      "  warnings.warn(\n",
      "/mnt/ssd2/xxx/repo/mmsegmentation/mmseg/models/utils/wrappers.py:48: TracerWarning: Converting a tensor to a Python integer might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!\n",
      "  size = [int(t * self.scale_factor) for t in x.shape[-2:]]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/fpn_mobilenetv3_large_100_o4_r320_coco/assets\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:tensorflow:Assets written to: saved_model/fpn_mobilenetv3_large_100_o4_r320_coco/assets\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Preparing calibration data\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "2024-10-16 00:24:51.904356: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:378] Ignored output_format.\n",
      "2024-10-16 00:24:51.904393: W tensorflow/compiler/mlir/lite/python/tf_tfl_flatbuffer_helpers.cc:381] Ignored drop_control_dependency.\n",
      "2024-10-16 00:24:51.904584: I tensorflow/cc/saved_model/reader.cc:83] Reading SavedModel from: saved_model/fpn_mobilenetv3_large_100_o4_r320_coco\n",
      "2024-10-16 00:24:51.912535: I tensorflow/cc/saved_model/reader.cc:51] Reading meta graph with tags { serve }\n",
      "2024-10-16 00:24:51.912557: I tensorflow/cc/saved_model/reader.cc:146] Reading SavedModel debug info (if present) from: saved_model/fpn_mobilenetv3_large_100_o4_r320_coco\n",
      "2024-10-16 00:24:51.926002: I tensorflow/cc/saved_model/loader.cc:233] Restoring SavedModel bundle.\n",
      "2024-10-16 00:24:52.000647: I tensorflow/cc/saved_model/loader.cc:217] Running initialization op on SavedModel bundle at path: saved_model/fpn_mobilenetv3_large_100_o4_r320_coco\n",
      "2024-10-16 00:24:52.074411: I tensorflow/cc/saved_model/loader.cc:316] SavedModel load for tags { serve }; Status: success: OK. Took 169827 microseconds.\n",
      "Summary on the non-converted ops:\n",
      "---------------------------------\n",
      " * Accepted dialects: tfl, builtin, func\n",
      " * Non-Converted Ops: 164, Total Ops 529, % non-converted = 31.00 %\n",
      " * 164 ARITH ops\n",
      "\n",
      "- arith.constant:  164 occurrences  (f32: 146, i32: 18)\n",
      "\n",
      "\n",
      "\n",
      "  (f32: 36)\n",
      "  (f32: 63)\n",
      "  (f32: 15)\n",
      "  (f32: 8)\n",
      "  (f32: 28)\n",
      "  (f32: 48)\n",
      "  (f32: 27)\n",
      "  (f32: 28)\n",
      "  (f32: 16)\n",
      "  (f32: 9)\n",
      "  (f32: 3)\n",
      "  (f32: 81)\n",
      "2024-10-16 00:24:52.484554: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 2.848 G  ops, equivalently 1.424 G  MACs\n",
      "fully_quantize: 0, inference_type: 6, input_inference_type: UINT8, output_inference_type: UINT8\n",
      "2024-10-16 00:24:56.768165: I tensorflow/compiler/mlir/lite/flatbuffer_export.cc:2989] Estimated count of arithmetic ops: 2.848 G  ops, equivalently 1.424 G  MACs\n"
     ]
    }
   ],
   "source": [
    "from models.fpn import OCDFPN\n",
    "\n",
    "\n",
    "# Convert each FPN backbone variant to ONNX -> TF SavedModel -> full-integer\n",
    "# uint8 tflite. `Converter` is imported in an earlier cell.\n",
    "image_size = (320, 320)\n",
    "# Export name (used in artifact paths) -> timm backbone name.\n",
    "# NOTE: each export name must be unique; previously the two mobilenetv3_large\n",
    "# entries shared one key, so the tf_ variant was silently dropped and the\n",
    "# zip with out_channel_list was misaligned (large_100 got 64 instead of 128).\n",
    "backbone_dict = {\n",
    "    # \"mobilenetv1\": \"mobilenetv1_100\",\n",
    "    \"mobilenetv2_050\": \"mobilenetv2_050\",\n",
    "    \"mobilenetv2_100\": \"mobilenetv2_100\",\n",
    "    \"mobilenetv2_140\": \"mobilenetv2_140\",\n",
    "    \"mobilenetv3_small_100\": \"mobilenetv3_small_100\",\n",
    "    \"tf_mobilenetv3_large_100\": \"tf_mobilenetv3_large_100\",\n",
    "    \"mobilenetv3_large_100\": \"mobilenetv3_large_100\",\n",
    "}\n",
    "n_classes = 80\n",
    "num_outs = 4\n",
    "# FPN out_channel per backbone, in the same order as backbone_dict.\n",
    "out_channel_list = [\n",
    "    # 16,\n",
    "    64,\n",
    "    64,\n",
    "    128,\n",
    "    64,\n",
    "    64,\n",
    "    128,\n",
    "]\n",
    "# Guard against the silent truncation zip() would otherwise perform.\n",
    "assert len(backbone_dict) == len(out_channel_list), (\n",
    "    \"backbone_dict and out_channel_list must be the same length\"\n",
    ")\n",
    "\n",
    "for (backbone_key, backbone_value), out_channel in zip(\n",
    "    backbone_dict.items(), out_channel_list\n",
    "):\n",
    "    model = OCDFPN(\n",
    "        backbone=backbone_value,\n",
    "        n_classes=n_classes,\n",
    "        num_outs=num_outs,\n",
    "        out_channel=out_channel,\n",
    "    )\n",
    "\n",
    "    # All artifact paths are derived from a single model name.\n",
    "    model_name = f\"fpn_{backbone_key}_o{num_outs}_r{image_size[0]}_coco\"\n",
    "    torch_model_path = None  # use initialized model\n",
    "    onnx_model_path = f\"saved_model/{model_name}/{model_name}.onnx\"\n",
    "    tf_folder_path = f\"saved_model/{model_name}\"\n",
    "    tflite_model_path = onnx_model_path.replace(\n",
    "        \".onnx\", \"_full_integer_quant_uint8.tflite\"\n",
    "    )\n",
    "    calib_data_path = \"calibdata.npy\"\n",
    "\n",
    "    converter = Converter(\n",
    "        model,\n",
    "        image_size,\n",
    "        torch_model_path,\n",
    "        onnx_model_path,\n",
    "        tf_folder_path,\n",
    "        tflite_model_path,\n",
    "        calib_data_path,\n",
    "        opset_version=11,\n",
    "    )\n",
    "    converter.convert()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "ocdet",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.15"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
