{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {
    "collapsed": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "get start\n",
      "Loading ONNX file from path ../onnx_models/Multilingual_MiniLM_L12.onnx...\n",
      "Beginning ONNX file parsing\n",
      "[02/26/2022-21:35:06] [TRT] [W] onnx2trt_utils.cpp:366: Your ONNX model has been generated with INT64 weights, while TensorRT does not natively support INT64. Attempting to cast down to INT32.\n",
      "[02/26/2022-21:35:09] [TRT] [W] Output type must be INT32 for shape outputs\n",
      "[02/26/2022-21:35:09] [TRT] [W] Output type must be INT32 for shape outputs\n",
      "[02/26/2022-21:35:09] [TRT] [W] Output type must be INT32 for shape outputs\n",
      "[02/26/2022-21:35:09] [TRT] [W] Output type must be INT32 for shape outputs\n",
      "raw shape of input_ids is:  (-1, -1)\n",
      "Completed parsing of ONNX file\n",
      "Building an engine from file ../onnx_models/Multilingual_MiniLM_L12.onnx; this may take a while...\n",
      "[02/26/2022-21:35:09] [TRT] [W] Half2 support requested on hardware without native FP16 support, performance will be negatively affected.\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/tmp/ipykernel_118712/4273623961.py:47: DeprecationWarning: Use build_serialized_network instead.\n",
      "  engine = builder.build_engine(network,config)\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[02/26/2022-21:35:10] [TRT] [W] TensorRT was linked against cuBLAS/cuBLAS LT 11.6.5 but loaded cuBLAS/cuBLAS LT 11.6.1\n",
      "[02/26/2022-21:35:10] [TRT] [W] Myelin graph with multiple dynamic values may have poor performance if they differ. Dynamic values are: \n",
      "[02/26/2022-21:35:10] [TRT] [W]  (# 1 (SHAPE input_ids))\n",
      "[02/26/2022-21:35:10] [TRT] [W]  (# 0 (SHAPE attention_mask))\n",
      "[02/26/2022-21:35:29] [TRT] [W] Myelin graph with multiple dynamic values may have poor performance if they differ. Dynamic values are: \n",
      "[02/26/2022-21:35:29] [TRT] [W]  (# 1 (SHAPE input_ids))\n",
      "[02/26/2022-21:35:29] [TRT] [W]  (# 0 (SHAPE attention_mask))\n",
      "[02/26/2022-21:35:34] [TRT] [W] TensorRT was linked against cuBLAS/cuBLAS LT 11.6.5 but loaded cuBLAS/cuBLAS LT 11.6.1\n",
      "Completed creating Engine\n"
     ]
    }
   ],
   "source": [
    "import sys\n",
    "\n",
    "import os\n",
    "import argparse\n",
    "import tensorrt as trt\n",
    "\n",
    "EXPLICIT_BATCH = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)\n",
    "\n",
    "\n",
    "output_dir = os.path.join(\"..\", \"onnx_models\")\n",
    "\n",
    "\n",
    "onnx_file_path = os.path.join(output_dir, \"Multilingual_MiniLM_L12.onnx\")\n",
    "engine_file_path = os.path.join(output_dir, \"test_v1.plan\")\n",
    "print('get start')\n",
    "TRT_LOGGER = trt.Logger()\n",
    "with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(network, TRT_LOGGER) as parser:\n",
    "    config = builder.create_builder_config()\n",
    "    #builder.max_workspace_size =( 1 << 30 ) * 2\n",
    "    config.max_workspace_size =( 1 << 20 ) * 3 * 1024 # 3GB，可以根据需求改的更大\n",
    "    builder.max_batch_size = 128\n",
    "    config.set_flag(trt.BuilderFlag.FP16)\n",
    "    #builder.fp16_mode = True\n",
    "    # Parse model file\n",
    "if not os.path.exists(onnx_file_path):\n",
    "    print('ONNX file {} not found, please run yolov3_to_onnx.py first to generate it.'.format(onnx_file_path))\n",
    "    exit(0)\n",
    "print('Loading ONNX file from path {}...'.format(onnx_file_path))\n",
    "with open(onnx_file_path, 'rb') as model:\n",
    "    print('Beginning ONNX file parsing')\n",
    "    if not parser.parse(model.read()):\n",
    "        print ('ERROR: Failed to parse the ONNX file.')\n",
    "        for error in range(parser.num_errors):\n",
    "            print (parser.get_error(error))\n",
    "print(f\"raw shape of {network.get_input(0).name} is: \", network.get_input(0).shape)\n",
    "profile = builder.create_optimization_profile()\n",
    "for temp_name in ['input_ids', 'attention_mask','token_type_ids']:\n",
    "    profile.set_shape(\n",
    "        input=temp_name,\n",
    "        min=(1,1),\n",
    "        opt=(1,64),\n",
    "        max=(64,128),\n",
    "    )\n",
    "config.add_optimization_profile(profile)\n",
    "print('Completed parsing of ONNX file')\n",
    "print('Building an engine from file {}; this may take a while...'.format(onnx_file_path))\n",
    "engine = builder.build_engine(network,config)\n",
    "print(\"Completed creating Engine\")\n",
    "with open(engine_file_path, \"wb\") as f:\n",
    "    f.write(engine.serialize())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {
    "collapsed": false,
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[02/26/2022-21:35:36] [TRT] [I] The logger passed into createInferRuntime differs from one already provided for an existing builder, runtime, or refitter. Uses of the global logger, returned by nvinfer1::getLogger(), will return the existing value.\n",
      "\n",
      "[02/26/2022-21:35:36] [TRT] [I] [MemUsageChange] Init CUDA: CPU +0, GPU +0, now: CPU 1574, GPU 1891 (MiB)\n",
      "[02/26/2022-21:35:37] [TRT] [I] Loaded engine size: 901 MiB\n",
      "[02/26/2022-21:35:37] [TRT] [W] TensorRT was linked against cuBLAS/cuBLAS LT 11.6.5 but loaded cuBLAS/cuBLAS LT 11.6.1\n",
      "[02/26/2022-21:35:37] [TRT] [I] [MemUsageChange] Init cuBLAS/cuBLASLt: CPU +0, GPU +10, now: CPU 2928, GPU 2351 (MiB)\n",
      "[02/26/2022-21:35:37] [TRT] [I] [MemUsageChange] Init cuDNN: CPU +0, GPU +8, now: CPU 2928, GPU 2359 (MiB)\n",
      "[02/26/2022-21:35:37] [TRT] [I] [MemUsageChange] TensorRT-managed allocation in engine deserialization: CPU +0, GPU +448, now: CPU 0, GPU 960 (MiB)\n",
      "Engine Info:\n",
      "0 type:    input\n",
      "  binding: input_ids \n",
      "  data:    int32\n",
      "  shape:   [128, -1, -1] => 1 \n",
      "\n",
      "1 type:    input\n",
      "  binding: attention_mask \n",
      "  data:    int32\n",
      "  shape:   [128, -1, -1] => 1 \n",
      "\n",
      "2 type:    input\n",
      "  binding: token_type_ids \n",
      "  data:    int32\n",
      "  shape:   [128, -1, -1] => 1 \n",
      "\n",
      "3 type:    output\n",
      "  binding: start \n",
      "  data:    float32\n",
      "  shape:   [128, -1, -1, 384] => 384 \n",
      "\n",
      "4 type:    output\n",
      "  binding: end \n",
      "  data:    float32\n",
      "  shape:   [128, -1, 384] => 384 \n",
      "\n"
     ]
    }
   ],
   "source": [
     "import tensorrt as trt\n",
     "import sys\n",
     "import numpy as np\n",
     "trt_logger = trt.Logger(trt.Logger.INFO)\n",
     "runtime = trt.Runtime(trt_logger)\n",
     "# Deserialize the engine built in the previous cell from disk.\n",
     "with open(engine_file_path, \"rb\") as f:\n",
     "    engine = runtime.deserialize_cuda_engine(f.read())\n",
     "print(\"Engine Info:\")\n",
     "# Iterating an ICudaEngine yields its binding names (inputs then outputs).\n",
     "for i, binding in enumerate(engine):\n",
     "    # NOTE(review): this network was built with EXPLICIT_BATCH, so the batch\n",
     "    # dimension is already the leading -1 of get_binding_shape(); prepending\n",
     "    # max_batch_size double-counts the batch dim — confirm intended display.\n",
     "    shape = [engine.max_batch_size, *engine.get_binding_shape(binding)]\n",
     "    dtype = trt.nptype(engine.get_binding_dtype(binding))\n",
     "    # volume over a dynamic shape can be negative; abs() keeps it printable\n",
     "    volume = abs(trt.volume(engine.get_binding_shape(binding)))\n",
     "    if engine.binding_is_input(binding):\n",
     "        desc = \"input\"\n",
     "    else:\n",
     "        desc = \"output\"\n",
     "    print(f\"{i} type:    {desc}\\n  binding: {binding} \\n  data:    {np.dtype(dtype).name}\\n  shape:   {shape} => {volume} \\n\")\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "../models/paraphrase-multilingual-MiniLM-L12-v2/0_Transformer\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "{'input_ids': tensor([[    0, 73014,  1322,     2]]), 'token_type_ids': tensor([[0, 0, 0, 0]]), 'attention_mask': tensor([[1, 1, 1, 1]])}"
      ]
     },
     "execution_count": 3,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "import os\n",
    "import json\n",
    "\n",
    "# Here we use paraphrase-multilingual-MiniLM-L12-v2 for demo.\n",
    "big_model_path = \"../models/paraphrase-multilingual-MiniLM-L12-v2\"\n",
    "\n",
    "\n",
    "modules_json_path = os.path.join(big_model_path, 'modules.json')\n",
    "with open(modules_json_path) as fIn:\n",
    "    modules_config = json.load(fIn)\n",
    "\n",
    "tf_from_s_path = os.path.join(big_model_path, modules_config[0].get('path'))\n",
    "print(tf_from_s_path)\n",
    "\n",
    "\n",
    "\n",
    "max_seq_length = 128\n",
    "doc_stride = 128\n",
    "max_query_length = 64\n",
    "\n",
    "# Enable overwrite to export onnx model and download latest script each time when running this notebook.\n",
    "enable_overwrite = True\n",
    "\n",
    "# Total samples to inference. It shall be large enough to get stable latency measurement.\n",
    "total_samples = 1000\n",
    "\n",
    "cache_dir = os.path.join(\".\", \"cache_models\")\n",
    "cache_dir\n",
    "if not os.path.exists(cache_dir):\n",
    "    os.makedirs(cache_dir)\n",
    "\n",
    "# # Load pretrained model and tokenizer\n",
    "\n",
    "from transformers import (AutoConfig, AutoModel, AutoTokenizer)\n",
    "\n",
    "# Load pretrained model and tokenizer\n",
    "config_class, model_class, tokenizer_class = (AutoConfig, AutoModel, AutoTokenizer)\n",
    "\n",
    "config = config_class.from_pretrained(tf_from_s_path, cache_dir=cache_dir)\n",
    "tokenizer = tokenizer_class.from_pretrained(tf_from_s_path, do_lower_case=True, cache_dir=cache_dir)\n",
    "model = model_class.from_pretrained(tf_from_s_path, from_tf=False, config=config, cache_dir=cache_dir)\n",
    "\n",
    "\n",
    "# Get the first example data to run the model and export it to ONNX\n",
    "\n",
    "st = ['您好']\n",
    "inputs = tokenizer(\n",
    "    st,\n",
    "    padding=True,\n",
    "    truncation=True,\n",
    "    max_length=512,\n",
    "    return_tensors=\"pt\"\n",
    ")\n",
    "inputs"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[02/25/2022-09:16:05] [TRT] [W] TensorRT was linked against cuBLAS/cuBLAS LT 11.6.5 but loaded cuBLAS/cuBLAS LT 11.3.1\n",
      "[02/25/2022-09:16:05] [TRT] [I] [MemUsageChange] Init cuBLAS/cuBLASLt: CPU +0, GPU +8, now: CPU 5149, GPU 2071 (MiB)\n",
      "[02/25/2022-09:16:05] [TRT] [I] [MemUsageChange] Init cuDNN: CPU +0, GPU +10, now: CPU 5149, GPU 2081 (MiB)\n",
      "[02/25/2022-09:16:05] [TRT] [W] TensorRT was linked against cuDNN 8.2.1 but loaded cuDNN 8.1.1\n",
      "[02/25/2022-09:16:06] [TRT] [I] [MemUsageChange] TensorRT-managed allocation in IExecutionContext creation: CPU +0, GPU +162, now: CPU 0, GPU 386 (MiB)\n"
     ]
    }
   ],
   "source": [
     "# Create an execution context (holds per-inference state such as binding\n",
     "# shapes) from the deserialized engine.\n",
     "context = engine.create_execution_context()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Scratch notes kept for reference: binding names of the engine built above.\n",
     "# Inputs:  'input_ids', 'attention_mask', 'token_type_ids'\n",
     "# Outputs: 'start', 'end'\n",
     "# Binding index lookup example: engine[\"input_ids\"]\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [],
   "source": [
    "#  Copyright 2022, Lefebvre Dalloz Services\n",
    "#\n",
    "#  Licensed under the Apache License, Version 2.0 (the \"License\");\n",
    "#  you may not use this file except in compliance with the License.\n",
    "#  You may obtain a copy of the License at\n",
    "#\n",
    "#      http://www.apache.org/licenses/LICENSE-2.0\n",
    "#\n",
    "#  Unless required by applicable law or agreed to in writing, software\n",
    "#  distributed under the License is distributed on an \"AS IS\" BASIS,\n",
    "#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n",
    "#  See the License for the specific language governing permissions and\n",
    "#  limitations under the License.\n",
    "\n",
    "\"\"\"\n",
    "All the tooling to ease TensorRT usage.\n",
    "\"\"\"\n",
    "\n",
    "from typing import Callable, Dict, List, OrderedDict, Tuple\n",
    "\n",
    "import tensorrt as trt\n",
    "import torch\n",
    "from tensorrt import ICudaEngine, IExecutionContext\n",
    "from tensorrt.tensorrt import (\n",
    "    Builder,\n",
    "    IBuilderConfig,\n",
    "    IElementWiseLayer,\n",
    "    ILayer,\n",
    "    INetworkDefinition,\n",
    "    IOptimizationProfile,\n",
    "    IReduceLayer,\n",
    "    Logger,\n",
    "    OnnxParser,\n",
    "    Runtime,\n",
    ")\n",
    "\n",
    "\n",
     "def fix_fp16_network(network_definition: INetworkDefinition) -> INetworkDefinition:\n",
     "    \"\"\"\n",
     "    Mixed precision on TensorRT can generate scores very far from Pytorch because of some operator being saturated.\n",
     "    Indeed, FP16 can't store very large and very small numbers like FP32.\n",
     "    Here, we search for some patterns of operators to keep in FP32; in most cases it is enough to fix the inference\n",
     "    and doesn't hurt performance.\n",
     "    :param network_definition: graph generated by TensorRT after parsing ONNX file (during the model building)\n",
     "    :return: patched network definition\n",
     "    \"\"\"\n",
     "    # search for patterns which may overflow in FP16 precision, we force FP32 precisions for those nodes\n",
     "    for layer_index in range(network_definition.num_layers - 1):\n",
     "        layer: ILayer = network_definition.get_layer(layer_index)\n",
     "        next_layer: ILayer = network_definition.get_layer(layer_index + 1)\n",
     "        # POW operation usually followed by mean reduce\n",
     "        if layer.type == trt.LayerType.ELEMENTWISE and next_layer.type == trt.LayerType.REDUCE:\n",
     "            # casting to get access to op attribute\n",
     "            layer.__class__ = IElementWiseLayer\n",
     "            next_layer.__class__ = IReduceLayer\n",
     "            if layer.op == trt.ElementWiseOperation.POW:\n",
     "                layer.precision = trt.DataType.FLOAT\n",
     "                next_layer.precision = trt.DataType.FLOAT\n",
     "            # force FP32 outputs (effective together with OBEY_PRECISION_CONSTRAINTS set at build time)\n",
     "            layer.set_output_type(index=0, dtype=trt.DataType.FLOAT)\n",
     "            next_layer.set_output_type(index=0, dtype=trt.DataType.FLOAT)\n",
     "    return network_definition\n",
    "\n",
    "\n",
    "def build_engine(\n",
    "    runtime: Runtime,\n",
    "    onnx_file_path: str,\n",
    "    logger: Logger,\n",
    "    min_shape: Tuple[int, int],\n",
    "    optimal_shape: Tuple[int, int],\n",
    "    max_shape: Tuple[int, int],\n",
    "    workspace_size: int,\n",
    "    fp16: bool,\n",
    "    int8: bool,\n",
    ") -> ICudaEngine:\n",
    "    \"\"\"\n",
    "    Convert ONNX file to TensorRT engine.\n",
    "    It supports dynamic shape, however it's advised to keep sequence length fix as it hurts performance otherwise.\n",
    "    Dynamic batch size don't hurt performance and is highly advised.\n",
    "    :param runtime: global variable shared accross inference call / model building\n",
    "    :param onnx_file_path: path to the ONNX file\n",
    "    :param logger: specific logger to TensorRT\n",
    "    :param min_shape: the minimal shape of input tensors. It's advised to set first dimension (batch size) to 1\n",
    "    :param optimal_shape: input tensor shape used for optimizations\n",
    "    :param max_shape: maximal input tensor shape\n",
    "    :param workspace_size: GPU memory to use during the building, more is always better. If there is not enough memory,\n",
    "    some optimization may fail, and the whole conversion process will crash.\n",
    "    :param fp16: enable FP16 precision, it usually provide a 20-30% boost compared to ONNX Runtime.\n",
    "    :param int8: enable INT-8 quantization, best performance but model should have been quantized.\n",
    "    :return: TensorRT engine to use during inference\n",
    "    \"\"\"\n",
    "    with trt.Builder(logger) as builder:  # type: Builder\n",
    "        with builder.create_network(\n",
    "            flags=1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)\n",
    "        ) as network_definition:  # type: INetworkDefinition\n",
    "            with trt.OnnxParser(network_definition, logger) as parser:  # type: OnnxParser\n",
    "                builder.max_batch_size = max_shape[0]  # max batch size\n",
    "                config: IBuilderConfig = builder.create_builder_config()\n",
    "                config.max_workspace_size = workspace_size\n",
    "                # to enable complete trt inspector debugging, only for TensorRT >= 8.2\n",
    "                # config.profiling_verbosity = trt.ProfilingVerbosity.DETAILED\n",
    "                # disable CUDNN optimizations\n",
    "                config.set_tactic_sources(\n",
    "                    tactic_sources=1 << int(trt.TacticSource.CUBLAS) | 1 << int(trt.TacticSource.CUBLAS_LT)\n",
    "                )\n",
    "                if int8:\n",
    "                    config.set_flag(trt.BuilderFlag.INT8)\n",
    "                if fp16:\n",
    "                    config.set_flag(trt.BuilderFlag.FP16)\n",
    "                config.set_flag(trt.BuilderFlag.DISABLE_TIMING_CACHE)\n",
    "                # https://github.com/NVIDIA/TensorRT/issues/1196 (sometimes big diff in output when using FP16)\n",
    "                config.set_flag(trt.BuilderFlag.OBEY_PRECISION_CONSTRAINTS)\n",
    "                with open(onnx_file_path, \"rb\") as f:\n",
    "                    parser.parse(f.read())\n",
    "                profile: IOptimizationProfile = builder.create_optimization_profile()\n",
    "                for num_input in range(network_definition.num_inputs):\n",
    "                    profile.set_shape(\n",
    "                        input=network_definition.get_input(num_input).name,\n",
    "                        min=min_shape,\n",
    "                        opt=optimal_shape,\n",
    "                        max=max_shape,\n",
    "                    )\n",
    "                config.add_optimization_profile(profile)\n",
    "                if fp16:\n",
    "                    network_definition = fix_fp16_network(network_definition)\n",
    "                trt_engine = builder.build_serialized_network(network_definition, config)\n",
    "                engine: ICudaEngine = runtime.deserialize_cuda_engine(trt_engine)\n",
    "                assert engine is not None, \"error during engine generation, check error messages above :-(\"\n",
    "                return engine\n",
    "\n",
    "\n",
     "def get_output_tensors(\n",
     "    context: trt.IExecutionContext,\n",
     "    host_inputs: List[torch.Tensor],\n",
     "    input_binding_idxs: List[int],\n",
     "    output_binding_idxs: List[int],\n",
     ") -> List[torch.Tensor]:\n",
     "    \"\"\"\n",
     "    Reserve memory in GPU for input and output tensors.\n",
     "    :param context: TensorRT context shared across inference steps\n",
     "    :param host_inputs: input tensors\n",
     "    :param input_binding_idxs: indexes of each input vector (should be the same than during building)\n",
     "    :param output_binding_idxs: indexes of each output vector (should be the same than during building)\n",
     "    :return: tensors where output will be stored\n",
     "    \"\"\"\n",
     "    # explicitly set dynamic input shapes, so dynamic output shapes can be computed internally\n",
     "    for host_input, binding_index in zip(host_inputs, input_binding_idxs):\n",
     "        context.set_binding_shape(binding_index, tuple(host_input.shape))\n",
     "    assert context.all_binding_shapes_specified\n",
     "    device_outputs: List[torch.Tensor] = []\n",
     "    for binding_index in output_binding_idxs:\n",
     "        # TensorRT computes output shape based on input shape provided above\n",
     "        output_shape = context.get_binding_shape(binding_index)\n",
     "        # allocate buffers to hold output results\n",
     "        # NOTE(review): torch.empty defaults to float32 — this assumes every\n",
     "        # engine output binding is FP32; confirm for other output dtypes.\n",
     "        output = torch.empty(tuple(output_shape), device=\"cuda\")\n",
     "        device_outputs.append(output)\n",
     "    return device_outputs\n",
    "\n",
    "\n",
     "def infer_tensorrt(\n",
     "    context: IExecutionContext,\n",
     "    host_inputs: OrderedDict[str, torch.Tensor],\n",
     "    input_binding_idxs: List[int],\n",
     "    output_binding_idxs: List[int],\n",
     ") -> List[torch.Tensor]:\n",
     "    \"\"\"\n",
     "    Perform inference with TensorRT.\n",
     "    :param context: shared variable\n",
     "    :param host_inputs: input tensors\n",
     "    :param input_binding_idxs: input tensor indexes\n",
     "    :param output_binding_idxs: output tensor indexes\n",
     "    :return: output tensors\n",
     "    \"\"\"\n",
     "    # NOTE(review): the dict's iteration order must match the engine's input\n",
     "    # binding order — tensors are paired with binding indexes positionally.\n",
     "    input_tensors: List[torch.Tensor] = list()\n",
     "    for tensor in host_inputs.values():\n",
     "        assert isinstance(tensor, torch.Tensor), f\"unexpected tensor type: {tensor.dtype}\"\n",
     "        # warning: small changes in output if int64 is used instead of int32\n",
     "        tensor = tensor.type(torch.int32)\n",
     "        tensor = tensor.to(\"cuda\")\n",
     "        input_tensors.append(tensor)\n",
     "    # calculate input shape, bind it, allocate GPU memory for the output\n",
     "    output_tensors: List[torch.Tensor] = get_output_tensors(\n",
     "        context, input_tensors, input_binding_idxs, output_binding_idxs\n",
     "    )\n",
     "    # bindings = device pointers of inputs followed by outputs, in binding order\n",
     "    bindings = [int(i.data_ptr()) for i in input_tensors + output_tensors]\n",
     "    assert context.execute_async_v2(\n",
     "        bindings, torch.cuda.current_stream().cuda_stream\n",
     "    ), \"failure during execution of inference\"\n",
     "    torch.cuda.current_stream().synchronize()  # sync all CUDA ops\n",
     "    return output_tensors\n",
    "\n",
    "\n",
     "def load_engine(\n",
     "    runtime: Runtime, engine_file_path: str, profile_index: int = 0\n",
     ") -> Callable[[Dict[str, torch.Tensor]], torch.Tensor]:\n",
     "    \"\"\"\n",
     "    Load serialized TensorRT engine.\n",
     "    :param runtime: shared variable\n",
     "    :param engine_file_path: path to the serialized engine\n",
     "    :param profile_index: which profile to load, 0 if you have not used multiple profiles\n",
     "    :return: A function to perform inference\n",
     "    \"\"\"\n",
     "    with open(file=engine_file_path, mode=\"rb\") as f:\n",
     "        engine: ICudaEngine = runtime.deserialize_cuda_engine(f.read())\n",
     "        stream: int = torch.cuda.current_stream().cuda_stream\n",
     "        context: IExecutionContext = engine.create_execution_context()\n",
     "        context.set_optimization_profile_async(profile_index=profile_index, stream_handle=stream)\n",
     "        # retrieve input/output IDs\n",
     "        input_binding_idxs, output_binding_idxs = get_binding_idxs(engine, profile_index)  # type: List[int], List[int]\n",
     "\n",
     "        # the closure keeps engine/context alive for as long as the returned\n",
     "        # callable is referenced\n",
     "        def tensorrt_model(inputs: Dict[str, torch.Tensor]) -> torch.Tensor:\n",
     "            return infer_tensorrt(\n",
     "                context=context,\n",
     "                host_inputs=inputs,\n",
     "                input_binding_idxs=input_binding_idxs,\n",
     "                output_binding_idxs=output_binding_idxs,\n",
     "            )\n",
     "\n",
     "        return tensorrt_model\n",
    "\n",
    "\n",
    "def save_engine(engine: ICudaEngine, engine_file_path: str) -> None:\n",
    "    \"\"\"\n",
    "    Serialize TensorRT engine to file.\n",
    "    :param engine: TensorRT engine\n",
    "    :param engine_file_path: output path\n",
    "    \"\"\"\n",
    "    with open(engine_file_path, \"wb\") as f:\n",
    "        f.write(engine.serialize())\n",
    "\n",
    "\n",
    "def get_binding_idxs(engine: trt.ICudaEngine, profile_index: int):\n",
    "    \"\"\"\n",
    "    Calculate start/end binding indices for current context's profile\n",
    "    https://docs.nvidia.com/deeplearning/tensorrt/developer-guide/index.html#opt_profiles_bindings\n",
    "    :param engine: TensorRT engine generated during the model building\n",
    "    :param profile_index: profile to use (several profiles can be set during building)\n",
    "    :return: input and output tensor indexes\n",
    "    \"\"\"\n",
    "    num_bindings_per_profile = engine.num_bindings // engine.num_optimization_profiles\n",
    "    start_binding = profile_index * num_bindings_per_profile\n",
    "    end_binding = start_binding + num_bindings_per_profile  # Separate input and output binding indices for convenience\n",
    "    input_binding_idxs: List[int] = []\n",
    "    output_binding_idxs: List[int] = []\n",
    "    for binding_index in range(start_binding, end_binding):\n",
    "        if engine.binding_is_input(binding_index):\n",
    "            input_binding_idxs.append(binding_index)\n",
    "        else:\n",
    "            output_binding_idxs.append(binding_index)\n",
    "    return input_binding_idxs, output_binding_idxs\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [
    {
     "ename": "TypeError",
     "evalue": "load_engine() missing 1 required positional argument: 'runtime'",
     "output_type": "error",
     "traceback": [
      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[0;31mTypeError\u001b[0m                                 Traceback (most recent call last)",
      "\u001b[0;32m/tmp/ipykernel_9093/3074933749.py\u001b[0m in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0mengine\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mload_engine\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mengine_file_path\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mengine_file_path\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m",
      "\u001b[0;31mTypeError\u001b[0m: load_engine() missing 1 required positional argument: 'runtime'"
     ]
    }
   ],
   "source": [
    "engine = load_engine(engine_file_path=engine_file_path)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# https://github.com/NVIDIA/TensorRT/blob/main/quickstart/SemanticSegmentation/tutorial-runtime.ipynb\n",
    "\n",
    "# docker run --gpus all -p2222:22 -d nvcr.io/nvidia/pytorch:21.02-py3 --hostname test_docker"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.7"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 0
}
