<!DOCTYPE html>
<html lang="en">
 <head>
  <meta charset="utf-8"/>
  <meta content="width=device-width, initial-scale=1.0" name="viewport"/>
  <meta content="ie=edge" http-equiv="x-ua-compatible"/>
  <meta content="Copy to clipboard" name="lang:clipboard.copy"/>
  <meta content="Copied to clipboard" name="lang:clipboard.copied"/>
  <meta content="en" name="lang:search.language"/>
  <meta content="True" name="lang:search.pipeline.stopwords"/>
  <meta content="True" name="lang:search.pipeline.trimmer"/>
  <meta content="No matching documents" name="lang:search.result.none"/>
  <meta content="1 matching document" name="lang:search.result.one"/>
  <meta content="# matching documents" name="lang:search.result.other"/>
  <meta content="[\s\-]+" name="lang:search.tokenizer"/>
  <link crossorigin="" href="https://fonts.gstatic.com/" rel="preconnect"/>
  <link href="https://fonts.googleapis.com/css?family=Roboto+Mono:400,500,700|Roboto:300,400,400i,700&amp;display=fallback" rel="stylesheet"/>
  <style>
   body,
      input {
        font-family: "Roboto", "Helvetica Neue", Helvetica, Arial, sans-serif
      }

      code,
      kbd,
      pre {
        font-family: "Roboto Mono", "Courier New", Courier, monospace
      }
  </style>
  <link href="../_static/stylesheets/application.css" rel="stylesheet"/>
  <link href="../_static/stylesheets/application-palette.css" rel="stylesheet"/>
  <link href="../_static/stylesheets/application-fixes.css" rel="stylesheet"/>
  <link href="../_static/fonts/material-icons.css" rel="stylesheet"/>
  <meta content="#84bd00" name="theme-color"/>
  <script src="../_static/javascripts/modernizr.js">
  </script>
  <title>
   Masked Language Modeling (MLM) with Hugging Face BERT Transformer — Torch-TensorRT v1.1.1 documentation
  </title>
  <link href="../_static/material.css" rel="stylesheet" type="text/css"/>
  <link href="../_static/pygments.css" rel="stylesheet" type="text/css"/>
  <link href="../_static/collapsible-lists/css/tree_view.css" rel="stylesheet" type="text/css"/>
  <script data-url_root="../" id="documentation_options" src="../_static/documentation_options.js">
  </script>
  <script src="../_static/jquery.js">
  </script>
  <script src="../_static/underscore.js">
  </script>
  <script src="../_static/doctools.js">
  </script>
  <script src="../_static/language_data.js">
  </script>
  <script src="../_static/collapsible-lists/js/CollapsibleLists.compressed.js">
  </script>
  <script src="../_static/collapsible-lists/js/apply-collapsible-lists.js">
  </script>
  <script crossorigin="anonymous" integrity="sha256-Ae2Vz/4ePdIu6ZyI/5ZGsYnb+m0JlOmKPjt6XZ9JJkA=" src="https://cdnjs.cloudflare.com/ajax/libs/require.js/2.3.4/require.min.js">
  </script>
  <script async="async" src="https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.7/latest.js?config=TeX-AMS-MML_HTMLorMML">
  </script>
  <script type="text/x-mathjax-config">
   MathJax.Hub.Config({"tex2jax": {"inlineMath": [["$", "$"], ["\\(", "\\)"]], "processEscapes": true, "ignoreClass": "document", "processClass": "math|output_area"}})
  </script>
  <link href="../genindex.html" rel="index" title="Index"/>
  <link href="../search.html" rel="search" title="Search"/>
  <link href="lenet-getting-started.html" rel="next" title="Torch-TensorRT Getting Started - LeNet"/>
  <link href="EfficientNet-example.html" rel="prev" title="Torch-TensorRT Getting Started - EfficientNet-B0"/>
 </head>
 <body data-md-color-accent="light-green" data-md-color-primary="light-green" dir="ltr">
  <svg class="md-svg">
   <defs data-children-count="0">
    <svg height="448" id="__github" viewbox="0 0 416 448" width="416" xmlns="http://www.w3.org/2000/svg">
     <path d="M160 304q0 10-3.125 20.5t-10.75 19T128 352t-18.125-8.5-10.75-19T96 304t3.125-20.5 10.75-19T128 256t18.125 8.5 10.75 19T160 304zm160 0q0 10-3.125 20.5t-10.75 19T288 352t-18.125-8.5-10.75-19T256 304t3.125-20.5 10.75-19T288 256t18.125 8.5 10.75 19T320 304zm40 0q0-30-17.25-51T296 232q-10.25 0-48.75 5.25Q229.5 240 208 240t-39.25-2.75Q130.75 232 120 232q-29.5 0-46.75 21T56 304q0 22 8 38.375t20.25 25.75 30.5 15 35 7.375 37.25 1.75h42q20.5 0 37.25-1.75t35-7.375 30.5-15 20.25-25.75T360 304zm56-44q0 51.75-15.25 82.75-9.5 19.25-26.375 33.25t-35.25 21.5-42.5 11.875-42.875 5.5T212 416q-19.5 0-35.5-.75t-36.875-3.125-38.125-7.5-34.25-12.875T37 371.5t-21.5-28.75Q0 312 0 260q0-59.25 34-99-6.75-20.5-6.75-42.5 0-29 12.75-54.5 27 0 47.5 9.875t47.25 30.875Q171.5 96 212 96q37 0 70 8 26.25-20.5 46.75-30.25T376 64q12.75 25.5 12.75 54.5 0 21.75-6.75 42 34 40 34 99.5z" fill="currentColor">
     </path>
    </svg>
   </defs>
  </svg>
  <input class="md-toggle" data-md-toggle="drawer" id="__drawer" type="checkbox"/>
  <input class="md-toggle" data-md-toggle="search" id="__search" type="checkbox"/>
  <label class="md-overlay" data-md-component="overlay" for="__drawer">
  </label>
  <a class="md-skip" href="#_notebooks/Hugging-Face-BERT">
   Skip to content
  </a>
  <header class="md-header" data-md-component="header">
   <nav class="md-header-nav md-grid">
    <div class="md-flex navheader">
     <div class="md-flex__cell md-flex__cell--shrink">
      <a class="md-header-nav__button md-logo" href="../index.html" title="Torch-TensorRT v1.1.1 documentation">
       <i class="md-icon">
        
       </i>
      </a>
     </div>
     <div class="md-flex__cell md-flex__cell--shrink">
      <label class="md-icon md-icon--menu md-header-nav__button" for="__drawer">
      </label>
     </div>
     <div class="md-flex__cell md-flex__cell--stretch">
      <div class="md-flex__ellipsis md-header-nav__title" data-md-component="title">
       <span class="md-header-nav__topic">
        Torch-TensorRT
       </span>
       <span class="md-header-nav__topic">
        Masked Language Modeling (MLM) with Hugging Face BERT Transformer
       </span>
      </div>
     </div>
     <div class="md-flex__cell md-flex__cell--shrink">
      <label class="md-icon md-icon--search md-header-nav__button" for="__search">
      </label>
      <div class="md-search" data-md-component="search" role="dialog">
       <label class="md-search__overlay" for="__search">
       </label>
       <div class="md-search__inner" role="search">
        <form action="../search.html" class="md-search__form" method="get" name="search">
         <input autocapitalize="off" autocomplete="off" class="md-search__input" data-md-component="query" data-md-state="active" name="q" placeholder="Search" spellcheck="false" type="text"/>
         <label class="md-icon md-search__icon" for="__search">
         </label>
          <button aria-label="Clear search" class="md-icon md-search__icon" data-md-component="reset" tabindex="-1" type="reset">
          
         </button>
        </form>
        <div class="md-search__output">
         <div class="md-search__scrollwrap" data-md-scrollfix="">
          <div class="md-search-result" data-md-component="result">
           <div class="md-search-result__meta">
            Type to start searching
           </div>
           <ol class="md-search-result__list">
           </ol>
          </div>
         </div>
        </div>
       </div>
      </div>
     </div>
     <div class="md-flex__cell md-flex__cell--shrink">
      <div class="md-header-nav__source">
       <a class="md-source" data-md-source="github" href="https://github.com/nvidia/Torch-TensorRT/" title="Go to repository">
        <div class="md-source__icon">
         <svg height="28" viewbox="0 0 24 24" width="28" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
          <use height="24" width="24" xlink:href="#__github">
          </use>
         </svg>
        </div>
        <div class="md-source__repository">
         Torch-TensorRT
        </div>
       </a>
      </div>
     </div>
     <div class="md-flex__cell md-flex__cell--shrink dropdown">
      <button class="dropdownbutton">
       Versions
      </button>
      <div class="dropdown-content md-hero">
       <a href="https://nvidia.github.io/Torch-TensorRT/" title="master">
        master
       </a>
       <a href="https://nvidia.github.io/Torch-TensorRT/v1.1.1/" title="v1.1.1">
        v1.1.1
       </a>
       <a href="https://nvidia.github.io/Torch-TensorRT/v1.1.0/" title="v1.1.0">
        v1.1.0
       </a>
       <a href="https://nvidia.github.io/Torch-TensorRT/v1.0.0/" title="v1.0.0">
        v1.0.0
       </a>
       <a href="https://nvidia.github.io/Torch-TensorRT/v0.4.1/" title="v0.4.1">
        v0.4.1
       </a>
       <a href="https://nvidia.github.io/Torch-TensorRT/v0.4.0/" title="v0.4.0">
        v0.4.0
       </a>
       <a href="https://nvidia.github.io/Torch-TensorRT/v0.3.0/" title="v0.3.0">
        v0.3.0
       </a>
       <a href="https://nvidia.github.io/Torch-TensorRT/v0.2.0/" title="v0.2.0">
        v0.2.0
       </a>
       <a href="https://nvidia.github.io/Torch-TensorRT/v0.1.0/" title="v0.1.0">
        v0.1.0
       </a>
       <a href="https://nvidia.github.io/Torch-TensorRT/v0.0.3/" title="v0.0.3">
        v0.0.3
       </a>
       <a href="https://nvidia.github.io/Torch-TensorRT/v0.0.2/" title="v0.0.2">
        v0.0.2
       </a>
       <a href="https://nvidia.github.io/Torch-TensorRT/v0.0.1/" title="v0.0.1">
        v0.0.1
       </a>
      </div>
     </div>
    </div>
   </nav>
  </header>
  <div class="md-container">
   <nav class="md-tabs" data-md-component="tabs">
    <div class="md-tabs__inner md-grid">
     <ul class="md-tabs__list">
      <li class="md-tabs__item">
       <a class="md-tabs__link" href="../index.html">
        Torch-TensorRT v1.1.1 documentation
       </a>
      </li>
     </ul>
    </div>
   </nav>
   <main class="md-main">
    <div class="md-main__inner md-grid" data-md-component="container">
     <div class="md-sidebar md-sidebar--primary" data-md-component="navigation">
      <div class="md-sidebar__scrollwrap">
       <div class="md-sidebar__inner">
        <nav class="md-nav md-nav--primary" data-md-level="0">
         <label class="md-nav__title md-nav__title--site" for="__drawer">
          <a class="md-nav__button md-logo" href="../index.html" title="Torch-TensorRT v1.1.1 documentation">
           <i class="md-icon">
            
           </i>
          </a>
          <a href="../index.html" title="Torch-TensorRT v1.1.1 documentation">
           Torch-TensorRT
          </a>
         </label>
         <div class="md-nav__source">
          <a class="md-source" data-md-source="github" href="https://github.com/nvidia/Torch-TensorRT/" title="Go to repository">
           <div class="md-source__icon">
            <svg height="28" viewbox="0 0 24 24" width="28" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
             <use height="24" width="24" xlink:href="#__github">
             </use>
            </svg>
           </div>
           <div class="md-source__repository">
            Torch-TensorRT
           </div>
          </a>
         </div>
         <ul class="md-nav__list">
          <li class="md-nav__item">
           <span class="md-nav__link caption">
            <span class="caption-text">
             Getting Started
            </span>
           </span>
          </li>
          <li class="md-nav__item">
           <a class="md-nav__link" href="../tutorials/installation.html">
            Installation
           </a>
          </li>
          <li class="md-nav__item">
           <a class="md-nav__link" href="../tutorials/getting_started_with_cpp_api.html">
            Getting Started with C++
           </a>
          </li>
          <li class="md-nav__item">
           <a class="md-nav__link" href="../tutorials/getting_started_with_python_api.html">
            Using Torch-TensorRT in Python
           </a>
          </li>
          <li class="md-nav__item">
           <a class="md-nav__link" href="../tutorials/creating_torchscript_module_in_python.html">
            Creating a TorchScript Module
           </a>
          </li>
          <li class="md-nav__item">
           <a class="md-nav__link" href="../tutorials/creating_torchscript_module_in_python.html#working-with-torchscript-in-python">
            Working with TorchScript in Python
           </a>
          </li>
          <li class="md-nav__item">
           <a class="md-nav__link" href="../tutorials/creating_torchscript_module_in_python.html#saving-torchscript-module-to-disk">
            Saving TorchScript Module to Disk
           </a>
          </li>
          <li class="md-nav__item">
           <a class="md-nav__link" href="../tutorials/ptq.html">
            Post Training Quantization (PTQ)
           </a>
          </li>
          <li class="md-nav__item">
           <a class="md-nav__link" href="../tutorials/torchtrtc.html">
            torchtrtc
           </a>
          </li>
          <li class="md-nav__item">
           <a class="md-nav__link" href="../tutorials/use_from_pytorch.html">
            Using Torch-TensorRT Directly From PyTorch
           </a>
          </li>
          <li class="md-nav__item">
           <a class="md-nav__link" href="../tutorials/runtime.html">
            Deploying Torch-TensorRT Programs
           </a>
          </li>
          <li class="md-nav__item">
           <a class="md-nav__link" href="../tutorials/using_dla.html">
            DLA
           </a>
          </li>
          <li class="md-nav__item">
           <span class="md-nav__link caption">
            <span class="caption-text">
             Notebooks
            </span>
           </span>
          </li>
          <li class="md-nav__item">
           <a class="md-nav__link" href="CitriNet-example.html">
            Torch-TensorRT Getting Started - CitriNet
           </a>
          </li>
          <li class="md-nav__item">
           <a class="md-nav__link" href="dynamic-shapes.html">
            Torch-TensorRT - Using Dynamic Shapes
           </a>
          </li>
          <li class="md-nav__item">
           <a class="md-nav__link" href="EfficientNet-example.html">
            Torch-TensorRT Getting Started - EfficientNet-B0
           </a>
          </li>
          <li class="md-nav__item">
           <input class="md-toggle md-nav__toggle" data-md-toggle="toc" id="__toc" type="checkbox"/>
           <label class="md-nav__link md-nav__link--active" for="__toc">
            Masked Language Modeling (MLM) with Hugging Face BERT Transformer
           </label>
           <a class="md-nav__link md-nav__link--active" href="#">
            Masked Language Modeling (MLM) with Hugging Face BERT Transformer
           </a>
           <nav class="md-nav md-nav--secondary">
            <label class="md-nav__title" for="__toc">
             Contents
            </label>
            <ul class="md-nav__list" data-md-scrollfix="">
             <li class="md-nav__item">
              <a class="md-nav__link" href="#notebooks-hugging-face-bert--page-root">
               Masked Language Modeling (MLM) with Hugging Face BERT Transformer
              </a>
              <nav class="md-nav">
               <ul class="md-nav__list">
                <li class="md-nav__item">
                 <a class="md-nav__link" href="#Learning-objectives">
                  Learning objectives
                 </a>
                </li>
                <li class="md-nav__item">
                 <a class="md-nav__link" href="#Contents">
                  Contents
                 </a>
                 <nav class="md-nav">
                  <ul class="md-nav__list">
                   <li class="md-nav__item">
                    <a class="md-nav__link" href="#What’s-next">
                     What’s next
                    </a>
                   </li>
                  </ul>
                 </nav>
                </li>
               </ul>
              </nav>
             </li>
             <li class="md-nav__item">
              <a class="md-nav__extra_link" href="../_sources/_notebooks/Hugging-Face-BERT.ipynb.txt">
               Show Source
              </a>
             </li>
            </ul>
           </nav>
          </li>
          <li class="md-nav__item">
           <a class="md-nav__link" href="lenet-getting-started.html">
            Torch-TensorRT Getting Started - LeNet
           </a>
          </li>
          <li class="md-nav__item">
           <a class="md-nav__link" href="Resnet50-example.html">
            Torch-TensorRT Getting Started - ResNet 50
           </a>
          </li>
          <li class="md-nav__item">
           <a class="md-nav__link" href="ssd-object-detection-demo.html">
            Object Detection with Torch-TensorRT (SSD)
           </a>
          </li>
          <li class="md-nav__item">
           <a class="md-nav__link" href="vgg-qat.html">
            Deploying Quantization Aware Trained models in INT8 using Torch-TensorRT
           </a>
          </li>
          <li class="md-nav__item">
           <span class="md-nav__link caption">
            <span class="caption-text">
             Python API Documentation
            </span>
           </span>
          </li>
          <li class="md-nav__item">
           <a class="md-nav__link" href="../py_api/torch_tensorrt.html">
            torch_tensorrt
           </a>
          </li>
          <li class="md-nav__item">
           <a class="md-nav__link" href="../py_api/logging.html">
            torch_tensorrt.logging
           </a>
          </li>
          <li class="md-nav__item">
           <a class="md-nav__link" href="../py_api/ptq.html">
            torch_tensorrt.ptq
           </a>
          </li>
          <li class="md-nav__item">
           <a class="md-nav__link" href="../py_api/ts.html">
            torch_tensorrt.ts
           </a>
          </li>
          <li class="md-nav__item">
           <span class="md-nav__link caption">
            <span class="caption-text">
              C++ API Documentation
            </span>
           </span>
          </li>
          <li class="md-nav__item">
           <a class="md-nav__link" href="../_cpp_api/torch_tensort_cpp.html">
            Torch-TensorRT C++ API
           </a>
          </li>
          <li class="md-nav__item">
           <a class="md-nav__link" href="../_cpp_api/namespace_torch_tensorrt.html">
            Namespace torch_tensorrt
           </a>
          </li>
          <li class="md-nav__item">
           <a class="md-nav__link" href="../_cpp_api/namespace_torch_tensorrt__logging.html">
            Namespace torch_tensorrt::logging
           </a>
          </li>
          <li class="md-nav__item">
           <a class="md-nav__link" href="../_cpp_api/namespace_torch_tensorrt__torchscript.html">
            Namespace torch_tensorrt::torchscript
           </a>
          </li>
          <li class="md-nav__item">
           <a class="md-nav__link" href="../_cpp_api/namespace_torch_tensorrt__ptq.html">
            Namespace torch_tensorrt::ptq
           </a>
          </li>
          <li class="md-nav__item">
           <span class="md-nav__link caption">
            <span class="caption-text">
             Contributor Documentation
            </span>
           </span>
          </li>
          <li class="md-nav__item">
           <a class="md-nav__link" href="../contributors/system_overview.html">
            System Overview
           </a>
          </li>
          <li class="md-nav__item">
           <a class="md-nav__link" href="../contributors/writing_converters.html">
            Writing Converters
           </a>
          </li>
          <li class="md-nav__item">
           <a class="md-nav__link" href="../contributors/useful_links.html">
            Useful Links for Torch-TensorRT Development
           </a>
          </li>
          <li class="md-nav__item">
           <span class="md-nav__link caption">
            <span class="caption-text">
             Indices
            </span>
           </span>
          </li>
          <li class="md-nav__item">
           <a class="md-nav__link" href="../indices/supported_ops.html">
            Operators Supported
           </a>
          </li>
         </ul>
        </nav>
       </div>
      </div>
     </div>
     <div class="md-sidebar md-sidebar--secondary" data-md-component="toc">
      <div class="md-sidebar__scrollwrap">
       <div class="md-sidebar__inner">
        <nav class="md-nav md-nav--secondary">
         <label class="md-nav__title" for="__toc">
          Contents
         </label>
         <ul class="md-nav__list" data-md-scrollfix="">
          <li class="md-nav__item">
           <a class="md-nav__link" href="#notebooks-hugging-face-bert--page-root">
            Masked Language Modeling (MLM) with Hugging Face BERT Transformer
           </a>
           <nav class="md-nav">
            <ul class="md-nav__list">
             <li class="md-nav__item">
              <a class="md-nav__link" href="#Learning-objectives">
               Learning objectives
              </a>
             </li>
             <li class="md-nav__item">
              <a class="md-nav__link" href="#Contents">
               Contents
              </a>
              <nav class="md-nav">
               <ul class="md-nav__list">
                <li class="md-nav__item">
                 <a class="md-nav__link" href="#What’s-next">
                  What’s next
                 </a>
                </li>
               </ul>
              </nav>
             </li>
            </ul>
           </nav>
          </li>
          <li class="md-nav__item">
           <a class="md-nav__extra_link" href="../_sources/_notebooks/Hugging-Face-BERT.ipynb.txt">
            Show Source
           </a>
          </li>
          <li class="md-nav__item" id="searchbox">
          </li>
         </ul>
        </nav>
       </div>
      </div>
     </div>
     <div class="md-content">
      <article class="md-content__inner md-typeset" role="main">
       <style>
        /* CSS for nbsphinx extension */

/* remove conflicting styling from Sphinx themes */
div.nbinput.container div.prompt *,
div.nboutput.container div.prompt *,
div.nbinput.container div.input_area pre,
div.nboutput.container div.output_area pre,
div.nbinput.container div.input_area .highlight,
div.nboutput.container div.output_area .highlight {
    border: none;
    padding: 0;
    margin: 0;
    box-shadow: none;
}

div.nbinput.container > div[class*=highlight],
div.nboutput.container > div[class*=highlight] {
    margin: 0;
}

div.nbinput.container div.prompt *,
div.nboutput.container div.prompt * {
    background: none;
}

div.nboutput.container div.output_area .highlight,
div.nboutput.container div.output_area pre {
    background: unset;
}

div.nboutput.container div.output_area div.highlight {
    color: unset;  /* override Pygments text color */
}

/* avoid gaps between output lines */
div.nboutput.container div[class*=highlight] pre {
    line-height: normal;
}

/* input/output containers */
div.nbinput.container,
div.nboutput.container {
    display: -webkit-flex;
    display: flex;
    align-items: flex-start;
    margin: 0;
    width: 100%;
}
@media (max-width: 540px) {
    div.nbinput.container,
    div.nboutput.container {
        flex-direction: column;
    }
}

/* input container */
div.nbinput.container {
    padding-top: 5px;
}

/* last container */
div.nblast.container {
    padding-bottom: 5px;
}

/* input prompt */
div.nbinput.container div.prompt pre {
    color: #307FC1;
}

/* output prompt */
div.nboutput.container div.prompt pre {
    color: #BF5B3D;
}

/* all prompts */
div.nbinput.container div.prompt,
div.nboutput.container div.prompt {
    width: 4.5ex;
    padding-top: 5px;
    position: relative;
    user-select: none;
}

div.nbinput.container div.prompt > div,
div.nboutput.container div.prompt > div {
    position: absolute;
    right: 0;
    margin-right: 0.3ex;
}

@media (max-width: 540px) {
    div.nbinput.container div.prompt,
    div.nboutput.container div.prompt {
        width: unset;
        text-align: left;
        padding: 0.4em;
    }
    div.nboutput.container div.prompt.empty {
        padding: 0;
    }

    div.nbinput.container div.prompt > div,
    div.nboutput.container div.prompt > div {
        position: unset;
    }
}

/* disable scrollbars on prompts */
div.nbinput.container div.prompt pre,
div.nboutput.container div.prompt pre {
    overflow: hidden;
}

/* input/output area */
div.nbinput.container div.input_area,
div.nboutput.container div.output_area {
    -webkit-flex: 1;
    flex: 1;
    overflow: auto;
}
@media (max-width: 540px) {
    div.nbinput.container div.input_area,
    div.nboutput.container div.output_area {
        width: 100%;
    }
}

/* input area */
div.nbinput.container div.input_area {
    border: 1px solid #e0e0e0;
    border-radius: 2px;
    /*background: #f5f5f5;*/
}

/* override MathJax center alignment in output cells */
div.nboutput.container div[class*=MathJax] {
    text-align: left !important;
}

/* override sphinx.ext.imgmath center alignment in output cells */
div.nboutput.container div.math p {
    text-align: left;
}

/* standard error */
div.nboutput.container div.output_area.stderr {
    background: #fdd;
}

/* ANSI colors */
.ansi-black-fg { color: #3E424D; }
.ansi-black-bg { background-color: #3E424D; }
.ansi-black-intense-fg { color: #282C36; }
.ansi-black-intense-bg { background-color: #282C36; }
.ansi-red-fg { color: #E75C58; }
.ansi-red-bg { background-color: #E75C58; }
.ansi-red-intense-fg { color: #B22B31; }
.ansi-red-intense-bg { background-color: #B22B31; }
.ansi-green-fg { color: #00A250; }
.ansi-green-bg { background-color: #00A250; }
.ansi-green-intense-fg { color: #007427; }
.ansi-green-intense-bg { background-color: #007427; }
.ansi-yellow-fg { color: #DDB62B; }
.ansi-yellow-bg { background-color: #DDB62B; }
.ansi-yellow-intense-fg { color: #B27D12; }
.ansi-yellow-intense-bg { background-color: #B27D12; }
.ansi-blue-fg { color: #208FFB; }
.ansi-blue-bg { background-color: #208FFB; }
.ansi-blue-intense-fg { color: #0065CA; }
.ansi-blue-intense-bg { background-color: #0065CA; }
.ansi-magenta-fg { color: #D160C4; }
.ansi-magenta-bg { background-color: #D160C4; }
.ansi-magenta-intense-fg { color: #A03196; }
.ansi-magenta-intense-bg { background-color: #A03196; }
.ansi-cyan-fg { color: #60C6C8; }
.ansi-cyan-bg { background-color: #60C6C8; }
.ansi-cyan-intense-fg { color: #258F8F; }
.ansi-cyan-intense-bg { background-color: #258F8F; }
.ansi-white-fg { color: #C5C1B4; }
.ansi-white-bg { background-color: #C5C1B4; }
.ansi-white-intense-fg { color: #A1A6B2; }
.ansi-white-intense-bg { background-color: #A1A6B2; }

.ansi-default-inverse-fg { color: #FFFFFF; }
.ansi-default-inverse-bg { background-color: #000000; }

.ansi-bold { font-weight: bold; }
.ansi-underline { text-decoration: underline; }


div.nbinput.container div.input_area div[class*=highlight] > pre,
div.nboutput.container div.output_area div[class*=highlight] > pre,
div.nboutput.container div.output_area div[class*=highlight].math,
div.nboutput.container div.output_area.rendered_html,
div.nboutput.container div.output_area > div.output_javascript,
div.nboutput.container div.output_area:not(.rendered_html) > img{
    padding: 5px;
    margin: 0;
}

/* fix copybtn overflow problem in chromium (needed for 'sphinx_copybutton') */
div.nbinput.container div.input_area > div[class^='highlight'],
div.nboutput.container div.output_area > div[class^='highlight']{
    overflow-y: hidden;
}

/* hide copybtn icon on prompts (needed for 'sphinx_copybutton') */
.prompt a.copybtn {
    display: none;
}

/* Some additional styling taken form the Jupyter notebook CSS */
div.rendered_html table {
  border: none;
  border-collapse: collapse;
  border-spacing: 0;
  color: black;
  font-size: 12px;
  table-layout: fixed;
}
div.rendered_html thead {
  border-bottom: 1px solid black;
  vertical-align: bottom;
}
div.rendered_html tr,
div.rendered_html th,
div.rendered_html td {
  text-align: right;
  vertical-align: middle;
  padding: 0.5em 0.5em;
  line-height: normal;
  white-space: normal;
  max-width: none;
  border: none;
}
div.rendered_html th {
  font-weight: bold;
}
div.rendered_html tbody tr:nth-child(odd) {
  background: #f5f5f5;
}
div.rendered_html tbody tr:hover {
  background: rgba(66, 165, 245, 0.2);
}
       </style>
       <div class="nbinput nblast docutils container">
        <div class="prompt highlight-none notranslate">
         <div class="highlight">
          <pre><span></span>[1]:
</pre>
         </div>
        </div>
        <div class="input_area highlight-ipython3 notranslate">
         <div class="highlight">
          <pre>
<span></span><span class="c1"># Copyright 2022 NVIDIA Corporation. All Rights Reserved.</span>
<span class="c1">#</span>
<span class="c1"># Licensed under the Apache License, Version 2.0 (the "License");</span>
<span class="c1"># you may not use this file except in compliance with the License.</span>
<span class="c1"># You may obtain a copy of the License at</span>
<span class="c1">#</span>
<span class="c1">#     http://www.apache.org/licenses/LICENSE-2.0</span>
<span class="c1">#</span>
<span class="c1"># Unless required by applicable law or agreed to in writing, software</span>
<span class="c1"># distributed under the License is distributed on an "AS IS" BASIS,</span>
<span class="c1"># WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.</span>
<span class="c1"># See the License for the specific language governing permissions and</span>
<span class="c1"># limitations under the License.</span>
<span class="c1"># ==============================================================================</span>
</pre>
         </div>
        </div>
       </div>
       <p>
        <img alt="NVIDIA logo" src="https://developer.download.nvidia.com/tesla/notebook_assets/nv_logo_torch_trt_resnet_notebook.png"/>
       </p>
       <h1 id="notebooks-hugging-face-bert--page-root">
        Masked Language Modeling (MLM) with Hugging Face BERT Transformer
        <a class="headerlink" href="#notebooks-hugging-face-bert--page-root" title="Permalink to this headline">
         ¶
        </a>
       </h1>
       <h2 id="Learning-objectives">
        Learning objectives
        <a class="headerlink" href="#Learning-objectives" title="Permalink to this headline">
         ¶
        </a>
       </h2>
       <p>
        This notebook demonstrates the steps for compiling a TorchScript module with Torch-TensorRT on a pretrained BERT transformer from Hugging Face, and running it to test the speedup obtained.
       </p>
       <h2 id="Contents">
        Contents
        <a class="headerlink" href="#Contents" title="Permalink to this headline">
         ¶
        </a>
       </h2>
       <ol class="arabic simple">
        <li>
         <p>
          <a class="reference external" href="#1">
           Requirements
          </a>
         </p>
        </li>
        <li>
         <p>
          <a class="reference external" href="#2">
           BERT Overview
          </a>
         </p>
        </li>
        <li>
         <p>
          <a class="reference external" href="#3">
           Creating TorchScript modules
          </a>
         </p>
        </li>
        <li>
         <p>
          <a class="reference external" href="#4">
           Compiling with Torch-TensorRT
          </a>
         </p>
        </li>
        <li>
         <p>
          <a class="reference external" href="#5">
           Benchmarking
          </a>
         </p>
        </li>
        <li>
         <p>
          <a class="reference external" href="#6">
           Conclusion
          </a>
         </p>
        </li>
       </ol>
        <h2 id="1">
         1. Requirements
        </h2>
       <p>
        NVIDIA’s NGC provides a PyTorch Docker Container which contains PyTorch and Torch-TensorRT. Starting with version
        <code class="docutils literal notranslate">
         <span class="pre">
          22.05-py3
         </span>
        </code>
        , we can make use of
        <a class="reference external" href="https://catalog.ngc.nvidia.com/orgs/nvidia/containers/pytorch">
         latest pytorch
        </a>
        container to run this notebook.
       </p>
       <p>
        Otherwise, you can follow the steps in
        <code class="docutils literal notranslate">
         <span class="pre">
          notebooks/README
         </span>
        </code>
        to prepare a Docker container yourself, within which you can run this demo notebook.
       </p>
       <div class="nbinput docutils container">
        <div class="prompt highlight-none notranslate">
         <div class="highlight">
          <pre><span></span>[2]:
</pre>
         </div>
        </div>
        <div class="input_area highlight-ipython3 notranslate">
         <div class="highlight">
          <pre>
<span></span><span class="o">!</span>pip install transformers
</pre>
         </div>
        </div>
       </div>
       <div class="nboutput nblast docutils container">
        <div class="prompt empty docutils container">
        </div>
        <div class="output_area docutils container">
         <div class="highlight">
          <pre>
Looking in indexes: https://pypi.org/simple, https://pypi.ngc.nvidia.com
Requirement already satisfied: transformers in /opt/conda/lib/python3.8/site-packages (4.18.0)
Requirement already satisfied: tqdm&gt;=4.27 in /opt/conda/lib/python3.8/site-packages (from transformers) (4.63.0)
Requirement already satisfied: regex!=2019.12.17 in /opt/conda/lib/python3.8/site-packages (from transformers) (2022.3.15)
Requirement already satisfied: huggingface-hub&lt;1.0,&gt;=0.1.0 in /opt/conda/lib/python3.8/site-packages (from transformers) (0.5.1)
Requirement already satisfied: tokenizers!=0.11.3,&lt;0.13,&gt;=0.11.1 in /opt/conda/lib/python3.8/site-packages (from transformers) (0.12.1)
Requirement already satisfied: numpy&gt;=1.17 in /opt/conda/lib/python3.8/site-packages (from transformers) (1.22.3)
Requirement already satisfied: sacremoses in /opt/conda/lib/python3.8/site-packages (from transformers) (0.0.49)
Requirement already satisfied: requests in /opt/conda/lib/python3.8/site-packages (from transformers) (2.27.1)
Requirement already satisfied: pyyaml&gt;=5.1 in /opt/conda/lib/python3.8/site-packages (from transformers) (6.0)
Requirement already satisfied: filelock in /opt/conda/lib/python3.8/site-packages (from transformers) (3.6.0)
Requirement already satisfied: packaging&gt;=20.0 in /opt/conda/lib/python3.8/site-packages (from transformers) (21.3)
Requirement already satisfied: typing-extensions&gt;=3.7.4.3 in /opt/conda/lib/python3.8/site-packages (from huggingface-hub&lt;1.0,&gt;=0.1.0-&gt;transformers) (4.1.1)
Requirement already satisfied: pyparsing!=3.0.5,&gt;=2.0.2 in /opt/conda/lib/python3.8/site-packages (from packaging&gt;=20.0-&gt;transformers) (3.0.7)
Requirement already satisfied: urllib3&lt;1.27,&gt;=1.21.1 in /opt/conda/lib/python3.8/site-packages (from requests-&gt;transformers) (1.26.8)
Requirement already satisfied: charset-normalizer~=2.0.0 in /opt/conda/lib/python3.8/site-packages (from requests-&gt;transformers) (2.0.12)
Requirement already satisfied: certifi&gt;=2017.4.17 in /opt/conda/lib/python3.8/site-packages (from requests-&gt;transformers) (2021.10.8)
Requirement already satisfied: idna&lt;4,&gt;=2.5 in /opt/conda/lib/python3.8/site-packages (from requests-&gt;transformers) (3.3)
Requirement already satisfied: six in /opt/conda/lib/python3.8/site-packages (from sacremoses-&gt;transformers) (1.16.0)
Requirement already satisfied: click in /opt/conda/lib/python3.8/site-packages (from sacremoses-&gt;transformers) (8.0.4)
Requirement already satisfied: joblib in /opt/conda/lib/python3.8/site-packages (from sacremoses-&gt;transformers) (1.1.0)
<span class="ansi-yellow-fg">WARNING: Running pip as the 'root' user can result in broken permissions and conflicting behaviour with the system package manager. It is recommended to use a virtual environment instead: https://pip.pypa.io/warnings/venv</span>
</pre>
         </div>
        </div>
       </div>
       <div class="nbinput nblast docutils container">
        <div class="prompt highlight-none notranslate">
         <div class="highlight">
          <pre><span></span>[3]:
</pre>
         </div>
        </div>
        <div class="input_area highlight-ipython3 notranslate">
         <div class="highlight">
          <pre>
<span></span><span class="kn">from</span> <span class="nn">transformers</span> <span class="kn">import</span> <span class="n">BertTokenizer</span><span class="p">,</span> <span class="n">BertForMaskedLM</span>
<span class="kn">import</span> <span class="nn">torch</span>
<span class="kn">import</span> <span class="nn">timeit</span>
<span class="kn">import</span> <span class="nn">numpy</span> <span class="k">as</span> <span class="nn">np</span>
<span class="kn">import</span> <span class="nn">torch_tensorrt</span>
<span class="kn">import</span> <span class="nn">torch.backends.cudnn</span> <span class="k">as</span> <span class="nn">cudnn</span>
</pre>
         </div>
        </div>
       </div>
        <h2 id="2">
         2. BERT Overview
        </h2>
       <p>
        Transformers comprise a class of deep learning algorithms employing self-attention; broadly speaking, the models learn large matrices of numbers, each element of which denotes how important one component of input data is to another. Since their introduction in 2017, transformers have enjoyed widespread adoption, particularly in natural language processing, but also in computer vision problems. This is largely because they are easier to parallelize than the sequence models which attention
mechanisms were originally designed to augment.
       </p>
       <p>
         Hugging Face is a company that maintains a huge repository of pre-trained transformer models. The company also provides tools for integrating those models into PyTorch code and running inference with them.
       </p>
       <p>
        One of the most popular transformer models is BERT (Bidirectional Encoder Representations from Transformers). First developed at Google and released in 2018, it has become the backbone of Google’s search engine and a standard benchmark for NLP experiments. BERT was originally trained for next sentence prediction and masked language modeling (MLM), which aims to predict hidden words in sentences. In this notebook, we will use Hugging Face’s
        <code class="docutils literal notranslate">
         <span class="pre">
          bert-base-uncased
         </span>
        </code>
        model (BERT’s smallest and
simplest form, which does not employ text capitalization) for MLM.
       </p>
        <h2 id="3">
         3. Creating TorchScript modules
        </h2>
       <p>
        First, create a pretrained BERT tokenizer from the
        <code class="docutils literal notranslate">
         <span class="pre">
          bert-base-uncased
         </span>
        </code>
         model.
       </p>
       <div class="nbinput nblast docutils container">
        <div class="prompt highlight-none notranslate">
         <div class="highlight">
          <pre><span></span>[4]:
</pre>
         </div>
        </div>
        <div class="input_area highlight-ipython3 notranslate">
         <div class="highlight">
          <pre>
<span></span><span class="n">enc</span> <span class="o">=</span> <span class="n">BertTokenizer</span><span class="o">.</span><span class="n">from_pretrained</span><span class="p">(</span><span class="s1">'bert-base-uncased'</span><span class="p">)</span>
</pre>
         </div>
        </div>
       </div>
       <p>
        Create dummy inputs to generate a traced TorchScript model later
       </p>
       <div class="nbinput nblast docutils container">
        <div class="prompt highlight-none notranslate">
         <div class="highlight">
          <pre><span></span>[5]:
</pre>
         </div>
        </div>
        <div class="input_area highlight-ipython3 notranslate">
         <div class="highlight">
          <pre>
<span></span><span class="n">batch_size</span> <span class="o">=</span> <span class="mi">4</span>

<span class="n">batched_indexed_tokens</span> <span class="o">=</span> <span class="p">[[</span><span class="mi">101</span><span class="p">,</span> <span class="mi">64</span><span class="p">]</span><span class="o">*</span><span class="mi">64</span><span class="p">]</span><span class="o">*</span><span class="n">batch_size</span>
<span class="n">batched_segment_ids</span> <span class="o">=</span> <span class="p">[[</span><span class="mi">0</span><span class="p">,</span> <span class="mi">1</span><span class="p">]</span><span class="o">*</span><span class="mi">64</span><span class="p">]</span><span class="o">*</span><span class="n">batch_size</span>
<span class="n">batched_attention_masks</span> <span class="o">=</span> <span class="p">[[</span><span class="mi">1</span><span class="p">,</span> <span class="mi">1</span><span class="p">]</span><span class="o">*</span><span class="mi">64</span><span class="p">]</span><span class="o">*</span><span class="n">batch_size</span>

<span class="n">tokens_tensor</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">tensor</span><span class="p">(</span><span class="n">batched_indexed_tokens</span><span class="p">)</span>
<span class="n">segments_tensor</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">tensor</span><span class="p">(</span><span class="n">batched_segment_ids</span><span class="p">)</span>
<span class="n">attention_masks_tensor</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">tensor</span><span class="p">(</span><span class="n">batched_attention_masks</span><span class="p">)</span>
</pre>
         </div>
        </div>
       </div>
       <p>
         Obtain a (scripted) TorchScript BERT masked language model from Hugging Face, then use the dummy inputs to trace it.
       </p>
       <div class="nbinput docutils container">
        <div class="prompt highlight-none notranslate">
         <div class="highlight">
          <pre><span></span>[6]:
</pre>
         </div>
        </div>
        <div class="input_area highlight-ipython3 notranslate">
         <div class="highlight">
          <pre>
<span></span><span class="n">mlm_model_ts</span> <span class="o">=</span> <span class="n">BertForMaskedLM</span><span class="o">.</span><span class="n">from_pretrained</span><span class="p">(</span><span class="s1">'bert-base-uncased'</span><span class="p">,</span> <span class="n">torchscript</span><span class="o">=</span><span class="kc">True</span><span class="p">)</span>
<span class="n">traced_mlm_model</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">jit</span><span class="o">.</span><span class="n">trace</span><span class="p">(</span><span class="n">mlm_model_ts</span><span class="p">,</span> <span class="p">[</span><span class="n">tokens_tensor</span><span class="p">,</span> <span class="n">segments_tensor</span><span class="p">,</span> <span class="n">attention_masks_tensor</span><span class="p">])</span>
</pre>
         </div>
        </div>
       </div>
       <div class="nboutput nblast docutils container">
        <div class="prompt empty docutils container">
        </div>
        <div class="output_area stderr docutils container">
         <div class="highlight">
          <pre>
Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertForMaskedLM: ['cls.seq_relationship.bias', 'cls.seq_relationship.weight']
- This IS expected if you are initializing BertForMaskedLM from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).
- This IS NOT expected if you are initializing BertForMaskedLM from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).
</pre>
         </div>
        </div>
       </div>
       <p>
        Define 4 masked sentences, with 1 word in each sentence hidden from the model. Fluent English speakers will probably be able to guess the masked words, but just in case, they are
        <code class="docutils literal notranslate">
         <span class="pre">
          'capital'
         </span>
        </code>
        ,
        <code class="docutils literal notranslate">
         <span class="pre">
          'language'
         </span>
        </code>
        ,
        <code class="docutils literal notranslate">
         <span class="pre">
          'innings'
         </span>
        </code>
        , and
        <code class="docutils literal notranslate">
         <span class="pre">
          'mathematics'
         </span>
        </code>
        .
       </p>
       <p>
        Also create a list containing the position of the masked word within each sentence. Given Python’s 0-based indexing convention, the numbers are each higher by 1 than might be expected. This is because the token at index 0 in each sentence is a beginning-of-sentence token, denoted
        <code class="docutils literal notranslate">
         <span class="pre">
          [CLS]
         </span>
        </code>
        when entered explicitly.
       </p>
       <div class="nbinput nblast docutils container">
        <div class="prompt highlight-none notranslate">
         <div class="highlight">
          <pre><span></span>[7]:
</pre>
         </div>
        </div>
        <div class="input_area highlight-ipython3 notranslate">
         <div class="highlight">
          <pre>
<span></span><span class="n">masked_sentences</span> <span class="o">=</span> <span class="p">[</span><span class="s1">'Paris is the [MASK] of France.'</span><span class="p">,</span>
                    <span class="s1">'The primary [MASK] of the United States is English.'</span><span class="p">,</span>
                    <span class="s1">'A baseball game consists of at least nine [MASK].'</span><span class="p">,</span>
                    <span class="s1">'Topology is a branch of [MASK] concerned with the properties of geometric objects that remain unchanged under continuous transformations.'</span><span class="p">]</span>
<span class="n">pos_masks</span> <span class="o">=</span> <span class="p">[</span><span class="mi">4</span><span class="p">,</span> <span class="mi">3</span><span class="p">,</span> <span class="mi">9</span><span class="p">,</span> <span class="mi">6</span><span class="p">]</span>
</pre>
         </div>
        </div>
       </div>
       <p>
        Pass the masked sentences into the (scripted) TorchScript MLM model and verify that the unmasked sentences yield the expected results.
       </p>
       <p>
        Because the sentences are of different lengths, we must specify the
        <code class="docutils literal notranslate">
         <span class="pre">
          padding
         </span>
        </code>
        argument in calling our encoder/tokenizer. There are several possible padding strategies, but we’ll use
        <code class="docutils literal notranslate">
         <span class="pre">
          'max_length'
         </span>
        </code>
        padding with
        <code class="docutils literal notranslate">
         <span class="pre">
          max_length=128
         </span>
        </code>
        . Later, when we compile an optimized version of the model with Torch-TensorRT, the optimized model will expect inputs of length 128, hence our choice of padding strategy and length here.
       </p>
       <div class="nbinput docutils container">
        <div class="prompt highlight-none notranslate">
         <div class="highlight">
          <pre><span></span>[8]:
</pre>
         </div>
        </div>
        <div class="input_area highlight-ipython3 notranslate">
         <div class="highlight">
          <pre>
<span></span><span class="n">encoded_inputs</span> <span class="o">=</span> <span class="n">enc</span><span class="p">(</span><span class="n">masked_sentences</span><span class="p">,</span> <span class="n">return_tensors</span><span class="o">=</span><span class="s1">'pt'</span><span class="p">,</span> <span class="n">padding</span><span class="o">=</span><span class="s1">'max_length'</span><span class="p">,</span> <span class="n">max_length</span><span class="o">=</span><span class="mi">128</span><span class="p">)</span>
<span class="n">outputs</span> <span class="o">=</span> <span class="n">mlm_model_ts</span><span class="p">(</span><span class="o">**</span><span class="n">encoded_inputs</span><span class="p">)</span>
<span class="n">most_likely_token_ids</span> <span class="o">=</span> <span class="p">[</span><span class="n">torch</span><span class="o">.</span><span class="n">argmax</span><span class="p">(</span><span class="n">outputs</span><span class="p">[</span><span class="mi">0</span><span class="p">][</span><span class="n">i</span><span class="p">,</span> <span class="n">pos</span><span class="p">,</span> <span class="p">:])</span> <span class="k">for</span> <span class="n">i</span><span class="p">,</span> <span class="n">pos</span> <span class="ow">in</span> <span class="nb">enumerate</span><span class="p">(</span><span class="n">pos_masks</span><span class="p">)]</span>
<span class="n">unmasked_tokens</span> <span class="o">=</span> <span class="n">enc</span><span class="o">.</span><span class="n">decode</span><span class="p">(</span><span class="n">most_likely_token_ids</span><span class="p">)</span><span class="o">.</span><span class="n">split</span><span class="p">(</span><span class="s1">' '</span><span class="p">)</span>
<span class="n">unmasked_sentences</span> <span class="o">=</span> <span class="p">[</span><span class="n">masked_sentences</span><span class="p">[</span><span class="n">i</span><span class="p">]</span><span class="o">.</span><span class="n">replace</span><span class="p">(</span><span class="s1">'[MASK]'</span><span class="p">,</span> <span class="n">token</span><span class="p">)</span> <span class="k">for</span> <span class="n">i</span><span class="p">,</span> <span class="n">token</span> <span class="ow">in</span> <span class="nb">enumerate</span><span class="p">(</span><span class="n">unmasked_tokens</span><span class="p">)]</span>
<span class="k">for</span> <span class="n">sentence</span> <span class="ow">in</span> <span class="n">unmasked_sentences</span><span class="p">:</span>
    <span class="nb">print</span><span class="p">(</span><span class="n">sentence</span><span class="p">)</span>
</pre>
         </div>
        </div>
       </div>
       <div class="nboutput nblast docutils container">
        <div class="prompt empty docutils container">
        </div>
        <div class="output_area docutils container">
         <div class="highlight">
          <pre>
Paris is the capital of France.
The primary language of the United States is English.
A baseball game consists of at least nine innings.
Topology is a branch of mathematics concerned with the properties of geometric objects that remain unchanged under continuous transformations.
</pre>
         </div>
        </div>
       </div>
       <p>
        Pass the masked sentences into the traced MLM model and verify that the unmasked sentences yield the expected results.
       </p>
       <p>
        Note the difference in how the
        <code class="docutils literal notranslate">
         <span class="pre">
          encoded_inputs
         </span>
        </code>
        are passed into the model in the following cell compared to the previous one. If you examine
        <code class="docutils literal notranslate">
         <span class="pre">
          encoded_inputs
         </span>
        </code>
        , you’ll find that it’s a dictionary with 3 keys,
        <code class="docutils literal notranslate">
         <span class="pre">
          'input_ids'
         </span>
        </code>
        ,
        <code class="docutils literal notranslate">
         <span class="pre">
          'token_type_ids'
         </span>
        </code>
        , and
        <code class="docutils literal notranslate">
         <span class="pre">
          'attention_mask'
         </span>
        </code>
        , each with a PyTorch tensor as an associated value. The traced model will accept
        <code class="docutils literal notranslate">
         <span class="pre">
          **encoded_inputs
         </span>
        </code>
        as an input, but the Torch-TensorRT-optimized model (to be defined later) will not.
       </p>
       <div class="nbinput docutils container">
        <div class="prompt highlight-none notranslate">
         <div class="highlight">
          <pre><span></span>[9]:
</pre>
         </div>
        </div>
        <div class="input_area highlight-ipython3 notranslate">
         <div class="highlight">
          <pre>
<span></span><span class="n">encoded_inputs</span> <span class="o">=</span> <span class="n">enc</span><span class="p">(</span><span class="n">masked_sentences</span><span class="p">,</span> <span class="n">return_tensors</span><span class="o">=</span><span class="s1">'pt'</span><span class="p">,</span> <span class="n">padding</span><span class="o">=</span><span class="s1">'max_length'</span><span class="p">,</span> <span class="n">max_length</span><span class="o">=</span><span class="mi">128</span><span class="p">)</span>
<span class="n">outputs</span> <span class="o">=</span> <span class="n">traced_mlm_model</span><span class="p">(</span><span class="n">encoded_inputs</span><span class="p">[</span><span class="s1">'input_ids'</span><span class="p">],</span> <span class="n">encoded_inputs</span><span class="p">[</span><span class="s1">'token_type_ids'</span><span class="p">],</span> <span class="n">encoded_inputs</span><span class="p">[</span><span class="s1">'attention_mask'</span><span class="p">])</span>
<span class="n">most_likely_token_ids</span> <span class="o">=</span> <span class="p">[</span><span class="n">torch</span><span class="o">.</span><span class="n">argmax</span><span class="p">(</span><span class="n">outputs</span><span class="p">[</span><span class="mi">0</span><span class="p">][</span><span class="n">i</span><span class="p">,</span> <span class="n">pos</span><span class="p">,</span> <span class="p">:])</span> <span class="k">for</span> <span class="n">i</span><span class="p">,</span> <span class="n">pos</span> <span class="ow">in</span> <span class="nb">enumerate</span><span class="p">(</span><span class="n">pos_masks</span><span class="p">)]</span>
<span class="n">unmasked_tokens</span> <span class="o">=</span> <span class="n">enc</span><span class="o">.</span><span class="n">decode</span><span class="p">(</span><span class="n">most_likely_token_ids</span><span class="p">)</span><span class="o">.</span><span class="n">split</span><span class="p">(</span><span class="s1">' '</span><span class="p">)</span>
<span class="n">unmasked_sentences</span> <span class="o">=</span> <span class="p">[</span><span class="n">masked_sentences</span><span class="p">[</span><span class="n">i</span><span class="p">]</span><span class="o">.</span><span class="n">replace</span><span class="p">(</span><span class="s1">'[MASK]'</span><span class="p">,</span> <span class="n">token</span><span class="p">)</span> <span class="k">for</span> <span class="n">i</span><span class="p">,</span> <span class="n">token</span> <span class="ow">in</span> <span class="nb">enumerate</span><span class="p">(</span><span class="n">unmasked_tokens</span><span class="p">)]</span>
<span class="k">for</span> <span class="n">sentence</span> <span class="ow">in</span> <span class="n">unmasked_sentences</span><span class="p">:</span>
    <span class="nb">print</span><span class="p">(</span><span class="n">sentence</span><span class="p">)</span>
</pre>
         </div>
        </div>
       </div>
       <div class="nboutput nblast docutils container">
        <div class="prompt empty docutils container">
        </div>
        <div class="output_area docutils container">
         <div class="highlight">
          <pre>
Paris is the capital of France.
The primary language of the United States is English.
A baseball game consists of at least nine innings.
Topology is a branch of mathematics concerned with the properties of geometric objects that remain unchanged under continuous transformations.
</pre>
         </div>
        </div>
       </div>
        <h2 id="4">
         4. Compiling with Torch-TensorRT
        </h2>
       <p>
        Change the logging level to avoid long printouts
       </p>
       <div class="nbinput nblast docutils container">
        <div class="prompt highlight-none notranslate">
         <div class="highlight">
          <pre><span></span>[10]:
</pre>
         </div>
        </div>
        <div class="input_area highlight-ipython3 notranslate">
         <div class="highlight">
          <pre>
<span></span><span class="n">new_level</span> <span class="o">=</span> <span class="n">torch_tensorrt</span><span class="o">.</span><span class="n">logging</span><span class="o">.</span><span class="n">Level</span><span class="o">.</span><span class="n">Error</span>
<span class="n">torch_tensorrt</span><span class="o">.</span><span class="n">logging</span><span class="o">.</span><span class="n">set_reportable_log_level</span><span class="p">(</span><span class="n">new_level</span><span class="p">)</span>
</pre>
         </div>
        </div>
       </div>
       <p>
        Compile the model
       </p>
       <div class="nbinput nblast docutils container">
        <div class="prompt highlight-none notranslate">
         <div class="highlight">
          <pre><span></span>[11]:
</pre>
         </div>
        </div>
        <div class="input_area highlight-ipython3 notranslate">
         <div class="highlight">
          <pre>
<span></span><span class="n">trt_model</span> <span class="o">=</span> <span class="n">torch_tensorrt</span><span class="o">.</span><span class="n">compile</span><span class="p">(</span><span class="n">traced_mlm_model</span><span class="p">,</span>
    <span class="n">inputs</span><span class="o">=</span> <span class="p">[</span><span class="n">torch_tensorrt</span><span class="o">.</span><span class="n">Input</span><span class="p">(</span><span class="n">shape</span><span class="o">=</span><span class="p">[</span><span class="n">batch_size</span><span class="p">,</span> <span class="mi">128</span><span class="p">],</span> <span class="n">dtype</span><span class="o">=</span><span class="n">torch</span><span class="o">.</span><span class="n">int32</span><span class="p">),</span>  <span class="c1"># input_ids</span>
             <span class="n">torch_tensorrt</span><span class="o">.</span><span class="n">Input</span><span class="p">(</span><span class="n">shape</span><span class="o">=</span><span class="p">[</span><span class="n">batch_size</span><span class="p">,</span> <span class="mi">128</span><span class="p">],</span> <span class="n">dtype</span><span class="o">=</span><span class="n">torch</span><span class="o">.</span><span class="n">int32</span><span class="p">),</span>  <span class="c1"># token_type_ids</span>
             <span class="n">torch_tensorrt</span><span class="o">.</span><span class="n">Input</span><span class="p">(</span><span class="n">shape</span><span class="o">=</span><span class="p">[</span><span class="n">batch_size</span><span class="p">,</span> <span class="mi">128</span><span class="p">],</span> <span class="n">dtype</span><span class="o">=</span><span class="n">torch</span><span class="o">.</span><span class="n">int32</span><span class="p">)],</span> <span class="c1"># attention_mask</span>
    <span class="n">enabled_precisions</span><span class="o">=</span> <span class="p">{</span><span class="n">torch</span><span class="o">.</span><span class="n">float32</span><span class="p">},</span> <span class="c1"># Run with 32-bit precision</span>
    <span class="n">workspace_size</span><span class="o">=</span><span class="mi">2000000000</span><span class="p">,</span>
    <span class="n">truncate_long_and_double</span><span class="o">=</span><span class="kc">True</span>
<span class="p">)</span>
</pre>
         </div>
        </div>
       </div>
       <p>
        Pass the masked sentences into the compiled model and verify that the unmasked sentences yield the expected results.
       </p>
       <div class="nbinput docutils container">
        <div class="prompt highlight-none notranslate">
         <div class="highlight">
          <pre><span></span>[12]:
</pre>
         </div>
        </div>
        <div class="input_area highlight-ipython3 notranslate">
         <div class="highlight">
          <pre>
<span></span><span class="n">enc_inputs</span> <span class="o">=</span> <span class="n">enc</span><span class="p">(</span><span class="n">masked_sentences</span><span class="p">,</span> <span class="n">return_tensors</span><span class="o">=</span><span class="s1">'pt'</span><span class="p">,</span> <span class="n">padding</span><span class="o">=</span><span class="s1">'max_length'</span><span class="p">,</span> <span class="n">max_length</span><span class="o">=</span><span class="mi">128</span><span class="p">)</span>
<span class="n">enc_inputs</span> <span class="o">=</span> <span class="p">{</span><span class="n">k</span><span class="p">:</span> <span class="n">v</span><span class="o">.</span><span class="n">type</span><span class="p">(</span><span class="n">torch</span><span class="o">.</span><span class="n">int32</span><span class="p">)</span><span class="o">.</span><span class="n">cuda</span><span class="p">()</span> <span class="k">for</span> <span class="n">k</span><span class="p">,</span> <span class="n">v</span> <span class="ow">in</span> <span class="n">enc_inputs</span><span class="o">.</span><span class="n">items</span><span class="p">()}</span>
<span class="n">output_trt</span> <span class="o">=</span> <span class="n">trt_model</span><span class="p">(</span><span class="n">enc_inputs</span><span class="p">[</span><span class="s1">'input_ids'</span><span class="p">],</span> <span class="n">enc_inputs</span><span class="p">[</span><span class="s1">'token_type_ids'</span><span class="p">],</span> <span class="n">enc_inputs</span><span class="p">[</span><span class="s1">'attention_mask'</span><span class="p">])</span>
<span class="n">most_likely_token_ids_trt</span> <span class="o">=</span> <span class="p">[</span><span class="n">torch</span><span class="o">.</span><span class="n">argmax</span><span class="p">(</span><span class="n">output_trt</span><span class="p">[</span><span class="n">i</span><span class="p">,</span> <span class="n">pos</span><span class="p">,</span> <span class="p">:])</span> <span class="k">for</span> <span class="n">i</span><span class="p">,</span> <span class="n">pos</span> <span class="ow">in</span> <span class="nb">enumerate</span><span class="p">(</span><span class="n">pos_masks</span><span class="p">)]</span>
<span class="n">unmasked_tokens_trt</span> <span class="o">=</span> <span class="n">enc</span><span class="o">.</span><span class="n">decode</span><span class="p">(</span><span class="n">most_likely_token_ids_trt</span><span class="p">)</span><span class="o">.</span><span class="n">split</span><span class="p">(</span><span class="s1">' '</span><span class="p">)</span>
<span class="n">unmasked_sentences_trt</span> <span class="o">=</span> <span class="p">[</span><span class="n">masked_sentences</span><span class="p">[</span><span class="n">i</span><span class="p">]</span><span class="o">.</span><span class="n">replace</span><span class="p">(</span><span class="s1">'[MASK]'</span><span class="p">,</span> <span class="n">token</span><span class="p">)</span> <span class="k">for</span> <span class="n">i</span><span class="p">,</span> <span class="n">token</span> <span class="ow">in</span> <span class="nb">enumerate</span><span class="p">(</span><span class="n">unmasked_tokens_trt</span><span class="p">)]</span>
<span class="k">for</span> <span class="n">sentence</span> <span class="ow">in</span> <span class="n">unmasked_sentences_trt</span><span class="p">:</span>
    <span class="nb">print</span><span class="p">(</span><span class="n">sentence</span><span class="p">)</span>
</pre>
         </div>
        </div>
       </div>
       <div class="nboutput nblast docutils container">
        <div class="prompt empty docutils container">
        </div>
        <div class="output_area docutils container">
         <div class="highlight">
          <pre>
Paris is the capital of France.
The primary language of the United States is English.
A baseball game consists of at least nine innings.
Topology is a branch of mathematics concerned with the properties of geometric objects that remain unchanged under continuous transformations.
</pre>
         </div>
        </div>
       </div>
       <p>
        Compile the model again, this time with 16-bit precision
       </p>
       <div class="nbinput nblast docutils container">
        <div class="prompt highlight-none notranslate">
         <div class="highlight">
          <pre><span></span>[13]:
</pre>
         </div>
        </div>
        <div class="input_area highlight-ipython3 notranslate">
         <div class="highlight">
          <pre>
<span></span><span class="n">trt_model_fp16</span> <span class="o">=</span> <span class="n">torch_tensorrt</span><span class="o">.</span><span class="n">compile</span><span class="p">(</span><span class="n">traced_mlm_model</span><span class="p">,</span>
    <span class="n">inputs</span><span class="o">=</span> <span class="p">[</span><span class="n">torch_tensorrt</span><span class="o">.</span><span class="n">Input</span><span class="p">(</span><span class="n">shape</span><span class="o">=</span><span class="p">[</span><span class="n">batch_size</span><span class="p">,</span> <span class="mi">128</span><span class="p">],</span> <span class="n">dtype</span><span class="o">=</span><span class="n">torch</span><span class="o">.</span><span class="n">int32</span><span class="p">),</span>  <span class="c1"># input_ids</span>
             <span class="n">torch_tensorrt</span><span class="o">.</span><span class="n">Input</span><span class="p">(</span><span class="n">shape</span><span class="o">=</span><span class="p">[</span><span class="n">batch_size</span><span class="p">,</span> <span class="mi">128</span><span class="p">],</span> <span class="n">dtype</span><span class="o">=</span><span class="n">torch</span><span class="o">.</span><span class="n">int32</span><span class="p">),</span>  <span class="c1"># token_type_ids</span>
             <span class="n">torch_tensorrt</span><span class="o">.</span><span class="n">Input</span><span class="p">(</span><span class="n">shape</span><span class="o">=</span><span class="p">[</span><span class="n">batch_size</span><span class="p">,</span> <span class="mi">128</span><span class="p">],</span> <span class="n">dtype</span><span class="o">=</span><span class="n">torch</span><span class="o">.</span><span class="n">int32</span><span class="p">)],</span> <span class="c1"># attention_mask</span>
    <span class="n">enabled_precisions</span><span class="o">=</span> <span class="p">{</span><span class="n">torch</span><span class="o">.</span><span class="n">half</span><span class="p">},</span> <span class="c1"># Run with 16-bit precision</span>
    <span class="n">workspace_size</span><span class="o">=</span><span class="mi">2000000000</span><span class="p">,</span>
    <span class="n">truncate_long_and_double</span><span class="o">=</span><span class="kc">True</span>
<span class="p">)</span>
</pre>
         </div>
        </div>
       </div>
       <p>
         5. Benchmarking
       </p>
       <p>
        In developing this notebook, we conducted our benchmarking on a single NVIDIA A100 GPU. Your results may differ from those shown, particularly on a different GPU.
       </p>
       <p>
        This function passes the inputs into the model and runs inference
        <code class="docutils literal notranslate">
         <span class="pre">
          num_loops
         </span>
        </code>
         times, then returns a list of that length containing the amount of time in seconds that each instance of inference took.
       </p>
       <div class="nbinput nblast docutils container">
        <div class="prompt highlight-none notranslate">
         <div class="highlight">
          <pre><span></span>[14]:
</pre>
         </div>
        </div>
        <div class="input_area highlight-ipython3 notranslate">
         <div class="highlight">
          <pre>
<span></span><span class="k">def</span> <span class="nf">timeGraph</span><span class="p">(</span><span class="n">model</span><span class="p">,</span> <span class="n">input_tensor1</span><span class="p">,</span> <span class="n">input_tensor2</span><span class="p">,</span> <span class="n">input_tensor3</span><span class="p">,</span> <span class="n">num_loops</span><span class="o">=</span><span class="mi">50</span><span class="p">):</span>
    <span class="nb">print</span><span class="p">(</span><span class="s2">"Warm up ..."</span><span class="p">)</span>
    <span class="k">with</span> <span class="n">torch</span><span class="o">.</span><span class="n">no_grad</span><span class="p">():</span>
        <span class="k">for</span> <span class="n">_</span> <span class="ow">in</span> <span class="nb">range</span><span class="p">(</span><span class="mi">20</span><span class="p">):</span>
            <span class="n">features</span> <span class="o">=</span> <span class="n">model</span><span class="p">(</span><span class="n">input_tensor1</span><span class="p">,</span> <span class="n">input_tensor2</span><span class="p">,</span> <span class="n">input_tensor3</span><span class="p">)</span>

    <span class="n">torch</span><span class="o">.</span><span class="n">cuda</span><span class="o">.</span><span class="n">synchronize</span><span class="p">()</span>

    <span class="nb">print</span><span class="p">(</span><span class="s2">"Start timing ..."</span><span class="p">)</span>
    <span class="n">timings</span> <span class="o">=</span> <span class="p">[]</span>
    <span class="k">with</span> <span class="n">torch</span><span class="o">.</span><span class="n">no_grad</span><span class="p">():</span>
        <span class="k">for</span> <span class="n">i</span> <span class="ow">in</span> <span class="nb">range</span><span class="p">(</span><span class="n">num_loops</span><span class="p">):</span>
            <span class="n">start_time</span> <span class="o">=</span> <span class="n">timeit</span><span class="o">.</span><span class="n">default_timer</span><span class="p">()</span>
            <span class="n">features</span> <span class="o">=</span> <span class="n">model</span><span class="p">(</span><span class="n">input_tensor1</span><span class="p">,</span> <span class="n">input_tensor2</span><span class="p">,</span> <span class="n">input_tensor3</span><span class="p">)</span>
            <span class="n">torch</span><span class="o">.</span><span class="n">cuda</span><span class="o">.</span><span class="n">synchronize</span><span class="p">()</span>
            <span class="n">end_time</span> <span class="o">=</span> <span class="n">timeit</span><span class="o">.</span><span class="n">default_timer</span><span class="p">()</span>
            <span class="n">timings</span><span class="o">.</span><span class="n">append</span><span class="p">(</span><span class="n">end_time</span> <span class="o">-</span> <span class="n">start_time</span><span class="p">)</span>
            <span class="c1"># print("Iteration {}: {:.6f} s".format(i, end_time - start_time))</span>

    <span class="k">return</span> <span class="n">timings</span>
</pre>
         </div>
        </div>
       </div>
       <p>
        This function prints the number of input batches the model is able to process each second and summary statistics of the model’s latency.
       </p>
       <div class="nbinput nblast docutils container">
        <div class="prompt highlight-none notranslate">
         <div class="highlight">
          <pre><span></span>[15]:
</pre>
         </div>
        </div>
        <div class="input_area highlight-ipython3 notranslate">
         <div class="highlight">
          <pre>
<span></span><span class="k">def</span> <span class="nf">printStats</span><span class="p">(</span><span class="n">graphName</span><span class="p">,</span> <span class="n">timings</span><span class="p">,</span> <span class="n">batch_size</span><span class="p">):</span>
    <span class="n">times</span> <span class="o">=</span> <span class="n">np</span><span class="o">.</span><span class="n">array</span><span class="p">(</span><span class="n">timings</span><span class="p">)</span>
    <span class="n">steps</span> <span class="o">=</span> <span class="nb">len</span><span class="p">(</span><span class="n">times</span><span class="p">)</span>
    <span class="n">speeds</span> <span class="o">=</span> <span class="n">batch_size</span> <span class="o">/</span> <span class="n">times</span>
    <span class="n">time_mean</span> <span class="o">=</span> <span class="n">np</span><span class="o">.</span><span class="n">mean</span><span class="p">(</span><span class="n">times</span><span class="p">)</span>
    <span class="n">time_med</span> <span class="o">=</span> <span class="n">np</span><span class="o">.</span><span class="n">median</span><span class="p">(</span><span class="n">times</span><span class="p">)</span>
    <span class="n">time_99th</span> <span class="o">=</span> <span class="n">np</span><span class="o">.</span><span class="n">percentile</span><span class="p">(</span><span class="n">times</span><span class="p">,</span> <span class="mi">99</span><span class="p">)</span>
    <span class="n">time_std</span> <span class="o">=</span> <span class="n">np</span><span class="o">.</span><span class="n">std</span><span class="p">(</span><span class="n">times</span><span class="p">,</span> <span class="n">ddof</span><span class="o">=</span><span class="mi">0</span><span class="p">)</span>
    <span class="n">speed_mean</span> <span class="o">=</span> <span class="n">np</span><span class="o">.</span><span class="n">mean</span><span class="p">(</span><span class="n">speeds</span><span class="p">)</span>
    <span class="n">speed_med</span> <span class="o">=</span> <span class="n">np</span><span class="o">.</span><span class="n">median</span><span class="p">(</span><span class="n">speeds</span><span class="p">)</span>

    <span class="n">msg</span> <span class="o">=</span> <span class="p">(</span><span class="s2">"</span><span class="se">\n</span><span class="si">%s</span><span class="s2"> =================================</span><span class="se">\n</span><span class="s2">"</span>
            <span class="s2">"batch size=</span><span class="si">%d</span><span class="s2">, num iterations=</span><span class="si">%d</span><span class="se">\n</span><span class="s2">"</span>
            <span class="s2">"  Median text batches/second: </span><span class="si">%.1f</span><span class="s2">, mean: </span><span class="si">%.1f</span><span class="se">\n</span><span class="s2">"</span>
            <span class="s2">"  Median latency: </span><span class="si">%.6f</span><span class="s2">, mean: </span><span class="si">%.6f</span><span class="s2">, 99th_p: </span><span class="si">%.6f</span><span class="s2">, std_dev: </span><span class="si">%.6f</span><span class="se">\n</span><span class="s2">"</span>
            <span class="p">)</span> <span class="o">%</span> <span class="p">(</span><span class="n">graphName</span><span class="p">,</span>
                <span class="n">batch_size</span><span class="p">,</span> <span class="n">steps</span><span class="p">,</span>
                <span class="n">speed_med</span><span class="p">,</span> <span class="n">speed_mean</span><span class="p">,</span>
                <span class="n">time_med</span><span class="p">,</span> <span class="n">time_mean</span><span class="p">,</span> <span class="n">time_99th</span><span class="p">,</span> <span class="n">time_std</span><span class="p">)</span>
    <span class="nb">print</span><span class="p">(</span><span class="n">msg</span><span class="p">)</span>
</pre>
         </div>
        </div>
       </div>
       <div class="nbinput nblast docutils container">
        <div class="prompt highlight-none notranslate">
         <div class="highlight">
          <pre><span></span>[16]:
</pre>
         </div>
        </div>
        <div class="input_area highlight-ipython3 notranslate">
         <div class="highlight">
          <pre>
<span></span><span class="n">cudnn</span><span class="o">.</span><span class="n">benchmark</span> <span class="o">=</span> <span class="kc">True</span>
</pre>
         </div>
        </div>
       </div>
       <p>
        Benchmark the (scripted) TorchScript model on GPU
       </p>
       <div class="nbinput docutils container">
        <div class="prompt highlight-none notranslate">
         <div class="highlight">
          <pre><span></span>[17]:
</pre>
         </div>
        </div>
        <div class="input_area highlight-ipython3 notranslate">
         <div class="highlight">
          <pre>
<span></span><span class="n">timings</span> <span class="o">=</span> <span class="n">timeGraph</span><span class="p">(</span><span class="n">mlm_model_ts</span><span class="o">.</span><span class="n">cuda</span><span class="p">(),</span> <span class="n">enc_inputs</span><span class="p">[</span><span class="s1">'input_ids'</span><span class="p">],</span> <span class="n">enc_inputs</span><span class="p">[</span><span class="s1">'token_type_ids'</span><span class="p">],</span> <span class="n">enc_inputs</span><span class="p">[</span><span class="s1">'attention_mask'</span><span class="p">])</span>

<span class="n">printStats</span><span class="p">(</span><span class="s2">"BERT"</span><span class="p">,</span> <span class="n">timings</span><span class="p">,</span> <span class="n">batch_size</span><span class="p">)</span>
</pre>
         </div>
        </div>
       </div>
       <div class="nboutput nblast docutils container">
        <div class="prompt empty docutils container">
        </div>
        <div class="output_area docutils container">
         <div class="highlight">
          <pre>
Warm up ...
Start timing ...

BERT =================================
batch size=4, num iterations=50
  Median text batches/second: 599.1, mean: 597.6
  Median latency: 0.006677, mean: 0.006693, 99th_p: 0.006943, std_dev: 0.000059

</pre>
         </div>
        </div>
       </div>
       <p>
        Benchmark the traced model on GPU
       </p>
       <div class="nbinput docutils container">
        <div class="prompt highlight-none notranslate">
         <div class="highlight">
          <pre><span></span>[18]:
</pre>
         </div>
        </div>
        <div class="input_area highlight-ipython3 notranslate">
         <div class="highlight">
          <pre>
<span></span><span class="n">timings</span> <span class="o">=</span> <span class="n">timeGraph</span><span class="p">(</span><span class="n">traced_mlm_model</span><span class="o">.</span><span class="n">cuda</span><span class="p">(),</span> <span class="n">enc_inputs</span><span class="p">[</span><span class="s1">'input_ids'</span><span class="p">],</span> <span class="n">enc_inputs</span><span class="p">[</span><span class="s1">'token_type_ids'</span><span class="p">],</span> <span class="n">enc_inputs</span><span class="p">[</span><span class="s1">'attention_mask'</span><span class="p">])</span>

<span class="n">printStats</span><span class="p">(</span><span class="s2">"BERT"</span><span class="p">,</span> <span class="n">timings</span><span class="p">,</span> <span class="n">batch_size</span><span class="p">)</span>
</pre>
         </div>
        </div>
       </div>
       <div class="nboutput nblast docutils container">
        <div class="prompt empty docutils container">
        </div>
        <div class="output_area docutils container">
         <div class="highlight">
          <pre>
Warm up ...
Start timing ...

BERT =================================
batch size=4, num iterations=50
  Median text batches/second: 951.2, mean: 951.0
  Median latency: 0.004205, mean: 0.004206, 99th_p: 0.004256, std_dev: 0.000015

</pre>
         </div>
        </div>
       </div>
       <p>
        Benchmark the compiled FP32 model on GPU
       </p>
       <div class="nbinput docutils container">
        <div class="prompt highlight-none notranslate">
         <div class="highlight">
          <pre><span></span>[19]:
</pre>
         </div>
        </div>
        <div class="input_area highlight-ipython3 notranslate">
         <div class="highlight">
          <pre>
<span></span><span class="n">timings</span> <span class="o">=</span> <span class="n">timeGraph</span><span class="p">(</span><span class="n">trt_model</span><span class="p">,</span> <span class="n">enc_inputs</span><span class="p">[</span><span class="s1">'input_ids'</span><span class="p">],</span> <span class="n">enc_inputs</span><span class="p">[</span><span class="s1">'token_type_ids'</span><span class="p">],</span> <span class="n">enc_inputs</span><span class="p">[</span><span class="s1">'attention_mask'</span><span class="p">])</span>

<span class="n">printStats</span><span class="p">(</span><span class="s2">"BERT"</span><span class="p">,</span> <span class="n">timings</span><span class="p">,</span> <span class="n">batch_size</span><span class="p">)</span>
</pre>
         </div>
        </div>
       </div>
       <div class="nboutput nblast docutils container">
        <div class="prompt empty docutils container">
        </div>
        <div class="output_area docutils container">
         <div class="highlight">
          <pre>
Warm up ...
Start timing ...

BERT =================================
batch size=4, num iterations=50
  Median text batches/second: 1216.9, mean: 1216.4
  Median latency: 0.003287, mean: 0.003289, 99th_p: 0.003317, std_dev: 0.000007

</pre>
         </div>
        </div>
       </div>
       <p>
        Benchmark the compiled FP16 model on GPU
       </p>
       <div class="nbinput docutils container">
        <div class="prompt highlight-none notranslate">
         <div class="highlight">
          <pre><span></span>[20]:
</pre>
         </div>
        </div>
        <div class="input_area highlight-ipython3 notranslate">
         <div class="highlight">
          <pre>
<span></span><span class="n">timings</span> <span class="o">=</span> <span class="n">timeGraph</span><span class="p">(</span><span class="n">trt_model_fp16</span><span class="p">,</span> <span class="n">enc_inputs</span><span class="p">[</span><span class="s1">'input_ids'</span><span class="p">],</span> <span class="n">enc_inputs</span><span class="p">[</span><span class="s1">'token_type_ids'</span><span class="p">],</span> <span class="n">enc_inputs</span><span class="p">[</span><span class="s1">'attention_mask'</span><span class="p">])</span>

<span class="n">printStats</span><span class="p">(</span><span class="s2">"BERT"</span><span class="p">,</span> <span class="n">timings</span><span class="p">,</span> <span class="n">batch_size</span><span class="p">)</span>
</pre>
         </div>
        </div>
       </div>
       <div class="nboutput nblast docutils container">
        <div class="prompt empty docutils container">
        </div>
        <div class="output_area docutils container">
         <div class="highlight">
          <pre>
Warm up ...
Start timing ...

BERT =================================
batch size=4, num iterations=50
  Median text batches/second: 1776.7, mean: 1771.1
  Median latency: 0.002251, mean: 0.002259, 99th_p: 0.002305, std_dev: 0.000015

</pre>
         </div>
        </div>
       </div>
       <p>
         6. Conclusion
       </p>
       <p>
        In this notebook, we have walked through the complete process of compiling TorchScript models with Torch-TensorRT for Masked Language Modeling with Hugging Face’s
        <code class="docutils literal notranslate">
         <span class="pre">
          bert-base-uncased
         </span>
        </code>
         transformer and testing the performance impact of the optimization. With Torch-TensorRT on an NVIDIA A100 GPU, we observe the speedups indicated below. These acceleration numbers will vary from GPU to GPU (as well as implementation to implementation based on the ops used) and we encourage you to try out the latest
generation of data center compute cards for maximum acceleration.
       </p>
       <p>
         Scripted (GPU): 1.0x, Traced (GPU): 1.62x, Torch-TensorRT (FP32): 2.14x, Torch-TensorRT (FP16): 3.15x
       </p>
       <h3 id="What’s-next">
        What’s next
        <a class="headerlink" href="#What’s-next" title="Permalink to this headline">
         ¶
        </a>
       </h3>
       <p>
         Now it’s time to try Torch-TensorRT on your own model. If you run into any issues, you can file them at
        <a class="reference external" href="https://github.com/NVIDIA/Torch-TensorRT">
         https://github.com/NVIDIA/Torch-TensorRT
        </a>
        . Your involvement will help future development of Torch-TensorRT.
       </p>
       <div class="nbinput nblast docutils container">
        <div class="prompt highlight-none notranslate">
         <div class="highlight">
          <pre><span></span>[ ]:
</pre>
         </div>
        </div>
        <div class="input_area highlight-ipython3 notranslate">
         <div class="highlight">
          <pre>
<span></span>
</pre>
         </div>
        </div>
       </div>
      </article>
     </div>
    </div>
   </main>
  </div>
  <footer class="md-footer">
   <div class="md-footer-nav">
    <nav class="md-footer-nav__inner md-grid">
     <a class="md-flex md-footer-nav__link md-footer-nav__link--prev" href="EfficientNet-example.html" rel="prev" title="Torch-TensorRT Getting Started - EfficientNet-B0">
      <div class="md-flex__cell md-flex__cell--shrink">
       <i class="md-icon md-icon--arrow-back md-footer-nav__button">
       </i>
      </div>
      <div class="md-flex__cell md-flex__cell--stretch md-footer-nav__title">
       <span class="md-flex__ellipsis">
        <span class="md-footer-nav__direction">
         Previous
        </span>
        Torch-TensorRT Getting Started - EfficientNet-B0
       </span>
      </div>
     </a>
     <a class="md-flex md-footer-nav__link md-footer-nav__link--next" href="lenet-getting-started.html" rel="next" title="Torch-TensorRT Getting Started - LeNet">
      <div class="md-flex__cell md-flex__cell--stretch md-footer-nav__title">
       <span class="md-flex__ellipsis">
        <span class="md-footer-nav__direction">
         Next
        </span>
        Torch-TensorRT Getting Started - LeNet
       </span>
      </div>
      <div class="md-flex__cell md-flex__cell--shrink">
       <i class="md-icon md-icon--arrow-forward md-footer-nav__button">
       </i>
      </div>
     </a>
    </nav>
   </div>
   <div class="md-footer-meta md-typeset">
    <div class="md-footer-meta__inner md-grid">
     <div class="md-footer-copyright">
      <div class="md-footer-copyright__highlight">
       © Copyright 2021, NVIDIA Corporation.
      </div>
      Created using
      <a href="http://www.sphinx-doc.org/">
       Sphinx
      </a>
      3.1.2.
             and
      <a href="https://github.com/bashtage/sphinx-material/">
       Material for
              Sphinx
      </a>
     </div>
    </div>
   </div>
  </footer>
  <script src="../_static/javascripts/application.js">
  </script>
  <script>
   app.initialize({version: "1.0.4", url: {base: ".."}})
  </script>
 </body>
</html>