<!DOCTYPE html>
<html>
 <head>
  <meta charset="utf-8"/>
  <meta content="width=device-width, initial-scale=1.0" name="viewport"/>
  <meta content="ie=edge" http-equiv="x-ua-compatible"/>
  <meta content="Copy to clipboard" name="lang:clipboard.copy"/>
  <meta content="Copied to clipboard" name="lang:clipboard.copied"/>
  <meta content="en" name="lang:search.language"/>
  <meta content="True" name="lang:search.pipeline.stopwords"/>
  <meta content="True" name="lang:search.pipeline.trimmer"/>
  <meta content="No matching documents" name="lang:search.result.none"/>
  <meta content="1 matching document" name="lang:search.result.one"/>
  <meta content="# matching documents" name="lang:search.result.other"/>
  <meta content="[\s\-]+" name="lang:search.tokenizer"/>
  <link crossorigin="" href="https://fonts.gstatic.com/" rel="preconnect"/>
  <link href="https://fonts.googleapis.com/css?family=Roboto+Mono:400,500,700|Roboto:300,400,400i,700&amp;display=fallback" rel="stylesheet"/>
  <style>
   body,
      input {
        font-family: "Roboto", "Helvetica Neue", Helvetica, Arial, sans-serif
      }

      code,
      kbd,
      pre {
        font-family: "Roboto Mono", "Courier New", Courier, monospace
      }
  </style>
  <link href="../_static/stylesheets/application.css" rel="stylesheet"/>
  <link href="../_static/stylesheets/application-palette.css" rel="stylesheet"/>
  <link href="../_static/stylesheets/application-fixes.css" rel="stylesheet"/>
  <link href="../_static/fonts/material-icons.css" rel="stylesheet"/>
  <meta content="#84bd00" name="theme-color"/>
  <script src="../_static/javascripts/modernizr.js">
  </script>
  <title>
   Lowering Phase — TRTorch v0.1.0 documentation
  </title>
  <link href="../_static/material.css" rel="stylesheet" type="text/css"/>
  <link href="../_static/pygments.css" rel="stylesheet" type="text/css"/>
  <link href="../_static/collapsible-lists/css/tree_view.css" rel="stylesheet" type="text/css"/>
  <script data-url_root="../" id="documentation_options" src="../_static/documentation_options.js">
  </script>
  <script src="../_static/jquery.js">
  </script>
  <script src="../_static/underscore.js">
  </script>
  <script src="../_static/doctools.js">
  </script>
  <script src="../_static/language_data.js">
  </script>
  <script src="../_static/collapsible-lists/js/CollapsibleLists.compressed.js">
  </script>
  <script src="../_static/collapsible-lists/js/apply-collapsible-lists.js">
  </script>
  <script crossorigin="anonymous" integrity="sha256-Ae2Vz/4ePdIu6ZyI/5ZGsYnb+m0JlOmKPjt6XZ9JJkA=" src="https://cdnjs.cloudflare.com/ajax/libs/require.js/2.3.4/require.min.js">
  </script>
  <link href="../genindex.html" rel="index" title="Index"/>
  <link href="../search.html" rel="search" title="Search"/>
  <link href="conversion.html" rel="next" title="Conversion Phase"/>
  <link href="system_overview.html" rel="prev" title="System Overview"/>
 </head>
 <body data-md-color-accent="light-green" data-md-color-primary="light-green" dir="ltr">
  <svg class="md-svg">
   <defs data-children-count="0">
    <svg height="448" id="__github" viewbox="0 0 416 448" width="416" xmlns="http://www.w3.org/2000/svg">
     <path d="M160 304q0 10-3.125 20.5t-10.75 19T128 352t-18.125-8.5-10.75-19T96 304t3.125-20.5 10.75-19T128 256t18.125 8.5 10.75 19T160 304zm160 0q0 10-3.125 20.5t-10.75 19T288 352t-18.125-8.5-10.75-19T256 304t3.125-20.5 10.75-19T288 256t18.125 8.5 10.75 19T320 304zm40 0q0-30-17.25-51T296 232q-10.25 0-48.75 5.25Q229.5 240 208 240t-39.25-2.75Q130.75 232 120 232q-29.5 0-46.75 21T56 304q0 22 8 38.375t20.25 25.75 30.5 15 35 7.375 37.25 1.75h42q20.5 0 37.25-1.75t35-7.375 30.5-15 20.25-25.75T360 304zm56-44q0 51.75-15.25 82.75-9.5 19.25-26.375 33.25t-35.25 21.5-42.5 11.875-42.875 5.5T212 416q-19.5 0-35.5-.75t-36.875-3.125-38.125-7.5-34.25-12.875T37 371.5t-21.5-28.75Q0 312 0 260q0-59.25 34-99-6.75-20.5-6.75-42.5 0-29 12.75-54.5 27 0 47.5 9.875t47.25 30.875Q171.5 96 212 96q37 0 70 8 26.25-20.5 46.75-30.25T376 64q12.75 25.5 12.75 54.5 0 21.75-6.75 42 34 40 34 99.5z" fill="currentColor">
     </path>
    </svg>
   </defs>
  </svg>
  <input class="md-toggle" data-md-toggle="drawer" id="__drawer" type="checkbox"/>
  <input class="md-toggle" data-md-toggle="search" id="__search" type="checkbox"/>
  <label class="md-overlay" data-md-component="overlay" for="__drawer">
  </label>
  <a class="md-skip" href="#contributors/lowering" tabindex="1">
   Skip to content
  </a>
  <header class="md-header" data-md-component="header">
   <nav class="md-header-nav md-grid">
    <div class="md-flex navheader">
     <div class="md-flex__cell md-flex__cell--shrink">
      <a class="md-header-nav__button md-logo" href="../index.html" title="TRTorch v0.1.0 documentation">
       <i class="md-icon">
        
       </i>
      </a>
     </div>
     <div class="md-flex__cell md-flex__cell--shrink">
      <label class="md-icon md-icon--menu md-header-nav__button" for="__drawer">
      </label>
     </div>
     <div class="md-flex__cell md-flex__cell--stretch">
      <div class="md-flex__ellipsis md-header-nav__title" data-md-component="title">
       <span class="md-header-nav__topic">
        TRTorch
       </span>
       <span class="md-header-nav__topic">
        Lowering Phase
       </span>
      </div>
     </div>
     <div class="md-flex__cell md-flex__cell--shrink">
      <label class="md-icon md-icon--search md-header-nav__button" for="__search">
      </label>
      <div class="md-search" data-md-component="search" role="dialog">
       <label class="md-search__overlay" for="__search">
       </label>
       <div class="md-search__inner" role="search">
        <form action="../search.html" class="md-search__form" method="GET" name="search">
         <input autocapitalize="off" autocomplete="off" class="md-search__input" data-md-component="query" data-md-state="active" name="q" placeholder="Search" spellcheck="false" type="text"/>
         <label class="md-icon md-search__icon" for="__search">
         </label>
         <button class="md-icon md-search__icon" data-md-component="reset" tabindex="-1" type="reset">
          
         </button>
        </form>
        <div class="md-search__output">
         <div class="md-search__scrollwrap" data-md-scrollfix="">
          <div class="md-search-result" data-md-component="result">
           <div class="md-search-result__meta">
            Type to start searching
           </div>
           <ol class="md-search-result__list">
           </ol>
          </div>
         </div>
        </div>
       </div>
      </div>
     </div>
     <div class="md-flex__cell md-flex__cell--shrink">
      <div class="md-header-nav__source">
       <a class="md-source" data-md-source="github" href="https://github.com/nvidia/TRTorch/" title="Go to repository">
        <div class="md-source__icon">
         <svg height="28" viewbox="0 0 24 24" width="28" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
          <use height="24" width="24" xlink:href="#__github">
          </use>
         </svg>
        </div>
        <div class="md-source__repository">
         TRTorch
        </div>
       </a>
      </div>
     </div>
     <div class="md-flex__cell md-flex__cell--shrink dropdown">
      <button class="dropdownbutton">
       Versions
      </button>
      <div class="dropdown-content md-hero">
       <a href="https://nvidia.github.io/TRTorch/" title="master">
        master
       </a>
       <a href="https://nvidia.github.io/TRTorch/v0.1.0/" title="v0.1.0">
        v0.1.0
       </a>
       <a href="https://nvidia.github.io/TRTorch/v0.0.3/" title="v0.0.3">
        v0.0.3
       </a>
       <a href="https://nvidia.github.io/TRTorch/v0.0.2/" title="v0.0.2">
        v0.0.2
       </a>
       <a href="https://nvidia.github.io/TRTorch/v0.0.1/" title="v0.0.1">
        v0.0.1
       </a>
      </div>
     </div>
    </div>
   </nav>
  </header>
  <div class="md-container">
   <nav class="md-tabs" data-md-component="tabs">
    <div class="md-tabs__inner md-grid">
     <ul class="md-tabs__list">
      <li class="md-tabs__item">
       <a class="md-tabs__link" href="../index.html">
        TRTorch v0.1.0 documentation
       </a>
      </li>
      <li class="md-tabs__item">
       <a class="md-tabs__link" href="system_overview.html">
        System Overview
       </a>
      </li>
     </ul>
    </div>
   </nav>
   <main class="md-main">
    <div class="md-main__inner md-grid" data-md-component="container">
     <div class="md-sidebar md-sidebar--primary" data-md-component="navigation">
      <div class="md-sidebar__scrollwrap">
       <div class="md-sidebar__inner">
        <nav class="md-nav md-nav--primary" data-md-level="0">
         <label class="md-nav__title md-nav__title--site" for="__drawer">
          <a class="md-nav__button md-logo" href="../index.html" title="TRTorch v0.1.0 documentation">
           <i class="md-icon">
            
           </i>
          </a>
          <a href="../index.html" title="TRTorch v0.1.0 documentation">
           TRTorch
          </a>
         </label>
         <div class="md-nav__source">
          <a class="md-source" data-md-source="github" href="https://github.com/nvidia/TRTorch/" title="Go to repository">
           <div class="md-source__icon">
            <svg height="28" viewbox="0 0 24 24" width="28" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
             <use height="24" width="24" xlink:href="#__github">
             </use>
            </svg>
           </div>
           <div class="md-source__repository">
            TRTorch
           </div>
          </a>
         </div>
         <ul class="md-nav__list">
          <li class="md-nav__item">
           <span class="md-nav__link caption">
            <span class="caption-text">
             Getting Started
            </span>
           </span>
          </li>
          <li class="md-nav__item">
           <a class="md-nav__link" href="../tutorials/installation.html">
            Installation
           </a>
          </li>
          <li class="md-nav__item">
           <a class="md-nav__link" href="../tutorials/getting_started.html">
            Getting Started
           </a>
          </li>
          <li class="md-nav__item">
           <a class="md-nav__link" href="../tutorials/ptq.html">
            Post Training Quantization (PTQ)
           </a>
          </li>
          <li class="md-nav__item">
           <a class="md-nav__link" href="../tutorials/trtorchc.html">
            trtorchc
           </a>
          </li>
          <li class="md-nav__item">
           <a class="md-nav__link" href="../tutorials/use_from_pytorch.html">
            Using TRTorch Directly From PyTorch
           </a>
          </li>
          <li class="md-nav__item">
           <span class="md-nav__link caption">
            <span class="caption-text">
             Notebooks
            </span>
           </span>
          </li>
          <li class="md-nav__item">
           <a class="md-nav__link" href="../_notebooks/lenet-getting-started.html">
            TRTorch Getting Started - LeNet
           </a>
          </li>
          <li class="md-nav__item">
           <a class="md-nav__link" href="../_notebooks/ssd-object-detection-demo.html">
            Object Detection with TRTorch (SSD)
           </a>
          </li>
          <li class="md-nav__item">
           <span class="md-nav__link caption">
            <span class="caption-text">
              Python API Documentation
            </span>
           </span>
          </li>
          <li class="md-nav__item">
           <a class="md-nav__link" href="../py_api/trtorch.html">
            trtorch
           </a>
          </li>
          <li class="md-nav__item">
           <a class="md-nav__link" href="../py_api/logging.html">
            trtorch.logging
           </a>
          </li>
          <li class="md-nav__item">
           <span class="md-nav__link caption">
            <span class="caption-text">
              C++ API Documentation
            </span>
           </span>
          </li>
          <li class="md-nav__item">
           <a class="md-nav__link" href="../_cpp_api/trtorch_cpp.html">
            TRTorch C++ API
           </a>
          </li>
          <li class="md-nav__item">
           <span class="md-nav__link caption">
            <span class="caption-text">
             Contributor Documentation
            </span>
           </span>
          </li>
          <li class="md-nav__item">
           <a class="md-nav__link" href="system_overview.html">
            System Overview
           </a>
          </li>
          <li class="md-nav__item">
           <a class="md-nav__link" href="writing_converters.html">
            Writing Converters
           </a>
          </li>
          <li class="md-nav__item">
           <a class="md-nav__link" href="useful_links.html">
            Useful Links for TRTorch Development
           </a>
          </li>
         </ul>
        </nav>
       </div>
      </div>
     </div>
     <div class="md-sidebar md-sidebar--secondary" data-md-component="toc">
      <div class="md-sidebar__scrollwrap">
       <div class="md-sidebar__inner">
        <nav class="md-nav md-nav--secondary">
         <label class="md-nav__title" for="__toc">
          Contents
         </label>
         <ul class="md-nav__list" data-md-scrollfix="">
          <li class="md-nav__item">
           <a class="md-nav__link" href="#contributors-lowering--page-root">
            Lowering Phase
           </a>
           <nav class="md-nav">
            <ul class="md-nav__list">
             <li class="md-nav__item">
              <a class="md-nav__link" href="#passes-used">
               Passes Used
              </a>
              <nav class="md-nav">
               <ul class="md-nav__list">
                <li class="md-nav__item">
                 <a class="md-nav__link" href="#eliminatecommonsubexpression">
                  EliminateCommonSubexpression
                 </a>
                </li>
                <li class="md-nav__item">
                 <a class="md-nav__link" href="#eliminate-dead-code">
                  Eliminate Dead Code
                 </a>
                </li>
                <li class="md-nav__item">
                 <a class="md-nav__link" href="#eliminate-exeception-or-pass-pattern">
                   Eliminate Exception Or Pass Pattern
                 </a>
                </li>
                <li class="md-nav__item">
                 <a class="md-nav__link" href="#eliminate-redundant-gaurds">
                   Eliminate Redundant Guards
                 </a>
                </li>
                <li class="md-nav__item">
                 <a class="md-nav__link" href="#freeze-module">
                  Freeze Module
                 </a>
                </li>
                <li class="md-nav__item">
                 <a class="md-nav__link" href="#fuse-addmm-branches">
                  Fuse AddMM Branches
                 </a>
                </li>
                <li class="md-nav__item">
                 <a class="md-nav__link" href="#fuse-linear">
                  Fuse Linear
                 </a>
                </li>
                <li class="md-nav__item">
                 <a class="md-nav__link" href="#fuse-flatten-linear">
                  Fuse Flatten Linear
                 </a>
                </li>
                <li class="md-nav__item">
                 <a class="md-nav__link" href="#lower-graph">
                  Lower Graph
                 </a>
                </li>
                <li class="md-nav__item">
                 <a class="md-nav__link" href="#lower-tuples">
                  Lower Tuples
                 </a>
                </li>
                <li class="md-nav__item">
                 <a class="md-nav__link" href="#peephole-optimze">
                   Peephole Optimize
                 </a>
                </li>
                <li class="md-nav__item">
                 <a class="md-nav__link" href="#remove-contiguous">
                  Remove Contiguous
                 </a>
                </li>
                <li class="md-nav__item">
                 <a class="md-nav__link" href="#remove-dropout">
                  Remove Dropout
                 </a>
                </li>
                <li class="md-nav__item">
                 <a class="md-nav__link" href="#remove-to">
                  Remove To
                 </a>
                </li>
                <li class="md-nav__item">
                 <a class="md-nav__link" href="#unpack-addmm">
                  Unpack AddMM
                 </a>
                </li>
                <li class="md-nav__item">
                 <a class="md-nav__link" href="#unpack-logsoftmax">
                  Unpack LogSoftmax
                 </a>
                </li>
                <li class="md-nav__item">
                 <a class="md-nav__link" href="#unroll-loops">
                  Unroll Loops
                 </a>
                </li>
               </ul>
              </nav>
             </li>
            </ul>
           </nav>
          </li>
          <li class="md-nav__item">
           <a class="md-nav__extra_link" href="../_sources/contributors/lowering.rst.txt">
            Show Source
           </a>
          </li>
          <li class="md-nav__item" id="searchbox">
          </li>
         </ul>
        </nav>
       </div>
      </div>
     </div>
     <div class="md-content">
      <article class="md-content__inner md-typeset" role="main">
       <span id="lowering">
       </span>
       <h1 id="contributors-lowering--page-root">
        Lowering Phase
        <a class="headerlink" href="#contributors-lowering--page-root" title="Permalink to this headline">
         ¶
        </a>
       </h1>
       <p>
         The lowering phase is made up of passes which are operations which map a graph from a high level representation
to a lower level one. Each pass does something specific, for instance inlining method calls. The idea is to
significantly reduce what the conversion phase needs to be able to handle when actually mapping to TensorRT.
We aim for closer to 1-&gt;1 op conversion vs looking for applicable subgraphs, limiting the number of converters and
reducing the scope of each converter.
       </p>
       <p>
        You can see the effects of each pass by setting the log level to
        <code class="docutils literal notranslate">
         <span class="pre">
          Level::kGraph
         </span>
        </code>
       </p>
       <h2 id="passes-used">
        Passes Used
        <a class="headerlink" href="#passes-used" title="Permalink to this headline">
         ¶
        </a>
       </h2>
       <h3 id="eliminatecommonsubexpression">
        EliminateCommonSubexpression
        <a class="headerlink" href="#eliminatecommonsubexpression" title="Permalink to this headline">
         ¶
        </a>
       </h3>
       <blockquote>
        <div>
         <p>
          <a class="reference external" href="https://github.com/pytorch/pytorch/blob/master/torch/csrc/jit/passes/common_subexpression_elimination.h">
           torch/csrc/jit/passes/common_subexpression_elimination.h
          </a>
         </p>
        </div>
       </blockquote>
       <p>
        Removes common subexpressions in the graph
       </p>
       <h3 id="eliminate-dead-code">
        Eliminate Dead Code
        <a class="headerlink" href="#eliminate-dead-code" title="Permalink to this headline">
         ¶
        </a>
       </h3>
       <blockquote>
        <div>
         <p>
          <a class="reference external" href="https://github.com/pytorch/pytorch/blob/master/torch/csrc/jit/passes/dead_code_elimination.h">
           torch/csrc/jit/passes/dead_code_elimination.h
          </a>
         </p>
        </div>
       </blockquote>
       <p>
        Dead code elimination will check if a node has side effects and not delete it if it does.
       </p>
       <h3 id="eliminate-exeception-or-pass-pattern">
         Eliminate Exception Or Pass Pattern
        <a class="headerlink" href="#eliminate-exeception-or-pass-pattern" title="Permalink to this headline">
         ¶
        </a>
       </h3>
       <blockquote>
        <div>
         <p>
          <a class="reference external" href="https://github.com/nvidia/trtorch/blob/master/core/lowering/passes/exception_elimination.cpp">
           trtorch/core/lowering/passes/exception_elimination.cpp
          </a>
         </p>
        </div>
       </blockquote>
       <p>
         A common pattern in scripted modules is dimension guards which will throw exceptions if
the input dimension is not what was expected.
       </p>
       <div class="highlight-none notranslate">
        <div class="highlight">
         <pre><span></span>%1013 : bool = aten::ne(%1012, %24) # ~/.local/lib/python3.6/site-packages/torch/nn/modules/batchnorm.py:248:11
    = prim::If(%1013) # ~/.local/lib/python3.6/site-packages/torch/nn/modules/batchnorm.py:248:8
    block0():
        = prim::RaiseException(%23) # ~/.local/lib/python3.6/site-packages/torch/nn/modules/batchnorm.py:249:12
    -&gt; ()
    block1():
    -&gt; ()
</pre>
        </div>
       </div>
       <p>
         Since we are resolving all of this at compile time and there are no exceptions in the TensorRT graph, we just remove it.
       </p>
       <h3 id="eliminate-redundant-gaurds">
         Eliminate Redundant Guards
        <a class="headerlink" href="#eliminate-redundant-gaurds" title="Permalink to this headline">
         ¶
        </a>
       </h3>
       <blockquote>
        <div>
         <p>
          <a class="reference external" href="https://github.com/pytorch/pytorch/blob/master/torch/csrc/jit/passes/guard_elimination.h">
           torch/csrc/jit/passes/guard_elimination.h
          </a>
         </p>
        </div>
       </blockquote>
       <p>
        Eliminate redundant guards for ops whose outputs are fully determined by their inputs i.e. if inputs to such ops are
guarded we are allowed to remove a guard on ops’ outputs
       </p>
       <h3 id="freeze-module">
        Freeze Module
        <a class="headerlink" href="#freeze-module" title="Permalink to this headline">
         ¶
        </a>
       </h3>
       <blockquote>
        <div>
         <p>
          <a class="reference external" href="https://github.com/pytorch/pytorch/blob/master/torch/csrc/jit/passes/freeze_module.h">
           torch/csrc/jit/passes/freeze_module.h
          </a>
         </p>
        </div>
       </blockquote>
       <p>
         Freeze attributes and inline constants and modules. Propagates constants in the graph.
       </p>
       <h3 id="fuse-addmm-branches">
        Fuse AddMM Branches
        <a class="headerlink" href="#fuse-addmm-branches" title="Permalink to this headline">
         ¶
        </a>
       </h3>
       <blockquote>
        <div>
         <p>
          <a class="reference external" href="https://github.com/nvidia/trtorch/blob/master/core/lowering/passes/fuse_addmm_branches.cpp">
           trtorch/core/lowering/passes/fuse_addmm_branches.cpp
          </a>
         </p>
        </div>
       </blockquote>
       <p>
         A common pattern in scripted modules is that tensors of different dimensions use different constructions for implementing linear layers. We fuse these
different variants into a single one that will get caught by the Unpack AddMM pass.
       </p>
       <div class="highlight-none notranslate">
        <div class="highlight">
         <pre><span></span>%ret : Tensor = prim::If(%622)
block0():
  %ret.1 : Tensor = aten::addmm(%self.fc.bias, %x9.1, %3677, %3, %3)
  -&gt; (%ret.1)
block1():
  %output.1 : Tensor = aten::matmul(%x9.1, %3677)
  %output0.1 : Tensor = aten::add_(%output.1, %self.fc.bias, %3)
  -&gt; (%output0.1)
</pre>
        </div>
       </div>
       <p>
        We fuse this set of blocks into a graph like this:
       </p>
       <div class="highlight-none notranslate">
        <div class="highlight">
         <pre><span></span>%ret : Tensor = aten::addmm(%self.fc.bias, %x9.1, %3677, %3, %3)
</pre>
        </div>
       </div>
       <h3 id="fuse-linear">
        Fuse Linear
        <a class="headerlink" href="#fuse-linear" title="Permalink to this headline">
         ¶
        </a>
       </h3>
       <blockquote>
        <div>
         <p>
          <a class="reference external" href="https://github.com/pytorch/pytorch/blob/master/torch/csrc/jit/passes/fuse_linear.h">
           torch/csrc/jit/passes/fuse_linear.h
          </a>
         </p>
        </div>
       </blockquote>
       <p>
        Match the
        <code class="docutils literal notranslate">
         <span class="pre">
          aten::linear
         </span>
        </code>
        pattern and fuse it into a single
        <code class="docutils literal notranslate">
         <span class="pre">
          aten::linear
         </span>
        </code>
         This pass fuses the addmm or matmul + add generated by JIT back to linear
       </p>
       <h3 id="fuse-flatten-linear">
        Fuse Flatten Linear
        <a class="headerlink" href="#fuse-flatten-linear" title="Permalink to this headline">
         ¶
        </a>
       </h3>
       <blockquote>
        <div>
         <p>
          <a class="reference external" href="https://github.com/nvidia/trtorch/blob/master/core/lowering/passes/fuse_flatten_linear.cpp">
           trtorch/core/lowering/passes/fuse_flatten_linear.cpp
          </a>
         </p>
        </div>
       </blockquote>
       <p>
         TensorRT implicitly flattens input layers into fully connected layers when they are higher than 1D. So when there is a
        <code class="docutils literal notranslate">
         <span class="pre">
          aten::flatten
         </span>
        </code>
        -&gt;
        <code class="docutils literal notranslate">
         <span class="pre">
          aten::linear
         </span>
        </code>
        pattern we remove the
        <code class="docutils literal notranslate">
         <span class="pre">
          aten::flatten
         </span>
        </code>
        .
       </p>
       <h3 id="lower-graph">
        Lower Graph
        <a class="headerlink" href="#lower-graph" title="Permalink to this headline">
         ¶
        </a>
       </h3>
       <blockquote>
        <div>
         <p>
          <a class="reference external" href="https://github.com/pytorch/pytorch/blob/master/torch/csrc/jit/passes/lower_graph.h">
           torch/csrc/jit/passes/lower_graph.h
          </a>
         </p>
        </div>
       </blockquote>
       <p>
         Given a graph of a method whose first argument is %self, lower it to a graph where
all attributes accesses are replaced with explicit inputs of the graph
(rather than results of prim::GetAttr executed on %self). Returns a tuple
(graph, parameters) where the last module.parameters.size() inputs to the
graph are the trainable parameters used in this method. The remaining inputs
are the true inputs to the function.
       </p>
       <h3 id="lower-tuples">
        Lower Tuples
        <a class="headerlink" href="#lower-tuples" title="Permalink to this headline">
         ¶
        </a>
       </h3>
       <blockquote>
        <div>
         <p>
          <a class="reference external" href="https://github.com/pytorch/pytorch/blob/master/torch/csrc/jit/passes/lower_tuples.h">
           torch/csrc/jit/passes/lower_tuples.h
          </a>
         </p>
        </div>
       </blockquote>
       <ul class="simple">
        <li>
         <p>
          <code class="docutils literal notranslate">
           <span class="pre">
            LowerSimpleTuples
           </span>
          </code>
          :
         </p>
        </li>
       </ul>
       <p>
        Removes tuples where TupleConstruct and TupleUnpack are matched but leaves tuples in place across if statements, loops, and as inputs/outputs
       </p>
       <ul class="simple">
        <li>
         <p>
          <code class="docutils literal notranslate">
           <span class="pre">
            LowerAllTuples
           </span>
          </code>
          :
         </p>
        </li>
       </ul>
       <p>
        Removes _all_ tuples and raises an error if some cannot be removed, this is used by ONNX to ensure there are not tuples before conversion, but will not work on graphs whose inputs contain tuples.
       </p>
       <h3 id="peephole-optimze">
         Peephole Optimize
        <a class="headerlink" href="#peephole-optimze" title="Permalink to this headline">
         ¶
        </a>
       </h3>
       <blockquote>
        <div>
         <p>
           <a class="reference external" href="https://github.com/pytorch/pytorch/blob/master/torch/csrc/jit/passes/peephole.h">
            torch/csrc/jit/passes/peephole.h
          </a>
         </p>
        </div>
       </blockquote>
       <p>
        The intent for this optimization pass is to catch all of the small, easy to catch peephole optimizations you might be interested in doing.
       </p>
       <dl class="simple">
        <dt>
         Right now, it does:
        </dt>
        <dd>
         <ul class="simple">
          <li>
           <p>
            Eliminate no-op ‘expand’ nodes
           </p>
          </li>
          <li>
           <p>
             Simplify x.t().t() to x
           </p>
          </li>
         </ul>
        </dd>
       </dl>
       <h3 id="remove-contiguous">
        Remove Contiguous
        <a class="headerlink" href="#remove-contiguous" title="Permalink to this headline">
         ¶
        </a>
       </h3>
       <blockquote>
        <div>
         <p>
          <a class="reference external" href="https://github.com/nvidia/trtorch/blob/master/core/lowering/passes/remove_contiguous.cpp">
           trtorch/core/lowering/passes/remove_contiguous.cpp
          </a>
         </p>
        </div>
       </blockquote>
       <p>
         Removes contiguous operators since, because we are using TensorRT, memory is already contiguous.
       </p>
       <h3 id="remove-dropout">
        Remove Dropout
        <a class="headerlink" href="#remove-dropout" title="Permalink to this headline">
         ¶
        </a>
       </h3>
       <blockquote>
        <div>
         <p>
          <a class="reference external" href="https://github.com/nvidia/trtorch/blob/master/core/lowering/passes/remove_dropout.cpp">
           trtorch/core/lowering/passes/remove_dropout.cpp
          </a>
         </p>
        </div>
       </blockquote>
       <p>
        Removes dropout operators since we are doing inference.
       </p>
       <h3 id="remove-to">
        Remove To
        <a class="headerlink" href="#remove-to" title="Permalink to this headline">
         ¶
        </a>
       </h3>
       <blockquote>
        <div>
         <p>
          <a class="reference external" href="https://github.com/nvidia/trtorch/blob/master/core/lowering/passes/remove_to.cpp">
           trtorch/core/lowering/passes/remove_to.cpp
          </a>
         </p>
        </div>
       </blockquote>
       <p>
        Removes
        <code class="docutils literal notranslate">
         <span class="pre">
          aten::to
         </span>
        </code>
         operators that do casting, since TensorRT manages it itself. It is important that this is one of the last passes run so that
other passes have a chance to move required cast operators out of the main namespace.
       </p>
       <h3 id="unpack-addmm">
        Unpack AddMM
        <a class="headerlink" href="#unpack-addmm" title="Permalink to this headline">
         ¶
        </a>
       </h3>
       <blockquote>
        <div>
         <p>
          <a class="reference external" href="https://github.com/nvidia/trtorch/blob/master/core/lowering/passes/unpack_addmm.cpp">
           trtorch/core/lowering/passes/unpack_addmm.cpp
          </a>
         </p>
        </div>
       </blockquote>
       <p>
        Unpacks
        <code class="docutils literal notranslate">
         <span class="pre">
          aten::addmm
         </span>
        </code>
        into
        <code class="docutils literal notranslate">
         <span class="pre">
          aten::matmul
         </span>
        </code>
        and
        <code class="docutils literal notranslate">
         <span class="pre">
          aten::add_
         </span>
        </code>
        (with an additional
        <code class="docutils literal notranslate">
         <span class="pre">
          trt::const
         </span>
        </code>
        op to freeze the bias in the TensorRT graph). This lets us reuse the
        <code class="docutils literal notranslate">
         <span class="pre">
          aten::matmul
         </span>
        </code>
        and
        <code class="docutils literal notranslate">
         <span class="pre">
          aten::add_
         </span>
        </code>
        converters instead of needing a dedicated converter.
       </p>
       <h3 id="unpack-logsoftmax">
        Unpack LogSoftmax
        <a class="headerlink" href="#unpack-logsoftmax" title="Permalink to this headline">
         ¶
        </a>
       </h3>
       <blockquote>
        <div>
         <p>
          <a class="reference external" href="https://github.com/nvidia/trtorch/blob/master/core/lowering/passes/unpack_log_softmax.cpp">
           trtorch/core/lowering/passes/unpack_log_softmax.cpp
          </a>
         </p>
        </div>
       </blockquote>
       <p>
        Unpacks
        <code class="docutils literal notranslate">
         <span class="pre">
          aten::logsoftmax
         </span>
        </code>
        into
        <code class="docutils literal notranslate">
         <span class="pre">
          aten::softmax
         </span>
        </code>
        and
        <code class="docutils literal notranslate">
         <span class="pre">
          aten::log
         </span>
        </code>
        . This lets us reuse the
        <code class="docutils literal notranslate">
         <span class="pre">
          aten::softmax
         </span>
        </code>
        and
        <code class="docutils literal notranslate">
         <span class="pre">
          aten::log
         </span>
        </code>
        converters instead of needing a dedicated converter.
       </p>
       <h3 id="unroll-loops">
        Unroll Loops
        <a class="headerlink" href="#unroll-loops" title="Permalink to this headline">
         ¶
        </a>
       </h3>
       <blockquote>
        <div>
         <p>
          <a class="reference external" href="https://github.com/pytorch/pytorch/blob/master/torch/csrc/jit/passes/loop_unrolling.h">
           torch/csrc/jit/passes/loop_unrolling.h
          </a>
         </p>
        </div>
       </blockquote>
       <p>
         Unrolls the operations of compatible loops (e.g. sufficiently short) so that you only have to go through the loop once.
       </p>
      </article>
     </div>
    </div>
   </main>
  </div>
  <footer class="md-footer">
   <div class="md-footer-nav">
    <nav class="md-footer-nav__inner md-grid">
     <a class="md-flex md-footer-nav__link md-footer-nav__link--prev" href="system_overview.html" rel="prev" title="System Overview">
      <div class="md-flex__cell md-flex__cell--shrink">
       <i class="md-icon md-icon--arrow-back md-footer-nav__button">
       </i>
      </div>
      <div class="md-flex__cell md-flex__cell--stretch md-footer-nav__title">
       <span class="md-flex__ellipsis">
        <span class="md-footer-nav__direction">
         Previous
        </span>
        System Overview
       </span>
      </div>
     </a>
     <a class="md-flex md-footer-nav__link md-footer-nav__link--next" href="conversion.html" rel="next" title="Conversion Phase">
      <div class="md-flex__cell md-flex__cell--stretch md-footer-nav__title">
       <span class="md-flex__ellipsis">
        <span class="md-footer-nav__direction">
         Next
        </span>
        Conversion Phase
       </span>
      </div>
      <div class="md-flex__cell md-flex__cell--shrink">
       <i class="md-icon md-icon--arrow-forward md-footer-nav__button">
       </i>
      </div>
     </a>
    </nav>
   </div>
   <div class="md-footer-meta md-typeset">
    <div class="md-footer-meta__inner md-grid">
     <div class="md-footer-copyright">
      <div class="md-footer-copyright__highlight">
       © Copyright 2020, NVIDIA Corporation.
      </div>
      Created using
      <a href="http://www.sphinx-doc.org/">
       Sphinx
      </a>
      3.1.2.
             and
      <a href="https://github.com/bashtage/sphinx-material/">
       Material for
              Sphinx
      </a>
     </div>
    </div>
   </div>
  </footer>
  <script src="../_static/javascripts/application.js">
  </script>
  <script>
   app.initialize({version: "1.0.4", url: {base: ".."}})
  </script>
 </body>
</html>