<!DOCTYPE html>
<html lang="en">
 <head>
  <meta charset="utf-8"/>
  <!-- Single viewport declaration. The page previously declared viewport twice
       with conflicting values; browsers honor the last one, which is kept here. -->
  <meta content="width=device-width,initial-scale=1" name="viewport"/>
  <meta content="Docutils 0.17.1: http://docutils.sourceforge.net/" name="generator"/>
  <meta content="ie=edge" http-equiv="x-ua-compatible"/>
  <!-- Material-theme localization strings consumed by the theme's JS -->
  <meta content="Copy to clipboard" name="lang:clipboard.copy"/>
  <meta content="Copied to clipboard" name="lang:clipboard.copied"/>
  <meta content="en" name="lang:search.language"/>
  <meta content="True" name="lang:search.pipeline.stopwords"/>
  <meta content="True" name="lang:search.pipeline.trimmer"/>
  <meta content="No matching documents" name="lang:search.result.none"/>
  <meta content="1 matching document" name="lang:search.result.one"/>
  <meta content="# matching documents" name="lang:search.result.other"/>
  <meta content="[\s\-]+" name="lang:search.tokenizer"/>
  <!-- Web fonts: preconnect to the font host, then load Roboto / Roboto Mono -->
  <link crossorigin="" href="https://fonts.gstatic.com/" rel="preconnect"/>
  <link href="https://fonts.googleapis.com/css?family=Roboto+Mono:400,500,700|Roboto:300,400,400i,700&amp;display=fallback" rel="stylesheet"/>
  <style>
   body,
      input {
        font-family: "Roboto", "Helvetica Neue", Helvetica, Arial, sans-serif
      }

      code,
      kbd,
      pre {
        font-family: "Roboto Mono", "Courier New", Courier, monospace
      }
  </style>
  <!-- Theme and icon stylesheets -->
  <link href="../_static/stylesheets/application.css" rel="stylesheet"/>
  <link href="../_static/stylesheets/application-palette.css" rel="stylesheet"/>
  <link href="../_static/stylesheets/application-fixes.css" rel="stylesheet"/>
  <link href="../_static/fonts/material-icons.css" rel="stylesheet"/>
  <!-- theme-color must be a full CSS color; the previous value "84bd00"
       lacked the leading "#" and was ignored by browsers. -->
  <meta content="#84bd00" name="theme-color"/>
  <script src="../_static/javascripts/modernizr.js"></script>
  <title>
   Operators Supported — Torch-TensorRT v1.1.0 documentation
  </title>
  <!-- Sphinx stylesheets and scripts -->
  <link href="../_static/pygments.css" rel="stylesheet" type="text/css"/>
  <link href="../_static/material.css" rel="stylesheet" type="text/css"/>
  <link href="../_static/collapsible-lists/css/tree_view.css" rel="stylesheet" type="text/css"/>
  <script data-url_root="../" id="documentation_options" src="../_static/documentation_options.js"></script>
  <script src="../_static/jquery.js"></script>
  <script src="../_static/underscore.js"></script>
  <script src="../_static/doctools.js"></script>
  <script src="../_static/collapsible-lists/js/CollapsibleLists.compressed.js"></script>
  <script src="../_static/collapsible-lists/js/apply-collapsible-lists.js"></script>
  <script crossorigin="anonymous" integrity="sha256-Ae2Vz/4ePdIu6ZyI/5ZGsYnb+m0JlOmKPjt6XZ9JJkA=" src="https://cdnjs.cloudflare.com/ajax/libs/require.js/2.3.4/require.min.js"></script>
  <!-- Relational links for index/search/prev navigation -->
  <link href="../genindex.html" rel="index" title="Index"/>
  <link href="../search.html" rel="search" title="Search"/>
  <link href="../contributors/useful_links.html" rel="prev" title="Useful Links for Torch-TensorRT Development"/>
 </head>
 <body data-md-color-accent="light-green" data-md-color-primary="light-green" dir="ltr">
  <svg class="md-svg">
   <defs data-children-count="0">
     <svg height="448" id="__github" viewBox="0 0 416 448" width="416" xmlns="http://www.w3.org/2000/svg">
     <path d="M160 304q0 10-3.125 20.5t-10.75 19T128 352t-18.125-8.5-10.75-19T96 304t3.125-20.5 10.75-19T128 256t18.125 8.5 10.75 19T160 304zm160 0q0 10-3.125 20.5t-10.75 19T288 352t-18.125-8.5-10.75-19T256 304t3.125-20.5 10.75-19T288 256t18.125 8.5 10.75 19T320 304zm40 0q0-30-17.25-51T296 232q-10.25 0-48.75 5.25Q229.5 240 208 240t-39.25-2.75Q130.75 232 120 232q-29.5 0-46.75 21T56 304q0 22 8 38.375t20.25 25.75 30.5 15 35 7.375 37.25 1.75h42q20.5 0 37.25-1.75t35-7.375 30.5-15 20.25-25.75T360 304zm56-44q0 51.75-15.25 82.75-9.5 19.25-26.375 33.25t-35.25 21.5-42.5 11.875-42.875 5.5T212 416q-19.5 0-35.5-.75t-36.875-3.125-38.125-7.5-34.25-12.875T37 371.5t-21.5-28.75Q0 312 0 260q0-59.25 34-99-6.75-20.5-6.75-42.5 0-29 12.75-54.5 27 0 47.5 9.875t47.25 30.875Q171.5 96 212 96q37 0 70 8 26.25-20.5 46.75-30.25T376 64q12.75 25.5 12.75 54.5 0 21.75-6.75 42 34 40 34 99.5z" fill="currentColor">
     </path>
    </svg>
   </defs>
  </svg>
  <input class="md-toggle" data-md-toggle="drawer" id="__drawer" type="checkbox"/>
  <input class="md-toggle" data-md-toggle="search" id="__search" type="checkbox"/>
  <label class="md-overlay" data-md-component="overlay" for="__drawer">
  </label>
   <a class="md-skip" href="#indices/supported_ops">
    Skip to content
   </a>
  <header class="md-header" data-md-component="header">
   <nav class="md-header-nav md-grid">
    <div class="md-flex navheader">
     <div class="md-flex__cell md-flex__cell--shrink">
      <a class="md-header-nav__button md-logo" href="../index.html" title="Torch-TensorRT v1.1.0 documentation">
       <i class="md-icon">
        
       </i>
      </a>
     </div>
     <div class="md-flex__cell md-flex__cell--shrink">
      <label class="md-icon md-icon--menu md-header-nav__button" for="__drawer">
      </label>
     </div>
     <div class="md-flex__cell md-flex__cell--stretch">
      <div class="md-flex__ellipsis md-header-nav__title" data-md-component="title">
       <span class="md-header-nav__topic">
        Torch-TensorRT
       </span>
       <span class="md-header-nav__topic">
        Operators Supported
       </span>
      </div>
     </div>
     <div class="md-flex__cell md-flex__cell--shrink">
      <label class="md-icon md-icon--search md-header-nav__button" for="__search">
      </label>
      <div class="md-search" data-md-component="search" role="dialog">
       <label class="md-search__overlay" for="__search">
       </label>
       <div class="md-search__inner" role="search">
        <form action="../search.html" class="md-search__form" method="get" name="search">
         <input autocapitalize="off" autocomplete="off" class="md-search__input" data-md-component="query" data-md-state="active" name="q" placeholder="Search" spellcheck="false" type="text"/>
         <label class="md-icon md-search__icon" for="__search">
         </label>
         <button class="md-icon md-search__icon" data-md-component="reset" tabindex="-1" type="reset">
          
         </button>
        </form>
        <div class="md-search__output">
         <div class="md-search__scrollwrap" data-md-scrollfix="">
          <div class="md-search-result" data-md-component="result">
           <div class="md-search-result__meta">
            Type to start searching
           </div>
           <ol class="md-search-result__list">
           </ol>
          </div>
         </div>
        </div>
       </div>
      </div>
     </div>
     <div class="md-flex__cell md-flex__cell--shrink">
      <div class="md-header-nav__source">
       <a class="md-source" data-md-source="github" href="https://github.com/nvidia/Torch-TensorRT/" title="Go to repository">
        <div class="md-source__icon">
          <svg height="28" viewBox="0 0 24 24" width="28" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
          <use height="24" width="24" xlink:href="#__github">
          </use>
         </svg>
        </div>
        <div class="md-source__repository">
         Torch-TensorRT
        </div>
       </a>
      </div>
     </div>
     <div class="md-flex__cell md-flex__cell--shrink dropdown">
       <button class="dropdownbutton" type="button">
       Versions
      </button>
      <div class="dropdown-content md-hero">
       <a href="https://nvidia.github.io/Torch-TensorRT/" title="master">
        master
       </a>
       <a href="https://nvidia.github.io/Torch-TensorRT/v1.1.0/" title="v1.1.0">
        v1.1.0
       </a>
       <a href="https://nvidia.github.io/Torch-TensorRT/v1.0.0/" title="v1.0.0">
        v1.0.0
       </a>
       <a href="https://nvidia.github.io/Torch-TensorRT/v0.4.1/" title="v0.4.1">
        v0.4.1
       </a>
       <a href="https://nvidia.github.io/Torch-TensorRT/v0.4.0/" title="v0.4.0">
        v0.4.0
       </a>
       <a href="https://nvidia.github.io/Torch-TensorRT/v0.3.0/" title="v0.3.0">
        v0.3.0
       </a>
       <a href="https://nvidia.github.io/Torch-TensorRT/v0.2.0/" title="v0.2.0">
        v0.2.0
       </a>
       <a href="https://nvidia.github.io/Torch-TensorRT/v0.1.0/" title="v0.1.0">
        v0.1.0
       </a>
       <a href="https://nvidia.github.io/Torch-TensorRT/v0.0.3/" title="v0.0.3">
        v0.0.3
       </a>
       <a href="https://nvidia.github.io/Torch-TensorRT/v0.0.2/" title="v0.0.2">
        v0.0.2
       </a>
       <a href="https://nvidia.github.io/Torch-TensorRT/v0.0.1/" title="v0.0.1">
        v0.0.1
       </a>
      </div>
     </div>
    </div>
   </nav>
  </header>
  <div class="md-container">
   <nav class="md-tabs" data-md-component="tabs">
    <div class="md-tabs__inner md-grid">
     <ul class="md-tabs__list">
      <li class="md-tabs__item">
       <a class="md-tabs__link" href="../index.html">
        Torch-TensorRT v1.1.0 documentation
       </a>
      </li>
     </ul>
    </div>
   </nav>
   <main class="md-main">
    <div class="md-main__inner md-grid" data-md-component="container">
     <div class="md-sidebar md-sidebar--primary" data-md-component="navigation">
      <div class="md-sidebar__scrollwrap">
       <div class="md-sidebar__inner">
        <nav class="md-nav md-nav--primary" data-md-level="0">
         <label class="md-nav__title md-nav__title--site" for="__drawer">
          <a class="md-nav__button md-logo" href="../index.html" title="Torch-TensorRT v1.1.0 documentation">
           <i class="md-icon">
            
           </i>
          </a>
          <a href="../index.html" title="Torch-TensorRT v1.1.0 documentation">
           Torch-TensorRT
          </a>
         </label>
         <div class="md-nav__source">
          <a class="md-source" data-md-source="github" href="https://github.com/nvidia/Torch-TensorRT/" title="Go to repository">
           <div class="md-source__icon">
             <svg height="28" viewBox="0 0 24 24" width="28" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
             <use height="24" width="24" xlink:href="#__github">
             </use>
            </svg>
           </div>
           <div class="md-source__repository">
            Torch-TensorRT
           </div>
          </a>
         </div>
         <ul class="md-nav__list">
          <li class="md-nav__item">
           <span class="md-nav__link caption">
            <span class="caption-text">
             Getting Started
            </span>
           </span>
          </li>
          <li class="md-nav__item">
           <a class="md-nav__link" href="../tutorials/installation.html">
            Installation
           </a>
          </li>
          <li class="md-nav__item">
           <a class="md-nav__link" href="../tutorials/getting_started_with_cpp_api.html">
            Getting Started with C++
           </a>
          </li>
          <li class="md-nav__item">
           <a class="md-nav__link" href="../tutorials/getting_started_with_python_api.html">
            Using Torch-TensorRT in Python
           </a>
          </li>
          <li class="md-nav__item">
           <a class="md-nav__link" href="../tutorials/creating_torchscript_module_in_python.html">
            Creating a TorchScript Module
           </a>
          </li>
          <li class="md-nav__item">
           <a class="md-nav__link" href="../tutorials/creating_torchscript_module_in_python.html#working-with-torchscript-in-python">
            Working with TorchScript in Python
           </a>
          </li>
          <li class="md-nav__item">
           <a class="md-nav__link" href="../tutorials/creating_torchscript_module_in_python.html#saving-torchscript-module-to-disk">
            Saving TorchScript Module to Disk
           </a>
          </li>
          <li class="md-nav__item">
           <a class="md-nav__link" href="../tutorials/ptq.html">
            Post Training Quantization (PTQ)
           </a>
          </li>
          <li class="md-nav__item">
           <a class="md-nav__link" href="../tutorials/torchtrtc.html">
            torchtrtc
           </a>
          </li>
          <li class="md-nav__item">
           <a class="md-nav__link" href="../tutorials/use_from_pytorch.html">
            Using Torch-TensorRT Directly From PyTorch
           </a>
          </li>
          <li class="md-nav__item">
           <a class="md-nav__link" href="../tutorials/runtime.html">
            Deploying Torch-TensorRT Programs
           </a>
          </li>
          <li class="md-nav__item">
           <a class="md-nav__link" href="../tutorials/using_dla.html">
            DLA
           </a>
          </li>
          <li class="md-nav__item">
           <span class="md-nav__link caption">
            <span class="caption-text">
             Notebooks
            </span>
           </span>
          </li>
          <li class="md-nav__item">
           <a class="md-nav__link" href="../_notebooks/CitriNet-example.html">
            Torch-TensorRT Getting Started - CitriNet
           </a>
          </li>
          <li class="md-nav__item">
           <a class="md-nav__link" href="../_notebooks/dynamic-shapes.html">
            Torch-TensorRT - Using Dynamic Shapes
           </a>
          </li>
          <li class="md-nav__item">
           <a class="md-nav__link" href="../_notebooks/EfficientNet-example.html">
            Torch-TensorRT Getting Started - EfficientNet-B0
           </a>
          </li>
          <li class="md-nav__item">
           <a class="md-nav__link" href="../_notebooks/Hugging-Face-BERT.html">
            Masked Language Modeling (MLM) with Hugging Face BERT Transformer
           </a>
          </li>
          <li class="md-nav__item">
           <a class="md-nav__link" href="../_notebooks/lenet-getting-started.html">
            Torch-TensorRT Getting Started - LeNet
           </a>
          </li>
          <li class="md-nav__item">
           <a class="md-nav__link" href="../_notebooks/Resnet50-example.html">
            Torch-TensorRT Getting Started - ResNet 50
           </a>
          </li>
          <li class="md-nav__item">
           <a class="md-nav__link" href="../_notebooks/ssd-object-detection-demo.html">
            Object Detection with Torch-TensorRT (SSD)
           </a>
          </li>
          <li class="md-nav__item">
           <a class="md-nav__link" href="../_notebooks/vgg-qat.html">
            Deploying Quantization Aware Trained models in INT8 using Torch-TensorRT
           </a>
          </li>
          <li class="md-nav__item">
           <span class="md-nav__link caption">
            <span class="caption-text">
              Python API Documentation
            </span>
           </span>
          </li>
          <li class="md-nav__item">
           <a class="md-nav__link" href="../py_api/torch_tensorrt.html">
            torch_tensorrt
           </a>
          </li>
          <li class="md-nav__item">
           <a class="md-nav__link" href="../py_api/logging.html">
            torch_tensorrt.logging
           </a>
          </li>
          <li class="md-nav__item">
           <a class="md-nav__link" href="../py_api/ptq.html">
            torch_tensorrt.ptq
           </a>
          </li>
          <li class="md-nav__item">
           <a class="md-nav__link" href="../py_api/ts.html">
            torch_tensorrt.ts
           </a>
          </li>
          <li class="md-nav__item">
           <span class="md-nav__link caption">
            <span class="caption-text">
              C++ API Documentation
            </span>
           </span>
          </li>
          <li class="md-nav__item">
           <a class="md-nav__link" href="../_cpp_api/torch_tensort_cpp.html">
            Torch-TensorRT C++ API
           </a>
          </li>
          <li class="md-nav__item">
           <a class="md-nav__link" href="../_cpp_api/namespace_torch_tensorrt.html">
            Namespace torch_tensorrt
           </a>
          </li>
          <li class="md-nav__item">
           <a class="md-nav__link" href="../_cpp_api/namespace_torch_tensorrt__logging.html">
            Namespace torch_tensorrt::logging
           </a>
          </li>
          <li class="md-nav__item">
           <a class="md-nav__link" href="../_cpp_api/namespace_torch_tensorrt__torchscript.html">
            Namespace torch_tensorrt::torchscript
           </a>
          </li>
          <li class="md-nav__item">
           <a class="md-nav__link" href="../_cpp_api/namespace_torch_tensorrt__ptq.html">
            Namespace torch_tensorrt::ptq
           </a>
          </li>
          <li class="md-nav__item">
           <span class="md-nav__link caption">
            <span class="caption-text">
             Contributor Documentation
            </span>
           </span>
          </li>
          <li class="md-nav__item">
           <a class="md-nav__link" href="../contributors/system_overview.html">
            System Overview
           </a>
          </li>
          <li class="md-nav__item">
           <a class="md-nav__link" href="../contributors/writing_converters.html">
            Writing Converters
           </a>
          </li>
          <li class="md-nav__item">
           <a class="md-nav__link" href="../contributors/useful_links.html">
            Useful Links for Torch-TensorRT Development
           </a>
          </li>
          <li class="md-nav__item">
           <span class="md-nav__link caption">
            <span class="caption-text">
             Indices
            </span>
           </span>
          </li>
          <li class="md-nav__item">
           <input class="md-toggle md-nav__toggle" data-md-toggle="toc" id="__toc" type="checkbox"/>
           <label class="md-nav__link md-nav__link--active" for="__toc">
            Operators Supported
           </label>
           <a class="md-nav__link md-nav__link--active" href="#">
            Operators Supported
           </a>
           <nav class="md-nav md-nav--secondary">
            <label class="md-nav__title" for="__toc">
             Contents
            </label>
            <ul class="md-nav__list" data-md-scrollfix="">
             <li class="md-nav__item">
              <a class="md-nav__link" href="#indices-supported-ops--page-root">
               Operators Supported
              </a>
              <nav class="md-nav">
               <ul class="md-nav__list">
                <li class="md-nav__item">
                 <a class="md-nav__link" href="#operators-currently-supported-through-converters">
                  Operators Currently Supported Through Converters
                 </a>
                </li>
                <li class="md-nav__item">
                 <a class="md-nav__link" href="#operators-currently-supported-through-evaluators">
                  Operators Currently Supported Through Evaluators
                 </a>
                </li>
               </ul>
              </nav>
             </li>
             <li class="md-nav__item">
              <a class="md-nav__extra_link" href="../_sources/indices/supported_ops.rst.txt">
               Show Source
              </a>
             </li>
            </ul>
           </nav>
          </li>
         </ul>
        </nav>
       </div>
      </div>
     </div>
     <div class="md-sidebar md-sidebar--secondary" data-md-component="toc">
      <div class="md-sidebar__scrollwrap">
       <div class="md-sidebar__inner">
        <nav class="md-nav md-nav--secondary">
         <label class="md-nav__title" for="__toc">
          Contents
         </label>
         <ul class="md-nav__list" data-md-scrollfix="">
          <li class="md-nav__item">
           <a class="md-nav__link" href="#indices-supported-ops--page-root">
            Operators Supported
           </a>
           <nav class="md-nav">
            <ul class="md-nav__list">
             <li class="md-nav__item">
              <a class="md-nav__link" href="#operators-currently-supported-through-converters">
               Operators Currently Supported Through Converters
              </a>
             </li>
             <li class="md-nav__item">
              <a class="md-nav__link" href="#operators-currently-supported-through-evaluators">
               Operators Currently Supported Through Evaluators
              </a>
             </li>
            </ul>
           </nav>
          </li>
          <li class="md-nav__item">
           <a class="md-nav__extra_link" href="../_sources/indices/supported_ops.rst.txt">
            Show Source
           </a>
          </li>
          <li class="md-nav__item" id="searchbox">
          </li>
         </ul>
        </nav>
       </div>
      </div>
     </div>
     <div class="md-content">
      <article class="md-content__inner md-typeset" role="main">
       <section id="operators-supported">
        <span id="supported-ops">
        </span>
        <h1 id="indices-supported-ops--page-root">
         Operators Supported
         <a class="headerlink" href="#indices-supported-ops--page-root" title="Permalink to this headline">
          ¶
         </a>
        </h1>
        <section id="operators-currently-supported-through-converters">
         <h2 id="operators-currently-supported-through-converters">
          Operators Currently Supported Through Converters
          <a class="headerlink" href="#operators-currently-supported-through-converters" title="Permalink to this headline">
           ¶
          </a>
         </h2>
         <ul class="simple">
          <li>
           <p>
            aten::_convolution(Tensor input, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32) -&gt; (Tensor)
           </p>
          </li>
          <li>
           <p>
            aten::_convolution.deprecated(Tensor input, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, bool benchmark, bool deterministic, bool cudnn_enabled) -&gt; (Tensor)
           </p>
          </li>
          <li>
           <p>
            aten::abs(Tensor self) -&gt; (Tensor)
           </p>
          </li>
          <li>
           <p>
            aten::acos(Tensor self) -&gt; (Tensor)
           </p>
          </li>
          <li>
           <p>
            aten::acosh(Tensor self) -&gt; (Tensor)
           </p>
          </li>
          <li>
           <p>
            aten::adaptive_avg_pool1d(Tensor self, int[1] output_size) -&gt; (Tensor)
           </p>
          </li>
          <li>
           <p>
            aten::adaptive_avg_pool2d(Tensor self, int[2] output_size) -&gt; (Tensor)
           </p>
          </li>
          <li>
           <p>
            aten::adaptive_avg_pool3d(Tensor self, int[3] output_size) -&gt; (Tensor)
           </p>
          </li>
          <li>
           <p>
            aten::adaptive_max_pool1d(Tensor self, int[2] output_size) -&gt; (Tensor, Tensor)
           </p>
          </li>
          <li>
           <p>
            aten::adaptive_max_pool2d(Tensor self, int[2] output_size) -&gt; (Tensor, Tensor)
           </p>
          </li>
          <li>
           <p>
            aten::adaptive_max_pool3d(Tensor self, int[3] output_size) -&gt; (Tensor, Tensor)
           </p>
          </li>
          <li>
           <p>
            aten::add.Scalar(Tensor self, Scalar other, Scalar alpha=1) -&gt; (Tensor)
           </p>
          </li>
          <li>
           <p>
            aten::add.Tensor(Tensor self, Tensor other, Scalar alpha=1) -&gt; (Tensor)
           </p>
          </li>
          <li>
           <p>
            aten::
            <a href="#id37">
             <span class="problematic" id="id38">
              add_
             </span>
            </a>
            .Tensor(Tensor(a!) self, Tensor other,
            <a href="#id1">
             <span class="problematic" id="id2">
              *
             </span>
            </a>
            , Scalar alpha=1) -&gt; (Tensor(a!))
           </p>
          </li>
          <li>
           <p>
            aten::asin(Tensor self) -&gt; (Tensor)
           </p>
          </li>
          <li>
           <p>
            aten::asinh(Tensor self) -&gt; (Tensor)
           </p>
          </li>
          <li>
           <p>
            aten::atan(Tensor self) -&gt; (Tensor)
           </p>
          </li>
          <li>
           <p>
            aten::atanh(Tensor self) -&gt; (Tensor)
           </p>
          </li>
          <li>
           <p>
            aten::avg_pool1d(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=[0], bool ceil_mode=False, bool count_include_pad=True) -&gt; (Tensor)
           </p>
          </li>
          <li>
           <p>
            aten::avg_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=[0, 0], bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None) -&gt; (Tensor)
           </p>
          </li>
          <li>
           <p>
            aten::avg_pool3d(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=[], bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None) -&gt; (Tensor)
           </p>
          </li>
          <li>
           <p>
            aten::batch_norm(Tensor input, Tensor? gamma, Tensor? beta, Tensor? mean, Tensor? var, bool training, float momentum, float eps, bool cudnn_enabled) -&gt; (Tensor)
           </p>
          </li>
          <li>
           <p>
            aten::bmm(Tensor self, Tensor mat2) -&gt; (Tensor)
           </p>
          </li>
          <li>
           <p>
            aten::cat(Tensor[] tensors, int dim=0) -&gt; (Tensor)
           </p>
          </li>
          <li>
           <p>
            aten::ceil(Tensor self) -&gt; (Tensor)
           </p>
          </li>
          <li>
           <p>
            aten::clamp(Tensor self, Scalar? min=None, Scalar? max=None) -&gt; (Tensor)
           </p>
          </li>
          <li>
           <p>
            aten::clamp_max(Tensor self, Scalar max) -&gt; (Tensor)
           </p>
          </li>
          <li>
           <p>
            aten::clamp_min(Tensor self, Scalar min) -&gt; (Tensor)
           </p>
          </li>
          <li>
           <p>
            aten::constant_pad_nd(Tensor self, int[] pad, Scalar value=0) -&gt; (Tensor)
           </p>
          </li>
          <li>
           <p>
            aten::cos(Tensor self) -&gt; (Tensor)
           </p>
          </li>
          <li>
           <p>
            aten::cosh(Tensor self) -&gt; (Tensor)
           </p>
          </li>
          <li>
           <p>
            aten::cumsum(Tensor self, int dim,
            <a href="#id3">
             <span class="problematic" id="id4">
              *
             </span>
            </a>
            , int? dtype=None) -&gt; (Tensor)
           </p>
          </li>
          <li>
           <p>
            aten::div.Scalar(Tensor self, Scalar other) -&gt; (Tensor)
           </p>
          </li>
          <li>
           <p>
            aten::div.Tensor(Tensor self, Tensor other) -&gt; (Tensor)
           </p>
          </li>
          <li>
           <p>
            aten::div.Tensor_mode(Tensor self, Tensor other,
            <a href="#id5">
             <span class="problematic" id="id6">
              *
             </span>
            </a>
            , str? rounding_mode) -&gt; (Tensor)
           </p>
          </li>
          <li>
           <p>
            aten::
            <a href="#id39">
             <span class="problematic" id="id40">
              div_
             </span>
            </a>
            .Scalar(Tensor(a!) self, Scalar other) -&gt; (Tensor(a!))
           </p>
          </li>
          <li>
           <p>
            aten::
            <a href="#id41">
             <span class="problematic" id="id42">
              div_
             </span>
            </a>
            .Tensor(Tensor(a!) self, Tensor other) -&gt; (Tensor(a!))
           </p>
          </li>
          <li>
           <p>
            aten::elu(Tensor self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1) -&gt; (Tensor)
           </p>
          </li>
          <li>
           <p>
            aten::embedding(Tensor weight, Tensor indices, int padding_idx=-1, bool scale_grad_by_freq=False, bool sparse=False) -&gt; (Tensor)
           </p>
          </li>
          <li>
           <p>
            aten::eq.Scalar(Tensor self, Scalar other) -&gt; (Tensor)
           </p>
          </li>
          <li>
           <p>
            aten::eq.Tensor(Tensor self, Tensor other) -&gt; (Tensor)
           </p>
          </li>
          <li>
           <p>
            aten::erf(Tensor self) -&gt; (Tensor)
           </p>
          </li>
          <li>
           <p>
            aten::exp(Tensor self) -&gt; (Tensor)
           </p>
          </li>
          <li>
           <p>
            aten::expand(Tensor(a) self, int[] size,
            <a href="#id7">
             <span class="problematic" id="id8">
              *
             </span>
            </a>
            , bool implicit=False) -&gt; (Tensor(a))
           </p>
          </li>
          <li>
           <p>
            aten::expand_as(Tensor(a) self, Tensor other) -&gt; (Tensor(a))
           </p>
          </li>
          <li>
           <p>
            aten::fake_quantize_per_channel_affine(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max) -&gt; (Tensor)
           </p>
          </li>
          <li>
           <p>
            aten::fake_quantize_per_tensor_affine(Tensor self, float scale, int zero_point, int quant_min, int quant_max) -&gt; (Tensor)
           </p>
          </li>
          <li>
           <p>
            aten::flatten.using_ints(Tensor self, int start_dim=0, int end_dim=-1) -&gt; (Tensor)
           </p>
          </li>
          <li>
           <p>
            aten::floor(Tensor self) -&gt; (Tensor)
           </p>
          </li>
          <li>
           <p>
            aten::floor_divide(Tensor self, Tensor other) -&gt; (Tensor)
           </p>
          </li>
          <li>
           <p>
            aten::floor_divide.Scalar(Tensor self, Scalar other) -&gt; (Tensor)
           </p>
          </li>
          <li>
           <p>
            aten::ge.Scalar(Tensor self, Scalar other) -&gt; (Tensor)
           </p>
          </li>
          <li>
           <p>
            aten::ge.Tensor(Tensor self, Tensor other) -&gt; (Tensor)
           </p>
          </li>
          <li>
           <p>
            aten::gru_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -&gt; (Tensor)
           </p>
          </li>
          <li>
           <p>
            aten::gt.Scalar(Tensor self, Scalar other) -&gt; (Tensor)
           </p>
          </li>
          <li>
           <p>
            aten::gt.Tensor(Tensor self, Tensor other) -&gt; (Tensor)
           </p>
          </li>
          <li>
           <p>
            aten::hardtanh(Tensor self, Scalar min_val=-1, Scalar max_val=1) -&gt; (Tensor)
           </p>
          </li>
          <li>
           <p>
            aten::hardtanh_(Tensor(a!) self, Scalar min_val=-1, Scalar max_val=1) -&gt; (Tensor(a!))
           </p>
          </li>
          <li>
           <p>
            aten::index.Tensor(Tensor self, Tensor?[] indices) -&gt; (Tensor)
           </p>
          </li>
          <li>
           <p>
            aten::instance_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool use_input_stats, float momentum, float eps, bool cudnn_enabled) -&gt; (Tensor)
           </p>
          </li>
          <li>
           <p>
            aten::layer_norm(Tensor input, int[] normalized_shape, Tensor? gamma, Tensor? beta, float eps, bool cudnn_enabled) -&gt; (Tensor)
           </p>
          </li>
          <li>
           <p>
            aten::le.Scalar(Tensor self, Scalar other) -&gt; (Tensor)
           </p>
          </li>
          <li>
           <p>
            aten::le.Tensor(Tensor self, Tensor other) -&gt; (Tensor)
           </p>
          </li>
          <li>
           <p>
            aten::leaky_relu(Tensor self, Scalar negative_slope=0.01) -&gt; (Tensor)
           </p>
          </li>
          <li>
           <p>
            aten::leaky_relu_(Tensor(a!) self, Scalar negative_slope=0.01) -&gt; (Tensor(a!))
           </p>
          </li>
          <li>
           <p>
            aten::linear(Tensor input, Tensor weight, Tensor? bias=None) -&gt; (Tensor)
           </p>
          </li>
          <li>
           <p>
            aten::log(Tensor self) -&gt; (Tensor)
           </p>
          </li>
          <li>
           <p>
            aten::lstm_cell(Tensor input, Tensor[] hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -&gt; (Tensor, Tensor)
           </p>
          </li>
          <li>
           <p>
            aten::lt.Scalar(Tensor self, Scalar other) -&gt; (Tensor)
           </p>
          </li>
          <li>
           <p>
            aten::lt.Tensor(Tensor self, Tensor other) -&gt; (Tensor)
           </p>
          </li>
          <li>
           <p>
            aten::masked_fill.Scalar(Tensor self, Tensor mask, Scalar value) -&gt; (Tensor)
           </p>
          </li>
          <li>
           <p>
            aten::matmul(Tensor self, Tensor other) -&gt; (Tensor)
           </p>
          </li>
          <li>
           <p>
            aten::max(Tensor self) -&gt; (Tensor)
           </p>
          </li>
          <li>
           <p>
            aten::max.dim(Tensor self, int dim, bool keepdim=False) -&gt; (Tensor values, Tensor indices)
           </p>
          </li>
          <li>
           <p>
            aten::max.other(Tensor self, Tensor other) -&gt; (Tensor)
           </p>
          </li>
          <li>
           <p>
            aten::max_pool1d(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=[], int[1] dilation=[], bool ceil_mode=False) -&gt; (Tensor)
           </p>
          </li>
          <li>
           <p>
            aten::max_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=[0, 0], int[2] dilation=[1, 1], bool ceil_mode=False) -&gt; (Tensor)
           </p>
          </li>
          <li>
           <p>
            aten::max_pool3d(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=[], int[3] dilation=[], bool ceil_mode=False) -&gt; (Tensor)
           </p>
          </li>
          <li>
           <p>
             aten::mean(Tensor self, *, int? dtype=None) -&gt; (Tensor)
           </p>
          </li>
          <li>
           <p>
             aten::mean.dim(Tensor self, int[] dim, bool keepdim=False, *, int? dtype=None) -&gt; (Tensor)
           </p>
          </li>
          <li>
           <p>
            aten::min(Tensor self) -&gt; (Tensor)
           </p>
          </li>
          <li>
           <p>
            aten::min.other(Tensor self, Tensor other) -&gt; (Tensor)
           </p>
          </li>
          <li>
           <p>
            aten::mul.Scalar(Tensor self, Scalar other) -&gt; (Tensor)
           </p>
          </li>
          <li>
           <p>
            aten::mul.Tensor(Tensor self, Tensor other) -&gt; (Tensor)
           </p>
          </li>
          <li>
           <p>
             aten::mul_.Tensor(Tensor(a!) self, Tensor other) -&gt; (Tensor(a!))
           </p>
          </li>
          <li>
           <p>
            aten::narrow(Tensor(a) self, int dim, int start, int length) -&gt; (Tensor(a))
           </p>
          </li>
          <li>
           <p>
            aten::narrow.Tensor(Tensor(a) self, int dim, Tensor start, int length) -&gt; (Tensor(a))
           </p>
          </li>
          <li>
           <p>
            aten::ne.Scalar(Tensor self, Scalar other) -&gt; (Tensor)
           </p>
          </li>
          <li>
           <p>
            aten::ne.Tensor(Tensor self, Tensor other) -&gt; (Tensor)
           </p>
          </li>
          <li>
           <p>
            aten::neg(Tensor self) -&gt; (Tensor)
           </p>
          </li>
          <li>
           <p>
            aten::norm.ScalarOpt_dim(Tensor self, Scalar? p, int[1] dim, bool keepdim=False) -&gt; (Tensor)
           </p>
          </li>
          <li>
           <p>
            aten::permute(Tensor(a) self, int[] dims) -&gt; (Tensor(a))
           </p>
          </li>
          <li>
           <p>
            aten::pixel_shuffle(Tensor self, int upscale_factor) -&gt; (Tensor)
           </p>
          </li>
          <li>
           <p>
            aten::pow.Tensor_Scalar(Tensor self, Scalar exponent) -&gt; (Tensor)
           </p>
          </li>
          <li>
           <p>
            aten::pow.Tensor_Tensor(Tensor self, Tensor exponent) -&gt; (Tensor)
           </p>
          </li>
          <li>
           <p>
            aten::prelu(Tensor self, Tensor weight) -&gt; (Tensor)
           </p>
          </li>
          <li>
           <p>
             aten::prod(Tensor self, *, int? dtype=None) -&gt; (Tensor)
           </p>
          </li>
          <li>
           <p>
             aten::prod.dim_int(Tensor self, int dim, bool keepdim=False, *, int? dtype=None) -&gt; (Tensor)
           </p>
          </li>
          <li>
           <p>
            aten::reciprocal(Tensor self) -&gt; (Tensor)
           </p>
          </li>
          <li>
           <p>
            aten::reflection_pad1d(Tensor self, int[2] padding) -&gt; (Tensor)
           </p>
          </li>
          <li>
           <p>
            aten::reflection_pad2d(Tensor self, int[4] padding) -&gt; (Tensor)
           </p>
          </li>
          <li>
           <p>
            aten::relu(Tensor input) -&gt; (Tensor)
           </p>
          </li>
          <li>
           <p>
            aten::relu_(Tensor(a!) self) -&gt; (Tensor(a!))
           </p>
          </li>
          <li>
           <p>
            aten::repeat(Tensor self, int[] repeats) -&gt; (Tensor)
           </p>
          </li>
          <li>
           <p>
            aten::replication_pad1d(Tensor self, int[2] padding) -&gt; (Tensor)
           </p>
          </li>
          <li>
           <p>
            aten::replication_pad2d(Tensor self, int[4] padding) -&gt; (Tensor)
           </p>
          </li>
          <li>
           <p>
            aten::replication_pad3d(Tensor self, int[6] padding) -&gt; (Tensor)
           </p>
          </li>
          <li>
           <p>
            aten::reshape(Tensor self, int[] shape) -&gt; (Tensor)
           </p>
          </li>
          <li>
           <p>
            aten::roll(Tensor self, int[1] shifts, int[1] dims=[]) -&gt; (Tensor)
           </p>
          </li>
          <li>
           <p>
            aten::rsub.Scalar(Tensor self, Scalar other, Scalar alpha=1) -&gt; (Tensor)
           </p>
          </li>
          <li>
           <p>
            aten::rsub.Tensor(Tensor self, Tensor other, Scalar alpha=1) -&gt; (Tensor)
           </p>
          </li>
          <li>
           <p>
            aten::select.int(Tensor(a) self, int dim, int index) -&gt; (Tensor(a))
           </p>
          </li>
          <li>
           <p>
            aten::sigmoid(Tensor input) -&gt; (Tensor)
           </p>
          </li>
          <li>
           <p>
            aten::sigmoid_(Tensor(a!) self) -&gt; (Tensor(a!))
           </p>
          </li>
          <li>
           <p>
            aten::sin(Tensor self) -&gt; (Tensor)
           </p>
          </li>
          <li>
           <p>
            aten::sinh(Tensor self) -&gt; (Tensor)
           </p>
          </li>
          <li>
           <p>
            aten::slice.Tensor(Tensor(a) self, int dim=0, int? start=None, int? end=None, int step=1) -&gt; (Tensor(a))
           </p>
          </li>
          <li>
           <p>
            aten::softmax.int(Tensor self, int dim, int? dtype=None) -&gt; (Tensor)
           </p>
          </li>
          <li>
           <p>
            aten::split(Tensor self, int[] split_sizes, int dim=0) -&gt; (Tensor[])
           </p>
          </li>
          <li>
           <p>
            aten::split.Tensor(Tensor(a) self, int split_size, int dim=0) -&gt; (Tensor[])
           </p>
          </li>
          <li>
           <p>
            aten::split_with_sizes(Tensor(a) self, int[] split_sizes, int dim=0) -&gt; (Tensor[])
           </p>
          </li>
          <li>
           <p>
            aten::sqrt(Tensor self) -&gt; (Tensor)
           </p>
          </li>
          <li>
           <p>
            aten::squeeze.dim(Tensor(a) self, int dim) -&gt; (Tensor(a))
           </p>
          </li>
          <li>
           <p>
            aten::stack(Tensor[] tensors, int dim=0) -&gt; (Tensor)
           </p>
          </li>
          <li>
           <p>
            aten::sub.Scalar(Tensor self, Scalar other, Scalar alpha=1) -&gt; (Tensor)
           </p>
          </li>
          <li>
           <p>
            aten::sub.Tensor(Tensor self, Tensor other, Scalar alpha=1) -&gt; (Tensor)
           </p>
          </li>
          <li>
           <p>
             aten::sub_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -&gt; (Tensor(a!))
           </p>
          </li>
          <li>
           <p>
             aten::sum(Tensor self, *, int? dtype=None) -&gt; (Tensor)
           </p>
          </li>
          <li>
           <p>
             aten::sum.dim_IntList(Tensor self, int[1] dim, bool keepdim=False, *, int? dtype=None) -&gt; (Tensor)
           </p>
          </li>
          <li>
           <p>
            aten::t(Tensor self) -&gt; (Tensor)
           </p>
          </li>
          <li>
           <p>
            aten::tan(Tensor self) -&gt; (Tensor)
           </p>
          </li>
          <li>
           <p>
            aten::tanh(Tensor input) -&gt; (Tensor)
           </p>
          </li>
          <li>
           <p>
            aten::tanh_(Tensor(a!) self) -&gt; (Tensor(a!))
           </p>
          </li>
          <li>
           <p>
            aten::to.device(Tensor(a) self, Device device, int dtype, bool non_blocking=False, bool copy=False, int? memory_format=None) -&gt; (Tensor(a))
           </p>
          </li>
          <li>
           <p>
            aten::to.dtype(Tensor self, int dtype, bool non_blocking=False, bool copy=False, int? memory_format=None) -&gt; (Tensor)
           </p>
          </li>
          <li>
           <p>
            aten::to.other(Tensor self, Tensor other, bool non_blocking=False, bool copy=False, int? memory_format=None) -&gt; (Tensor)
           </p>
          </li>
          <li>
           <p>
            aten::to.prim_Device(Tensor(a) self, Device? device, int? dtype=None, bool non_blocking=False, bool copy=False) -&gt; (Tensor(a|b))
           </p>
          </li>
          <li>
           <p>
            aten::topk(Tensor self, int k, int dim=-1, bool largest=True, bool sorted=True) -&gt; (Tensor values, Tensor indices)
           </p>
          </li>
          <li>
           <p>
            aten::transpose.int(Tensor(a) self, int dim0, int dim1) -&gt; (Tensor(a))
           </p>
          </li>
          <li>
           <p>
             aten::unbind.int(Tensor(a -&gt; *) self, int dim=0) -&gt; (Tensor[])
           </p>
          </li>
          <li>
           <p>
            aten::unsqueeze(Tensor(a) self, int dim) -&gt; (Tensor(a))
           </p>
          </li>
          <li>
           <p>
            aten::upsample_bilinear2d(Tensor self, int[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -&gt; (Tensor)
           </p>
          </li>
          <li>
           <p>
            aten::upsample_bilinear2d.vec(Tensor input, int[]? output_size, bool align_corners, float[]? scale_factors) -&gt; (Tensor)
           </p>
          </li>
          <li>
           <p>
            aten::upsample_linear1d(Tensor self, int[1] output_size, bool align_corners, float? scales=None) -&gt; (Tensor)
           </p>
          </li>
          <li>
           <p>
            aten::upsample_linear1d.vec(Tensor input, int[]? output_size, bool align_corners, float[]? scale_factors) -&gt; (Tensor)
           </p>
          </li>
          <li>
           <p>
            aten::upsample_nearest1d(Tensor self, int[1] output_size, float? scales=None) -&gt; (Tensor)
           </p>
          </li>
          <li>
           <p>
            aten::upsample_nearest1d.vec(Tensor input, int[]? output_size, float[]? scale_factors) -&gt; (Tensor)
           </p>
          </li>
          <li>
           <p>
            aten::upsample_nearest2d(Tensor self, int[2] output_size, float? scales_h=None, float? scales_w=None) -&gt; (Tensor)
           </p>
          </li>
          <li>
           <p>
            aten::upsample_nearest2d.vec(Tensor input, int[]? output_size, float[]? scale_factors) -&gt; (Tensor)
           </p>
          </li>
          <li>
           <p>
            aten::upsample_nearest3d(Tensor self, int[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None) -&gt; (Tensor)
           </p>
          </li>
          <li>
           <p>
            aten::upsample_nearest3d.vec(Tensor input, int[]? output_size, float[]? scale_factors) -&gt; (Tensor)
           </p>
          </li>
          <li>
           <p>
            aten::upsample_trilinear3d(Tensor self, int[3] output_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None) -&gt; (Tensor)
           </p>
          </li>
          <li>
           <p>
            aten::upsample_trilinear3d.vec(Tensor input, int[]? output_size, bool align_corners, float[]? scale_factors) -&gt; (Tensor)
           </p>
          </li>
          <li>
           <p>
            aten::view(Tensor(a) self, int[] size) -&gt; (Tensor(a))
           </p>
          </li>
          <li>
           <p>
            trt::const(Tensor self) -&gt; (Tensor)
           </p>
          </li>
         </ul>
        </section>
        <section id="operators-currently-supported-through-evaluators">
         <h2 id="operators-currently-supported-through-evaluators">
          Operators Currently Supported Through Evaluators
          <a class="headerlink" href="#operators-currently-supported-through-evaluators" title="Permalink to this headline">
           ¶
          </a>
         </h2>
         <ul class="simple">
          <li>
           <p>
            aten::Bool.float(float b) -&gt; (bool)
           </p>
          </li>
          <li>
           <p>
            aten::Bool.int(int a) -&gt; (bool)
           </p>
          </li>
          <li>
           <p>
            aten::Float.Scalar(Scalar a) -&gt; float
           </p>
          </li>
          <li>
           <p>
            aten::Float.bool(bool a) -&gt; float
           </p>
          </li>
          <li>
           <p>
            aten::Float.int(int a) -&gt; float
           </p>
          </li>
          <li>
           <p>
            aten::Int.Scalar(Scalar a) -&gt; int
           </p>
          </li>
          <li>
           <p>
            aten::Int.bool(bool a) -&gt; int
           </p>
          </li>
          <li>
           <p>
            aten::Int.float(float a) -&gt; int
           </p>
          </li>
          <li>
           <p>
            aten::Int.int(int a) -&gt; int
           </p>
          </li>
          <li>
           <p>
            aten::__and__(int a, int b) -&gt; (bool)
           </p>
          </li>
          <li>
           <p>
            aten::__and__.bool(bool a, bool b) -&gt; (bool)
           </p>
          </li>
          <li>
           <p>
            aten::__getitem__.t(t[](a) list, int idx) -&gt; (t(*))
           </p>
          </li>
          <li>
           <p>
            aten::__is__(t1 self, t2 obj) -&gt; bool
           </p>
          </li>
          <li>
           <p>
            aten::__isnot__(t1 self, t2 obj) -&gt; bool
           </p>
          </li>
          <li>
           <p>
            aten::__not__(bool self) -&gt; bool
           </p>
          </li>
          <li>
           <p>
            aten::__or__(int a, int b) -&gt; (bool)
           </p>
          </li>
          <li>
           <p>
            aten::__range_length(int lo, int hi, int step) -&gt; int
           </p>
          </li>
          <li>
           <p>
            aten::__round_to_zero_floordiv(int a, int b) -&gt; (int)
           </p>
          </li>
          <li>
           <p>
            aten::__xor__(int a, int b) -&gt; (bool)
           </p>
          </li>
          <li>
           <p>
            aten::add.float(float a, float b) -&gt; (float)
           </p>
          </li>
          <li>
           <p>
            aten::add.int(int a, int b) -&gt; (int)
           </p>
          </li>
          <li>
           <p>
            aten::add.str(str a, str b) -&gt; (str)
           </p>
          </li>
          <li>
           <p>
             aten::add_.t(t[](a!) self, t[] b) -&gt; (t[])
           </p>
          </li>
          <li>
           <p>
             aten::append.t(t[](a!) self, t(c -&gt; *) el) -&gt; (t[](a!))
           </p>
          </li>
          <li>
           <dl class="simple">
            <dt>
              aten::arange(Scalar end, *, int? dtype=None, int? layout=None,
            </dt>
            <dd>
             <p>
              Device? device=None, bool? pin_memory=None) -&gt; (Tensor)
             </p>
            </dd>
           </dl>
          </li>
          <li>
           <dl class="simple">
            <dt>
              aten::arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None,
            </dt>
            <dd>
             <p>
              Layout? layout=None, Device? device=None, bool? pin_memory=None) -&gt; (Tensor)
             </p>
            </dd>
           </dl>
          </li>
          <li>
           <dl class="simple">
            <dt>
              aten::arange.start_step(Scalar start, Scalar end, Scalar step, *, ScalarType? dtype=None,
            </dt>
            <dd>
             <p>
              Layout? layout=None, Device? device=None, bool? pin_memory=None) -&gt; (Tensor)
             </p>
            </dd>
           </dl>
          </li>
          <li>
           <p>
             aten::clone(Tensor self, *, int? memory_format=None) -&gt; (Tensor)
           </p>
          </li>
          <li>
           <p>
            aten::copy_(Tensor(a!) self, Tensor src, bool non_blocking=False) -&gt; (Tensor(a!))
           </p>
          </li>
          <li>
           <p>
            aten::dim(Tensor self) -&gt; int
           </p>
          </li>
          <li>
           <p>
            aten::div.float(float a, float b) -&gt; (float)
           </p>
          </li>
          <li>
           <p>
            aten::div.int(int a, int b) -&gt; (float)
           </p>
          </li>
          <li>
           <p>
            aten::eq.bool(bool a, bool b) -&gt; (bool)
           </p>
          </li>
          <li>
           <p>
            aten::eq.float(float a, float b) -&gt; (bool)
           </p>
          </li>
          <li>
           <p>
            aten::eq.float_int(float a, int b) -&gt; (bool)
           </p>
          </li>
          <li>
           <p>
            aten::eq.int(int a, int b) -&gt; (bool)
           </p>
          </li>
          <li>
           <p>
            aten::eq.int_float(int a, float b) -&gt; (bool)
           </p>
          </li>
          <li>
           <p>
            aten::eq.str(str a, str b) -&gt; (bool)
           </p>
          </li>
          <li>
           <p>
            aten::extend.t(t[](a!) self, t[] other) -&gt; ()
           </p>
          </li>
          <li>
           <p>
            aten::floor.float(float a) -&gt; (int)
           </p>
          </li>
          <li>
           <p>
            aten::floor.int(int a) -&gt; (int)
           </p>
          </li>
          <li>
           <p>
            aten::floordiv.float(float a, float b) -&gt; (int)
           </p>
          </li>
          <li>
           <p>
            aten::floordiv.int(int a, int b) -&gt; (int)
           </p>
          </li>
          <li>
           <p>
            aten::format(str self, …) -&gt; (str)
           </p>
          </li>
          <li>
           <p>
            aten::ge.bool(bool a, bool b) -&gt; (bool)
           </p>
          </li>
          <li>
           <p>
            aten::ge.float(float a, float b) -&gt; (bool)
           </p>
          </li>
          <li>
           <p>
            aten::ge.float_int(float a, int b) -&gt; (bool)
           </p>
          </li>
          <li>
           <p>
            aten::ge.int(int a, int b) -&gt; (bool)
           </p>
          </li>
          <li>
           <p>
            aten::ge.int_float(int a, float b) -&gt; (bool)
           </p>
          </li>
          <li>
           <p>
            aten::gt.bool(bool a, bool b) -&gt; (bool)
           </p>
          </li>
          <li>
           <p>
            aten::gt.float(float a, float b) -&gt; (bool)
           </p>
          </li>
          <li>
           <p>
            aten::gt.float_int(float a, int b) -&gt; (bool)
           </p>
          </li>
          <li>
           <p>
            aten::gt.int(int a, int b) -&gt; (bool)
           </p>
          </li>
          <li>
           <p>
            aten::gt.int_float(int a, float b) -&gt; (bool)
           </p>
          </li>
          <li>
           <p>
            aten::is_floating_point(Tensor self) -&gt; (bool)
           </p>
          </li>
          <li>
           <p>
            aten::le.bool(bool a, bool b) -&gt; (bool)
           </p>
          </li>
          <li>
           <p>
            aten::le.float(float a, float b) -&gt; (bool)
           </p>
          </li>
          <li>
           <p>
            aten::le.float_int(float a, int b) -&gt; (bool)
           </p>
          </li>
          <li>
           <p>
            aten::le.int(int a, int b) -&gt; (bool)
           </p>
          </li>
          <li>
           <p>
            aten::le.int_float(int a, float b) -&gt; (bool)
           </p>
          </li>
          <li>
           <p>
            aten::len.t(t[] a) -&gt; (int)
           </p>
          </li>
          <li>
           <p>
            aten::lt.bool(bool a, bool b) -&gt; (bool)
           </p>
          </li>
          <li>
           <p>
            aten::lt.float(float a, float b) -&gt; (bool)
           </p>
          </li>
          <li>
           <p>
            aten::lt.float_int(float a, int b) -&gt; (bool)
           </p>
          </li>
          <li>
           <p>
            aten::lt.int(int a, int b) -&gt; (bool)
           </p>
          </li>
          <li>
           <p>
            aten::lt.int_float(int a, float b) -&gt; (bool)
           </p>
          </li>
          <li>
           <p>
            aten::mul.float(float a, float b) -&gt; (float)
           </p>
          </li>
          <li>
           <p>
            aten::mul.int(int a, int b) -&gt; (int)
           </p>
          </li>
          <li>
           <p>
            aten::ne.bool(bool a, bool b) -&gt; (bool)
           </p>
          </li>
          <li>
           <p>
            aten::ne.float(float a, float b) -&gt; (bool)
           </p>
          </li>
          <li>
           <p>
            aten::ne.float_int(float a, int b) -&gt; (bool)
           </p>
          </li>
          <li>
           <p>
            aten::ne.int(int a, int b) -&gt; (bool)
           </p>
          </li>
          <li>
           <p>
            aten::ne.int_float(int a, float b) -&gt; (bool)
           </p>
          </li>
          <li>
           <p>
            aten::neg.int(int a) -&gt; (int)
           </p>
          </li>
          <li>
           <p>
            aten::numel(Tensor self) -&gt; int
           </p>
          </li>
          <li>
           <p>
            aten::pow.float(float a, float b) -&gt; (float)
           </p>
          </li>
          <li>
           <p>
            aten::pow.float_int(float a, int b) -&gt; (float)
           </p>
          </li>
          <li>
           <p>
            aten::pow.int(int a, int b) -&gt; (float)
           </p>
          </li>
          <li>
           <p>
            aten::pow.int_float(int a, float b) -&gt; (float)
           </p>
          </li>
          <li>
           <p>
            aten::size(Tensor self) -&gt; (int[])
           </p>
          </li>
          <li>
           <p>
            aten::size.int(Tensor self, int dim) -&gt; (int)
           </p>
          </li>
          <li>
           <p>
            aten::slice.t(t[] l, int start, int end=9223372036854775807, int step=1) -&gt; (t[])
           </p>
          </li>
          <li>
           <p>
            aten::sqrt.float(float a) -&gt; (float)
           </p>
          </li>
          <li>
           <p>
            aten::sqrt.int(int a) -&gt; (float)
           </p>
          </li>
          <li>
           <p>
            aten::sub.float(float a, float b) -&gt; (float)
           </p>
          </li>
          <li>
           <p>
            aten::sub.int(int a, int b) -&gt; (int)
           </p>
          </li>
          <li>
           <p>
             aten::tensor(t[] data, *, int? dtype=None, Device? device=None, bool requires_grad=False) -&gt; (Tensor)
           </p>
          </li>
          <li>
           <p>
            prim::dtype(Tensor a) -&gt; (int)
           </p>
          </li>
          <li>
           <p>
            prim::max.bool(bool a, bool b) -&gt; (bool)
           </p>
          </li>
          <li>
           <p>
            prim::max.float(float a, float b) -&gt; (bool)
           </p>
          </li>
          <li>
           <p>
            prim::max.float_int(float a, int b) -&gt; (bool)
           </p>
          </li>
          <li>
           <p>
            prim::max.int(int a, int b) -&gt; (bool)
           </p>
          </li>
          <li>
           <p>
            prim::max.int_float(int a, float b) -&gt; (bool)
           </p>
          </li>
          <li>
           <p>
            prim::max.self_int(int[] self) -&gt; (int)
           </p>
          </li>
          <li>
           <p>
            prim::min.bool(bool a, bool b) -&gt; (bool)
           </p>
          </li>
          <li>
           <p>
            prim::min.float(float a, float b) -&gt; (bool)
           </p>
          </li>
          <li>
           <p>
            prim::min.float_int(float a, int b) -&gt; (bool)
           </p>
          </li>
          <li>
           <p>
            prim::min.int(int a, int b) -&gt; (bool)
           </p>
          </li>
          <li>
           <p>
            prim::min.int_float(int a, float b) -&gt; (bool)
           </p>
          </li>
          <li>
           <p>
            prim::min.self_int(int[] self) -&gt; (int)
           </p>
          </li>
          <li>
           <p>
            prim::shape(Tensor a) -&gt; (int[])
           </p>
          </li>
         </ul>
        </section>
       </section>
      </article>
     </div>
    </div>
   </main>
  </div>
  <footer class="md-footer">
   <div class="md-footer-nav">
    <nav class="md-footer-nav__inner md-grid">
     <a class="md-flex md-footer-nav__link md-footer-nav__link--prev" href="../contributors/useful_links.html" rel="prev" title="Useful Links for Torch-TensorRT Development">
      <div class="md-flex__cell md-flex__cell--shrink">
       <i class="md-icon md-icon--arrow-back md-footer-nav__button">
       </i>
      </div>
      <div class="md-flex__cell md-flex__cell--stretch md-footer-nav__title">
       <span class="md-flex__ellipsis">
        <span class="md-footer-nav__direction">
         Previous
        </span>
        Useful Links for Torch-TensorRT Development
       </span>
      </div>
     </a>
    </nav>
   </div>
   <div class="md-footer-meta md-typeset">
    <div class="md-footer-meta__inner md-grid">
     <div class="md-footer-copyright">
      <div class="md-footer-copyright__highlight">
       © Copyright 2021, NVIDIA Corporation.
      </div>
      Created using
      <a href="http://www.sphinx-doc.org/">
       Sphinx
      </a>
      4.3.0.
             and
      <a href="https://github.com/bashtage/sphinx-material/">
       Material for
              Sphinx
      </a>
     </div>
    </div>
   </div>
  </footer>
  <script src="../_static/javascripts/application.js">
  </script>
  <script>
   app.initialize({version: "1.0.4", url: {base: ".."}})
  </script>
 </body>
</html>