


<!DOCTYPE html>
<!--[if IE 8]><html class="no-js lt-ie9" lang="en" > <![endif]-->
<!--[if gt IE 8]><!--> <html class="no-js" lang="en" > <!--<![endif]-->
<head>
  <meta charset="utf-8">
  <meta name="generator" content="Docutils 0.17.1: http://docutils.sourceforge.net/" />

  <meta name="viewport" content="width=device-width, initial-scale=1.0">
  
  <title>Post Training Quantization (PTQ) &mdash; Torch-TensorRT v2.5.0.dev0+bb405b0 documentation</title>
  

  
  
  
  

  

  
  
    

  

  <link rel="stylesheet" href="../_static/css/theme.css" type="text/css" />
  <!-- <link rel="stylesheet" href="../_static/pygments.css" type="text/css" /> -->
  <link rel="stylesheet" href="../_static/pygments.css" type="text/css" />
  <link rel="stylesheet" href="../_static/css/theme.css" type="text/css" />
  <link rel="stylesheet" href="../_static/sg_gallery.css" type="text/css" />
  <link rel="stylesheet" href="../_static/sg_gallery-binder.css" type="text/css" />
  <link rel="stylesheet" href="../_static/sg_gallery-dataframe.css" type="text/css" />
  <link rel="stylesheet" href="../_static/sg_gallery-rendered-html.css" type="text/css" />
  <link rel="stylesheet" href="../_static/collapsible-lists/css/tree_view.css" type="text/css" />
  <link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/katex@0.10.0/dist/katex.min.css" type="text/css" />
  <link rel="stylesheet" href="../_static/css/custom.css" type="text/css" />
    <link rel="index" title="Index" href="../genindex.html" />
    <link rel="search" title="Search" href="../search.html" />
    <link rel="next" title="Saving models compiled with Torch-TensorRT" href="saving_models.html" />
    <link rel="prev" title="Dynamic shapes with Torch-TensorRT" href="dynamic_shapes.html" />
  <!-- Google Tag Manager -->
    <!-- NOTE(review): the container-id argument (last parameter, bound to `i`) is the
         empty string, so gtm.js is requested as "gtm.js?id=" and no container loads.
         Confirm the build/template injects a real GTM-XXXXXXX id here and in the
         matching <noscript> iframe further down the page. -->
    <script>(function(w,d,s,l,i){w[l]=w[l]||[];w[l].push({'gtm.start':
    new Date().getTime(),event:'gtm.js'});var f=d.getElementsByTagName(s)[0],
    j=d.createElement(s),dl=l!='dataLayer'?'&l='+l:'';j.async=true;j.src=
    'https://www.googletagmanager.com/gtm.js?id='+i+dl;f.parentNode.insertBefore(j,f);
    })(window,document,'script','dataLayer','');</script>
    <!-- End Google Tag Manager -->
  

  
  <script src="../_static/js/modernizr.min.js"></script>

  <!-- Preload the theme fonts -->

<link rel="preload" href="../_static/fonts/FreightSans/freight-sans-book.woff2" as="font" type="font/woff2" crossorigin="anonymous">
<link rel="preload" href="../_static/fonts/FreightSans/freight-sans-medium.woff2" as="font" type="font/woff2" crossorigin="anonymous">
<link rel="preload" href="../_static/fonts/IBMPlexMono/IBMPlexMono-Medium.woff2" as="font" type="font/woff2" crossorigin="anonymous">
<link rel="preload" href="../_static/fonts/FreightSans/freight-sans-bold.woff2" as="font" type="font/woff2" crossorigin="anonymous">
<link rel="preload" href="../_static/fonts/FreightSans/freight-sans-medium-italic.woff2" as="font" type="font/woff2" crossorigin="anonymous">
<link rel="preload" href="../_static/fonts/IBMPlexMono/IBMPlexMono-SemiBold.woff2" as="font" type="font/woff2" crossorigin="anonymous">

<!-- Preload the katex fonts -->

<link rel="preload" href="https://cdn.jsdelivr.net/npm/katex@0.10.0/dist/fonts/KaTeX_Math-Italic.woff2" as="font" type="font/woff2" crossorigin="anonymous">
<link rel="preload" href="https://cdn.jsdelivr.net/npm/katex@0.10.0/dist/fonts/KaTeX_Main-Regular.woff2" as="font" type="font/woff2" crossorigin="anonymous">
<link rel="preload" href="https://cdn.jsdelivr.net/npm/katex@0.10.0/dist/fonts/KaTeX_Main-Bold.woff2" as="font" type="font/woff2" crossorigin="anonymous">
<link rel="preload" href="https://cdn.jsdelivr.net/npm/katex@0.10.0/dist/fonts/KaTeX_Size1-Regular.woff2" as="font" type="font/woff2" crossorigin="anonymous">
<link rel="preload" href="https://cdn.jsdelivr.net/npm/katex@0.10.0/dist/fonts/KaTeX_Size4-Regular.woff2" as="font" type="font/woff2" crossorigin="anonymous">
<link rel="preload" href="https://cdn.jsdelivr.net/npm/katex@0.10.0/dist/fonts/KaTeX_Size2-Regular.woff2" as="font" type="font/woff2" crossorigin="anonymous">
<link rel="preload" href="https://cdn.jsdelivr.net/npm/katex@0.10.0/dist/fonts/KaTeX_Size3-Regular.woff2" as="font" type="font/woff2" crossorigin="anonymous">
<link rel="preload" href="https://cdn.jsdelivr.net/npm/katex@0.10.0/dist/fonts/KaTeX_Caligraphic-Regular.woff2" as="font" type="font/woff2" crossorigin="anonymous">
  <link rel="stylesheet" href="https://use.fontawesome.com/releases/v5.15.2/css/all.css" integrity="sha384-vSIIfh2YWi9wW0r9iZe7RJPrKwp6bG+s9QZMoITbCckVJqGCCRhc+ccxNcdpHuYu" crossorigin="anonymous">
</head>

<div class="container-fluid header-holder tutorials-header" id="header-holder">
  <div class="container">
    <div class="header-container">
      <a class="header-logo" href="https://pytorch.org/" aria-label="PyTorch"></a>

      <div class="main-menu">
        <ul>

          <li class="main-menu-item">
          <div id="resourcesDropdownButton" data-toggle="resources-dropdown" class="resources-dropdown">
              <a class="with-down-arrow">
                Learn
              </a>
              <div class="resources-dropdown-menu">
                <a class="nav-dropdown-item" href="https://pytorch.org/get-started">
                  <span class="dropdown-title">Get Started</span>
                  <p>Run PyTorch locally or get started quickly with one of the supported cloud platforms</p>
                </a>
                <a class="nav-dropdown-item" href="https://pytorch.org/tutorials">
                  <span class="dropdown-title">Tutorials</span>
                  <p>What's new in PyTorch tutorials</p>
                </a>
                <a class="nav-dropdown-item" href="https://pytorch.org/tutorials/beginner/basics/intro.html">
                  <span class="dropdown-title">Learn the Basics</span>
                  <p>Familiarize yourself with PyTorch concepts and modules</p>
                </a>
                <a class="nav-dropdown-item" href="https://pytorch.org/tutorials/recipes/recipes_index.html">
                  <span class="dropdown-title">PyTorch Recipes</span>
                  <p>Bite-size, ready-to-deploy PyTorch code examples</p>
                </a>
                <a class="nav-dropdown-item" href="https://pytorch.org/tutorials/beginner/introyt.html">
                  <span class="dropdown-title">Intro to PyTorch - YouTube Series</span>
                  <p>Master PyTorch basics with our engaging YouTube tutorial series</p>
                </a>
              </div>
            </div>
          </li>

          <li>
          <div id="resourcesDropdownButton" data-toggle="resources-dropdown" class="resources-dropdown">
              <a class="with-down-arrow">
                Ecosystem
              </a>
              <div class="resources-dropdown-menu">
                <a class="nav-dropdown-item" href="https://pytorch.org/ecosystem">
                  <span class="dropdown-title">Tools</span>
                  <p>Learn about the tools and frameworks in the PyTorch Ecosystem</p>
                </a>
                <a class="nav-dropdown-item" href="https://pytorch.org/#community-module">
                  <span class="dropdown-title">Community</span>
                  <p>Join the PyTorch developer community to contribute, learn, and get your questions answered</p>
                </a>
                <a class="nav-dropdown-item" href="https://discuss.pytorch.org/" target="_blank">
                  <span class="dropdown-title">Forums</span>
                  <p>A place to discuss PyTorch code, issues, install, research</p>
                </a>
                <a class="nav-dropdown-item" href="https://pytorch.org/resources">
                  <span class="dropdown-title">Developer Resources</span>
                  <p>Find resources and get questions answered</p>
                </a>
                <a class="nav-dropdown-item" href="https://pytorch.org/ecosystem/contributor-awards-2023">
                  <span class="dropdown-title">Contributor Awards - 2023</span>
                  <p>Award winners announced at this year's PyTorch Conference</p>
                </a>
              </div>
            </div>
          </li>

          <li>
          <div id="resourcesDropdownButton" data-toggle="resources-dropdown" class="resources-dropdown">
              <a class="with-down-arrow">
                Edge
              </a>
              <div class="resources-dropdown-menu">
                <a class="nav-dropdown-item" href="https://pytorch.org/edge">
                  <span class="dropdown-title">About PyTorch Edge</span>
                  <p>Build innovative and privacy-aware AI experiences for edge devices</p>
                </a>
                <a class="nav-dropdown-item" href="https://pytorch.org/executorch-overview">
                  <span class="dropdown-title">ExecuTorch</span>
                  <p>End-to-end solution for enabling on-device inference capabilities across mobile and edge devices</p>
                </a>
              </div>
            </div>  
          </li>

          <li class="main-menu-item">
            <div id="resourcesDropdownButton" data-toggle="resources-dropdown" class="resources-dropdown">
              <a class="with-down-arrow">
                Docs
              </a>
              <div class="resources-dropdown-menu">
                <a class="nav-dropdown-item" href="https://pytorch.org/docs/stable/index.html">
                  <span class="dropdown-title">PyTorch</span>
                  <p>Explore the documentation for comprehensive guidance on how to use PyTorch</p>
                </a>
                <a class="nav-dropdown-item" href="https://pytorch.org/pytorch-domains">
                  <span class="dropdown-title">PyTorch Domains</span>
                  <p>Read the PyTorch Domains documentation to learn more about domain-specific libraries</p>
                </a>
              </div>
            </div>
          </li>

          <li>
            <div id="resourcesDropdownButton" data-toggle="resources-dropdown" class="resources-dropdown">
              <a class="with-down-arrow">
                Blogs &amp; News
              </a>
              <div class="resources-dropdown-menu">
                <a class="nav-dropdown-item" href="https://pytorch.org/blog/">
                  <span class="dropdown-title">PyTorch Blog</span>
                  <p>Catch up on the latest technical news and happenings</p>
                </a>
                 <a class="nav-dropdown-item" href="https://pytorch.org/community-blog">
                  <span class="dropdown-title">Community Blog</span>
                  <p>Stories from the PyTorch ecosystem</p>
                </a>
                <a class="nav-dropdown-item" href="https://pytorch.org/videos">
                  <span class="dropdown-title">Videos</span>
                  <p>Learn about the latest PyTorch tutorials, news, and more</p>
                </a>
                <a class="nav-dropdown-item" href="https://pytorch.org/community-stories">
                  <span class="dropdown-title">Community Stories</span>
                  <p>Learn how our community solves real, everyday machine learning problems with PyTorch</p>
                </a>
                <a class="nav-dropdown-item" href="https://pytorch.org/events">
                  <span class="dropdown-title">Events</span>
                  <p>Find events, webinars, and podcasts</p>
                </a>
              </div>
            </div>
          </li>

          <li>
            <div id="resourcesDropdownButton" data-toggle="resources-dropdown" class="resources-dropdown">
              <a class="with-down-arrow">
                About
              </a>
              <div class="resources-dropdown-menu">
                <a class="nav-dropdown-item" href="https://pytorch.org/foundation">
                  <span class="dropdown-title">PyTorch Foundation</span>
                  <p>Learn more about the PyTorch Foundation</p>
                </a>
                <a class="nav-dropdown-item" href="https://pytorch.org/governing-board">
                  <span class="dropdown-title">Governing Board</span>
                  <p></p>
                </a>
              </div>
            </div>
          </li>

          <li class="main-menu-item">
            <div class="no-dropdown">
              <a href="https://pytorch.org/join" data-cta="join">
                Become a Member
              </a>
            </div>
          </li>
          <li>
           <div class="main-menu-item">
             <a href="https://github.com/pytorch/pytorch" class="github-icon">
             </a>
           </div>
          </li>
          <!-- TODO: This block adds the search icon to the nav bar. We will enable it later.
          <li>
            <div class="main-menu-item">
             <a href="https://github.com/pytorch/pytorch" class="search-icon">
             </a>
            </div>
          </li>
          -->
        </ul>
      </div>

      <a class="main-menu-open-button" href="#" data-behavior="open-mobile-menu"></a>
    </div>
  </div>
</div>

<body class="pytorch-body">

   

    

    <div class="table-of-contents-link-wrapper">
      <span>Table of Contents</span>
      <a href="#" class="toggle-table-of-contents" data-behavior="toggle-table-of-contents"></a>
    </div>

    <nav data-toggle="wy-nav-shift" class="pytorch-left-menu" id="pytorch-left-menu">
      <div class="pytorch-side-scroll">
        <div class="pytorch-menu pytorch-menu-vertical" data-spy="affix" role="navigation" aria-label="main navigation">
          <div class="pytorch-left-menu-search">
            

            
              
              
                <div class="version">
                  v2.5.0.dev0+bb405b0
                </div>
              
            

            


  


<div role="search">
  <form id="rtd-search-form" class="wy-form" action="../search.html" method="get">
    <input type="text" name="q" placeholder="Search Docs" />
    <input type="hidden" name="check_keywords" value="yes" />
    <input type="hidden" name="area" value="default" />
  </form>
</div>

            
          </div>

          
            
            
              
            
            
              <p class="caption" role="heading"><span class="caption-text">Getting Started</span></p>
<ul>
<li class="toctree-l1"><a class="reference internal" href="../getting_started/installation.html">Installation</a></li>
<li class="toctree-l1"><a class="reference internal" href="../getting_started/getting_started_with_windows.html">Building Torch-TensorRT on Windows</a></li>
</ul>
<p class="caption" role="heading"><span class="caption-text">Dynamo Frontend</span></p>
<ul>
<li class="toctree-l1"><a class="reference internal" href="../dynamo/torch_compile.html">TensorRT Backend for <code class="docutils literal notranslate"><span class="pre">torch.compile</span></code></a></li>
<li class="toctree-l1"><a class="reference internal" href="../dynamo/dynamo_export.html">Compiling Exported Programs with Torch-TensorRT</a></li>
</ul>
<p class="caption" role="heading"><span class="caption-text">TorchScript Frontend</span></p>
<ul>
<li class="toctree-l1"><a class="reference internal" href="../ts/creating_torchscript_module_in_python.html">Creating a TorchScript Module</a></li>
<li class="toctree-l1"><a class="reference internal" href="../ts/creating_torchscript_module_in_python.html#working-with-torchscript-in-python">Working with TorchScript in Python</a></li>
<li class="toctree-l1"><a class="reference internal" href="../ts/creating_torchscript_module_in_python.html#saving-torchscript-module-to-disk">Saving TorchScript Module to Disk</a></li>
<li class="toctree-l1"><a class="reference internal" href="../ts/getting_started_with_python_api.html">Using Torch-TensorRT in Python</a></li>
<li class="toctree-l1"><a class="reference internal" href="../ts/getting_started_with_cpp_api.html">Using Torch-TensorRT in  C++</a></li>
</ul>
<p class="caption" role="heading"><span class="caption-text">FX Frontend</span></p>
<ul>
<li class="toctree-l1"><a class="reference internal" href="../fx/getting_started_with_fx_path.html">Torch-TensorRT (FX Frontend) User Guide</a></li>
</ul>
<p class="caption" role="heading"><span class="caption-text">User Guide</span></p>
<ul class="current">
<li class="toctree-l1"><a class="reference internal" href="dynamic_shapes.html">Dynamic shapes with Torch-TensorRT</a></li>
<li class="toctree-l1 current"><a class="current reference internal" href="#">Post Training Quantization (PTQ)</a></li>
<li class="toctree-l1"><a class="reference internal" href="saving_models.html">Saving models compiled with Torch-TensorRT</a></li>
<li class="toctree-l1"><a class="reference internal" href="runtime.html">Deploying Torch-TensorRT Programs</a></li>
<li class="toctree-l1"><a class="reference internal" href="using_dla.html">DLA</a></li>
</ul>
<p class="caption" role="heading"><span class="caption-text">Tutorials</span></p>
<ul>
<li class="toctree-l1"><a class="reference internal" href="../tutorials/serving_torch_tensorrt_with_triton.html">Serving a Torch-TensorRT model with Triton</a></li>
<li class="toctree-l1"><a class="reference internal" href="../tutorials/notebooks.html">Example notebooks</a></li>
<li class="toctree-l1"><a class="reference internal" href="../tutorials/_rendered_examples/dynamo/torch_compile_resnet_example.html">Compiling ResNet using the Torch-TensorRT <cite>torch.compile</cite> Backend</a></li>
<li class="toctree-l1"><a class="reference internal" href="../tutorials/_rendered_examples/dynamo/torch_compile_transformers_example.html">Compiling a Transformer using torch.compile and TensorRT</a></li>
<li class="toctree-l1"><a class="reference internal" href="../tutorials/_rendered_examples/dynamo/torch_compile_advanced_usage.html">Torch Compile Advanced Usage</a></li>
<li class="toctree-l1"><a class="reference internal" href="../tutorials/_rendered_examples/dynamo/torch_compile_stable_diffusion.html">Torch Compile Stable Diffusion</a></li>
<li class="toctree-l1"><a class="reference internal" href="../tutorials/_rendered_examples/dynamo/custom_kernel_plugins.html">Using Custom Kernels within TensorRT Engines with Torch-TensorRT</a></li>
<li class="toctree-l1"><a class="reference internal" href="../tutorials/_rendered_examples/dynamo/custom_kernel_plugins.html#wrapping-custom-kernels-to-use-in-tensorrt">Wrapping Custom Kernels to use in TensorRT</a></li>
<li class="toctree-l1"><a class="reference internal" href="../tutorials/_rendered_examples/dynamo/custom_kernel_plugins.html#using-torch-tensorrt-to-insert-the-kernel">Using Torch-TensorRT to Insert the Kernel</a></li>
<li class="toctree-l1"><a class="reference internal" href="../tutorials/_rendered_examples/dynamo/vgg16_fp8_ptq.html">Deploy Quantized Models using Torch-TensorRT</a></li>
</ul>
<p class="caption" role="heading"><span class="caption-text">Python API Documentation</span></p>
<ul>
<li class="toctree-l1"><a class="reference internal" href="../py_api/torch_tensorrt.html">torch_tensorrt</a></li>
<li class="toctree-l1"><a class="reference internal" href="../py_api/logging.html">torch_tensorrt.logging</a></li>
<li class="toctree-l1"><a class="reference internal" href="../py_api/ptq.html">torch_tensorrt.ptq</a></li>
<li class="toctree-l1"><a class="reference internal" href="../py_api/dynamo.html">torch_tensorrt.dynamo</a></li>
<li class="toctree-l1"><a class="reference internal" href="../py_api/ts.html">torch_tensorrt.ts</a></li>
<li class="toctree-l1"><a class="reference internal" href="../py_api/fx.html">torch_tensorrt.fx</a></li>
</ul>
<p class="caption" role="heading"><span class="caption-text">C++ API Documentation</span></p>
<ul>
<li class="toctree-l1"><a class="reference internal" href="../_cpp_api/torch_tensort_cpp.html">Torch-TensorRT C++ API</a></li>
<li class="toctree-l1"><a class="reference internal" href="../_cpp_api/namespace_torch_tensorrt.html">Namespace torch_tensorrt</a></li>
<li class="toctree-l1"><a class="reference internal" href="../_cpp_api/namespace_torch_tensorrt__logging.html">Namespace torch_tensorrt::logging</a></li>
<li class="toctree-l1"><a class="reference internal" href="../_cpp_api/namespace_torch_tensorrt__torchscript.html">Namespace torch_tensorrt::torchscript</a></li>
<li class="toctree-l1"><a class="reference internal" href="../_cpp_api/namespace_torch_tensorrt__ptq.html">Namespace torch_tensorrt::ptq</a></li>
</ul>
<p class="caption" role="heading"><span class="caption-text">CLI Documentation</span></p>
<ul>
<li class="toctree-l1"><a class="reference internal" href="../cli/torchtrtc.html">torchtrtc</a></li>
</ul>
<p class="caption" role="heading"><span class="caption-text">Contributor Documentation</span></p>
<ul>
<li class="toctree-l1"><a class="reference internal" href="../contributors/system_overview.html">System Overview</a></li>
<li class="toctree-l1"><a class="reference internal" href="../contributors/dynamo_converters.html">Writing Dynamo Converters</a></li>
<li class="toctree-l1"><a class="reference internal" href="../contributors/writing_dynamo_aten_lowering_passes.html">Writing Dynamo ATen Lowering Passes</a></li>
<li class="toctree-l1"><a class="reference internal" href="../contributors/ts_converters.html">Writing TorchScript Converters</a></li>
<li class="toctree-l1"><a class="reference internal" href="../contributors/useful_links.html">Useful Links for Torch-TensorRT Development</a></li>
</ul>
<p class="caption" role="heading"><span class="caption-text">Indices</span></p>
<ul>
<li class="toctree-l1"><a class="reference internal" href="../indices/supported_ops.html">Operators Supported</a></li>
</ul>

            
          
        </div>
      </div>
    </nav>

    <div class="pytorch-container">
      <div class="pytorch-page-level-bar" id="pytorch-page-level-bar">
        <div class="pytorch-breadcrumbs-wrapper">
          















<div role="navigation" aria-label="breadcrumbs navigation">

  <ul class="pytorch-breadcrumbs">
    
      <li>
        <a href="../index.html">
          
            Docs
          
        </a> &gt;
      </li>

        
      <li>Post Training Quantization (PTQ)</li>
    
    
      <li class="pytorch-breadcrumbs-aside">
        
            
            <a href="../_sources/user_guide/ptq.rst.txt" rel="nofollow"><img src="../_static/images/view-page-source-icon.svg"></a>
          
        
      </li>
    
  </ul>

  
</div>
        </div>

        <div class="pytorch-shortcuts-wrapper" id="pytorch-shortcuts-wrapper">
          Shortcuts
        </div>
      </div>

      <section data-toggle="wy-nav-shift" id="pytorch-content-wrap" class="pytorch-content-wrap">
        <div class="pytorch-content-left">

        
          <!-- Google Tag Manager (noscript) -->
          <noscript><iframe src="https://www.googletagmanager.com/ns.html?id="
          height="0" width="0" style="display:none;visibility:hidden"></iframe></noscript>
          <!-- End Google Tag Manager (noscript) -->
          
          <div class="rst-content">
          
            <div role="main" class="main-content" itemscope="itemscope" itemtype="http://schema.org/Article">
             <article itemprop="articleBody" id="pytorch-article" class="pytorch-article">
              
  <section id="post-training-quantization-ptq">
<span id="ptq"></span><h1>Post Training Quantization (PTQ)<a class="headerlink" href="#post-training-quantization-ptq" title="Permalink to this heading">¶</a></h1>
<p>Post Training Quantization (PTQ) is a technique to reduce the required computational resources for inference
while still preserving the accuracy of your model by mapping the traditional FP32 activation space to a reduced
INT8 space. TensorRT uses a calibration step which executes your model with sample data from the target domain
and track the activations in FP32 to calibrate a mapping to INT8 that minimizes the information loss between
FP32 inference and INT8 inference.</p>
<p>Users writing TensorRT applications are required to setup a calibrator class which will provide sample data to
the TensorRT calibrator. With Torch-TensorRT we look to leverage existing infrastructure in PyTorch to make implementing
calibrators easier.</p>
<p>LibTorch provides a <code class="docutils literal notranslate"><span class="pre">DataLoader</span></code> and <code class="docutils literal notranslate"><span class="pre">Dataset</span></code> API which streamlines preprocessing and batching input data.
These APIs are exposed via both C++ and Python interface which makes it easier for the end user.
For C++ interface, we use <code class="docutils literal notranslate"><span class="pre">torch::Dataset</span></code> and <code class="docutils literal notranslate"><span class="pre">torch::data::make_data_loader</span></code> objects to construct and perform pre-processing on datasets.
The equivalent functionality in python interface uses <code class="docutils literal notranslate"><span class="pre">torch.utils.data.Dataset</span></code> and <code class="docutils literal notranslate"><span class="pre">torch.utils.data.DataLoader</span></code>.
This section of the PyTorch documentation has more information <a class="reference external" href="https://pytorch.org/tutorials/advanced/cpp_frontend.html#loading-data">https://pytorch.org/tutorials/advanced/cpp_frontend.html#loading-data</a> and <a class="reference external" href="https://pytorch.org/tutorials/recipes/recipes/loading_data_recipe.html">https://pytorch.org/tutorials/recipes/recipes/loading_data_recipe.html</a>.
Torch-TensorRT uses Dataloaders as the base of a generic calibrator implementation. So you will be able to reuse or quickly
implement a <code class="docutils literal notranslate"><span class="pre">torch::Dataset</span></code> for your target domain, place it in a DataLoader and create an INT8 Calibrator
which you can provide to Torch-TensorRT to run INT8 Calibration during compilation of your module.</p>
<section id="how-to-create-your-own-ptq-application-in-c">
<span id="writing-ptq-cpp"></span><h2>How to create your own PTQ application in C++<a class="headerlink" href="#how-to-create-your-own-ptq-application-in-c" title="Permalink to this heading">¶</a></h2>
<p>Here is an example interface of a <code class="docutils literal notranslate"><span class="pre">torch::Dataset</span></code> class for CIFAR10:</p>
<div class="highlight-c++ notranslate"><div class="highlight"><pre><span></span><span class="linenos"> 1</span><span class="c1">//cpp/ptq/datasets/cifar10.h</span>
<span class="linenos"> 2</span><span class="cp">#pragma once</span>
<span class="linenos"> 3</span>
<span class="linenos"> 4</span><span class="cp">#include</span><span class="w"> </span><span class="cpf">&quot;torch/data/datasets/base.h&quot;</span>
<span class="linenos"> 5</span><span class="cp">#include</span><span class="w"> </span><span class="cpf">&quot;torch/data/example.h&quot;</span>
<span class="linenos"> 6</span><span class="cp">#include</span><span class="w"> </span><span class="cpf">&quot;torch/types.h&quot;</span>
<span class="linenos"> 7</span>
<span class="linenos"> 8</span><span class="cp">#include</span><span class="w"> </span><span class="cpf">&lt;cstddef&gt;</span>
<span class="linenos"> 9</span><span class="cp">#include</span><span class="w"> </span><span class="cpf">&lt;string&gt;</span>
<span class="linenos">10</span>
<span class="linenos">11</span><span class="k">namespace</span><span class="w"> </span><span class="nn">datasets</span><span class="w"> </span><span class="p">{</span>
<span class="linenos">12</span><span class="c1">// The CIFAR10 Dataset</span>
<span class="linenos">13</span><span class="k">class</span><span class="w"> </span><span class="nc">CIFAR10</span><span class="w"> </span><span class="o">:</span><span class="w"> </span><span class="k">public</span><span class="w"> </span><span class="n">torch</span><span class="o">::</span><span class="n">data</span><span class="o">::</span><span class="n">datasets</span><span class="o">::</span><span class="n">Dataset</span><span class="o">&lt;</span><span class="n">CIFAR10</span><span class="o">&gt;</span><span class="w"> </span><span class="p">{</span>
<span class="linenos">14</span><span class="k">public</span><span class="o">:</span>
<span class="linenos">15</span><span class="w">    </span><span class="c1">// The mode in which the dataset is loaded</span>
<span class="linenos">16</span><span class="w">    </span><span class="k">enum</span><span class="w"> </span><span class="k">class</span><span class="w"> </span><span class="nc">Mode</span><span class="w"> </span><span class="p">{</span><span class="w"> </span><span class="n">kTrain</span><span class="p">,</span><span class="w"> </span><span class="n">kTest</span><span class="w"> </span><span class="p">};</span>
<span class="linenos">17</span>
<span class="linenos">18</span><span class="w">    </span><span class="c1">// Loads CIFAR10 from un-tarred file</span>
<span class="linenos">19</span><span class="w">    </span><span class="c1">// Dataset can be found https://www.cs.toronto.edu/~kriz/cifar-10-binary.tar.gz</span>
<span class="linenos">20</span><span class="w">    </span><span class="c1">// Root path should be the directory that contains the content of tarball</span>
<span class="linenos">21</span><span class="w">    </span><span class="k">explicit</span><span class="w"> </span><span class="n">CIFAR10</span><span class="p">(</span><span class="k">const</span><span class="w"> </span><span class="n">std</span><span class="o">::</span><span class="n">string</span><span class="o">&amp;</span><span class="w"> </span><span class="n">root</span><span class="p">,</span><span class="w"> </span><span class="n">Mode</span><span class="w"> </span><span class="n">mode</span><span class="w"> </span><span class="o">=</span><span class="w"> </span><span class="n">Mode</span><span class="o">::</span><span class="n">kTrain</span><span class="p">);</span>
<span class="linenos">22</span>
<span class="linenos">23</span><span class="w">    </span><span class="c1">// Returns the pair at index in the dataset</span>
<span class="linenos">24</span><span class="w">    </span><span class="n">torch</span><span class="o">::</span><span class="n">data</span><span class="o">::</span><span class="n">Example</span><span class="o">&lt;&gt;</span><span class="w"> </span><span class="n">get</span><span class="p">(</span><span class="kt">size_t</span><span class="w"> </span><span class="n">index</span><span class="p">)</span><span class="w"> </span><span class="k">override</span><span class="p">;</span>
<span class="linenos">25</span>
<span class="linenos">26</span><span class="w">    </span><span class="c1">// The size of the dataset</span>
<span class="linenos">27</span><span class="w">    </span><span class="n">c10</span><span class="o">::</span><span class="n">optional</span><span class="o">&lt;</span><span class="kt">size_t</span><span class="o">&gt;</span><span class="w"> </span><span class="n">size</span><span class="p">()</span><span class="w"> </span><span class="k">const</span><span class="w"> </span><span class="k">override</span><span class="p">;</span>
<span class="linenos">28</span>
<span class="linenos">29</span><span class="w">    </span><span class="c1">// The mode the dataset is in</span>
<span class="linenos">30</span><span class="w">    </span><span class="kt">bool</span><span class="w"> </span><span class="nf">is_train</span><span class="p">()</span><span class="w"> </span><span class="k">const</span><span class="w"> </span><span class="k">noexcept</span><span class="p">;</span>
<span class="linenos">31</span>
<span class="linenos">32</span><span class="w">    </span><span class="c1">// Returns all images stacked into a single tensor</span>
<span class="linenos">33</span><span class="w">    </span><span class="k">const</span><span class="w"> </span><span class="n">torch</span><span class="o">::</span><span class="n">Tensor</span><span class="o">&amp;</span><span class="w"> </span><span class="nf">images</span><span class="p">()</span><span class="w"> </span><span class="k">const</span><span class="p">;</span>
<span class="linenos">34</span>
<span class="linenos">35</span><span class="w">    </span><span class="c1">// Returns all targets stacked into a single tensor</span>
<span class="linenos">36</span><span class="w">    </span><span class="k">const</span><span class="w"> </span><span class="n">torch</span><span class="o">::</span><span class="n">Tensor</span><span class="o">&amp;</span><span class="w"> </span><span class="nf">targets</span><span class="p">()</span><span class="w"> </span><span class="k">const</span><span class="p">;</span>
<span class="linenos">37</span>
<span class="linenos">38</span><span class="w">    </span><span class="c1">// Trims the dataset to the first n pairs</span>
<span class="linenos">39</span><span class="w">    </span><span class="n">CIFAR10</span><span class="o">&amp;&amp;</span><span class="w"> </span><span class="nf">use_subset</span><span class="p">(</span><span class="kt">int64_t</span><span class="w"> </span><span class="n">new_size</span><span class="p">);</span>
<span class="linenos">40</span>
<span class="linenos">41</span>
<span class="linenos">42</span><span class="k">private</span><span class="o">:</span>
<span class="linenos">43</span><span class="w">    </span><span class="n">Mode</span><span class="w"> </span><span class="n">mode_</span><span class="p">;</span>
<span class="linenos">44</span><span class="w">    </span><span class="n">torch</span><span class="o">::</span><span class="n">Tensor</span><span class="w"> </span><span class="n">images_</span><span class="p">,</span><span class="w"> </span><span class="n">targets_</span><span class="p">;</span>
<span class="linenos">45</span><span class="p">};</span>
<span class="linenos">46</span><span class="p">}</span><span class="w"> </span><span class="c1">// namespace datasets</span>
</pre></div>
</div>
<p>This class’s implementation reads from the binary distribution of the CIFAR10 dataset and builds two tensors which hold the images and labels.</p>
<p>We use a subset of the dataset for calibration, since we don’t need the full dataset for effective calibration and calibration does
take some time, then define the preprocessing to apply to the images in the dataset and create a DataLoader from the dataset which will batch the data:</p>
<div class="highlight-c++ notranslate"><div class="highlight"><pre><span></span><span class="k">auto</span><span class="w"> </span><span class="n">calibration_dataset</span><span class="w"> </span><span class="o">=</span><span class="w"> </span><span class="n">datasets</span><span class="o">::</span><span class="n">CIFAR10</span><span class="p">(</span><span class="n">data_dir</span><span class="p">,</span><span class="w"> </span><span class="n">datasets</span><span class="o">::</span><span class="n">CIFAR10</span><span class="o">::</span><span class="n">Mode</span><span class="o">::</span><span class="n">kTest</span><span class="p">)</span>
<span class="w">                                    </span><span class="p">.</span><span class="n">use_subset</span><span class="p">(</span><span class="mi">320</span><span class="p">)</span>
<span class="w">                                    </span><span class="p">.</span><span class="n">map</span><span class="p">(</span><span class="n">torch</span><span class="o">::</span><span class="n">data</span><span class="o">::</span><span class="n">transforms</span><span class="o">::</span><span class="n">Normalize</span><span class="o">&lt;&gt;</span><span class="p">({</span><span class="mf">0.4914</span><span class="p">,</span><span class="w"> </span><span class="mf">0.4822</span><span class="p">,</span><span class="w"> </span><span class="mf">0.4465</span><span class="p">},</span>
<span class="w">                                                                            </span><span class="p">{</span><span class="mf">0.2023</span><span class="p">,</span><span class="w"> </span><span class="mf">0.1994</span><span class="p">,</span><span class="w"> </span><span class="mf">0.2010</span><span class="p">}))</span>
<span class="w">                                    </span><span class="p">.</span><span class="n">map</span><span class="p">(</span><span class="n">torch</span><span class="o">::</span><span class="n">data</span><span class="o">::</span><span class="n">transforms</span><span class="o">::</span><span class="n">Stack</span><span class="o">&lt;&gt;</span><span class="p">());</span>
<span class="k">auto</span><span class="w"> </span><span class="n">calibration_dataloader</span><span class="w"> </span><span class="o">=</span><span class="w"> </span><span class="n">torch</span><span class="o">::</span><span class="n">data</span><span class="o">::</span><span class="n">make_data_loader</span><span class="p">(</span><span class="n">std</span><span class="o">::</span><span class="n">move</span><span class="p">(</span><span class="n">calibration_dataset</span><span class="p">),</span>
<span class="w">                                                            </span><span class="n">torch</span><span class="o">::</span><span class="n">data</span><span class="o">::</span><span class="n">DataLoaderOptions</span><span class="p">().</span><span class="n">batch_size</span><span class="p">(</span><span class="mi">32</span><span class="p">)</span>
<span class="w">                                                                                            </span><span class="p">.</span><span class="n">workers</span><span class="p">(</span><span class="mi">2</span><span class="p">));</span>
</pre></div>
</div>
<p>Next we create a calibrator from the <code class="docutils literal notranslate"><span class="pre">calibration_dataloader</span></code> using the calibrator factory (found in <code class="docutils literal notranslate"><span class="pre">torch_tensorrt/ptq.h</span></code>):</p>
<div class="highlight-c++ notranslate"><div class="highlight"><pre><span></span><span class="cp">#include</span><span class="w"> </span><span class="cpf">&quot;torch_tensorrt/ptq.h&quot;</span>
<span class="p">...</span>

<span class="k">auto</span><span class="w"> </span><span class="n">calibrator</span><span class="w"> </span><span class="o">=</span><span class="w"> </span><span class="n">torch_tensorrt</span><span class="o">::</span><span class="n">ptq</span><span class="o">::</span><span class="n">make_int8_calibrator</span><span class="p">(</span><span class="n">std</span><span class="o">::</span><span class="n">move</span><span class="p">(</span><span class="n">calibration_dataloader</span><span class="p">),</span><span class="w"> </span><span class="n">calibration_cache_file</span><span class="p">,</span><span class="w"> </span><span class="nb">true</span><span class="p">);</span>
</pre></div>
</div>
<p>Here we also define a location to write a calibration cache file to, which we can use to reuse the calibration data without needing the dataset, and whether or not
we should use the cache file if it exists. There also exists a <code class="docutils literal notranslate"><span class="pre">torch_tensorrt::ptq::make_int8_cache_calibrator</span></code> factory which creates a calibrator that uses the cache
only, for cases where you may do engine building on a machine that has limited storage (i.e. no space for a full dataset) or to have a simpler deployment application.</p>
<p>The calibrator factories create a calibrator that inherits from a <code class="docutils literal notranslate"><span class="pre">nvinfer1::IInt8Calibrator</span></code> virtual class (<code class="docutils literal notranslate"><span class="pre">nvinfer1::IInt8EntropyCalibrator2</span></code> by default) which
defines the calibration algorithm used when calibrating. You can explicitly make the selection of calibration algorithm like this:</p>
<div class="highlight-c++ notranslate"><div class="highlight"><pre><span></span><span class="c1">// MinMax Calibrator is geared more towards NLP tasks</span>
<span class="k">auto</span><span class="w"> </span><span class="n">calibrator</span><span class="w"> </span><span class="o">=</span><span class="w"> </span><span class="n">torch_tensorrt</span><span class="o">::</span><span class="n">ptq</span><span class="o">::</span><span class="n">make_int8_calibrator</span><span class="o">&lt;</span><span class="n">nvinfer1</span><span class="o">::</span><span class="n">IInt8MinMaxCalibrator</span><span class="o">&gt;</span><span class="p">(</span><span class="n">std</span><span class="o">::</span><span class="n">move</span><span class="p">(</span><span class="n">calibration_dataloader</span><span class="p">),</span><span class="w"> </span><span class="n">calibration_cache_file</span><span class="p">,</span><span class="w"> </span><span class="nb">true</span><span class="p">);</span>
</pre></div>
</div>
<p>Then all that’s required to setup the module for INT8 calibration is to set the following compile settings in the <cite>torch_tensorrt::CompileSpec</cite> struct and compiling the module:</p>
<div class="highlight-c++ notranslate"><div class="highlight"><pre><span></span><span class="n">std</span><span class="o">::</span><span class="n">vector</span><span class="o">&lt;</span><span class="n">std</span><span class="o">::</span><span class="n">vector</span><span class="o">&lt;</span><span class="kt">int64_t</span><span class="o">&gt;&gt;</span><span class="w"> </span><span class="n">input_shape</span><span class="w"> </span><span class="o">=</span><span class="w"> </span><span class="p">{{</span><span class="mi">32</span><span class="p">,</span><span class="w"> </span><span class="mi">3</span><span class="p">,</span><span class="w"> </span><span class="mi">32</span><span class="p">,</span><span class="w"> </span><span class="mi">32</span><span class="p">}};</span>
<span class="c1">/// Configure settings for compilation</span>
<span class="k">auto</span><span class="w"> </span><span class="n">compile_spec</span><span class="w"> </span><span class="o">=</span><span class="w"> </span><span class="n">torch_tensorrt</span><span class="o">::</span><span class="n">CompileSpec</span><span class="p">({</span><span class="n">input_shape</span><span class="p">});</span>
<span class="c1">/// Set operating precision to INT8</span>
<span class="n">compile_spec</span><span class="p">.</span><span class="n">enabled_precisions</span><span class="p">.</span><span class="n">insert</span><span class="p">(</span><span class="n">torch</span><span class="o">::</span><span class="n">kF16</span><span class="p">);</span>
<span class="n">compile_spec</span><span class="p">.</span><span class="n">enabled_precisions</span><span class="p">.</span><span class="n">insert</span><span class="p">(</span><span class="n">torch</span><span class="o">::</span><span class="n">kI8</span><span class="p">);</span>
<span class="c1">/// Use the TensorRT Entropy Calibrator</span>
<span class="n">compile_spec</span><span class="p">.</span><span class="n">ptq_calibrator</span><span class="w"> </span><span class="o">=</span><span class="w"> </span><span class="n">calibrator</span><span class="p">;</span>

<span class="k">auto</span><span class="w"> </span><span class="n">trt_mod</span><span class="w"> </span><span class="o">=</span><span class="w"> </span><span class="n">torch_tensorrt</span><span class="o">::</span><span class="n">CompileGraph</span><span class="p">(</span><span class="n">mod</span><span class="p">,</span><span class="w"> </span><span class="n">compile_spec</span><span class="p">);</span>
</pre></div>
</div>
<p>If you have an existing Calibrator implementation for TensorRT you may directly set the <code class="docutils literal notranslate"><span class="pre">ptq_calibrator</span></code> field with a pointer to your calibrator and it will work as well.
From here not much changes in terms of how execution works. You are still able to fully use LibTorch as the sole interface for inference. Data should remain
in FP32 precision when it’s passed into <cite>trt_mod.forward</cite>. There exists an example application in the Torch-TensorRT demo that takes you from training a VGG16 network on
CIFAR10 to deploying in INT8 with Torch-TensorRT here: <a class="reference external" href="https://github.com/pytorch/TensorRT/tree/master/cpp/ptq">https://github.com/pytorch/TensorRT/tree/master/cpp/ptq</a></p>
</section>
<section id="how-to-create-your-own-ptq-application-in-python">
<span id="writing-ptq-python"></span><h2>How to create your own PTQ application in Python<a class="headerlink" href="#how-to-create-your-own-ptq-application-in-python" title="Permalink to this heading">¶</a></h2>
<p>Torch-TensorRT Python API provides an easy and convenient way to use pytorch dataloaders with TensorRT calibrators. <code class="docutils literal notranslate"><span class="pre">DataLoaderCalibrator</span></code> class can be used to create
a TensorRT calibrator by providing desired configuration. The following code demonstrates an example on how to use it</p>
<div class="highlight-python notranslate"><div class="highlight"><pre><span></span><span class="n">testing_dataset</span> <span class="o">=</span> <span class="n">torchvision</span><span class="o">.</span><span class="n">datasets</span><span class="o">.</span><span class="n">CIFAR10</span><span class="p">(</span>
    <span class="n">root</span><span class="o">=</span><span class="s2">&quot;./data&quot;</span><span class="p">,</span>
    <span class="n">train</span><span class="o">=</span><span class="kc">False</span><span class="p">,</span>
    <span class="n">download</span><span class="o">=</span><span class="kc">True</span><span class="p">,</span>
    <span class="n">transform</span><span class="o">=</span><span class="n">transforms</span><span class="o">.</span><span class="n">Compose</span><span class="p">(</span>
        <span class="p">[</span>
            <span class="n">transforms</span><span class="o">.</span><span class="n">ToTensor</span><span class="p">(),</span>
            <span class="n">transforms</span><span class="o">.</span><span class="n">Normalize</span><span class="p">((</span><span class="mf">0.4914</span><span class="p">,</span> <span class="mf">0.4822</span><span class="p">,</span> <span class="mf">0.4465</span><span class="p">),</span> <span class="p">(</span><span class="mf">0.2023</span><span class="p">,</span> <span class="mf">0.1994</span><span class="p">,</span> <span class="mf">0.2010</span><span class="p">)),</span>
        <span class="p">]</span>
    <span class="p">),</span>
<span class="p">)</span>

<span class="n">testing_dataloader</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">utils</span><span class="o">.</span><span class="n">data</span><span class="o">.</span><span class="n">DataLoader</span><span class="p">(</span>
    <span class="n">testing_dataset</span><span class="p">,</span> <span class="n">batch_size</span><span class="o">=</span><span class="mi">1</span><span class="p">,</span> <span class="n">shuffle</span><span class="o">=</span><span class="kc">False</span><span class="p">,</span> <span class="n">num_workers</span><span class="o">=</span><span class="mi">1</span>
<span class="p">)</span>
<span class="n">calibrator</span> <span class="o">=</span> <span class="n">torch_tensorrt</span><span class="o">.</span><span class="n">ptq</span><span class="o">.</span><span class="n">DataLoaderCalibrator</span><span class="p">(</span>
    <span class="n">testing_dataloader</span><span class="p">,</span>
    <span class="n">cache_file</span><span class="o">=</span><span class="s2">&quot;./calibration.cache&quot;</span><span class="p">,</span>
    <span class="n">use_cache</span><span class="o">=</span><span class="kc">False</span><span class="p">,</span>
    <span class="n">algo_type</span><span class="o">=</span><span class="n">torch_tensorrt</span><span class="o">.</span><span class="n">ptq</span><span class="o">.</span><span class="n">CalibrationAlgo</span><span class="o">.</span><span class="n">ENTROPY_CALIBRATION_2</span><span class="p">,</span>
    <span class="n">device</span><span class="o">=</span><span class="n">torch</span><span class="o">.</span><span class="n">device</span><span class="p">(</span><span class="s2">&quot;cuda:0&quot;</span><span class="p">),</span>
<span class="p">)</span>

<span class="n">trt_mod</span> <span class="o">=</span> <span class="n">torch_tensorrt</span><span class="o">.</span><span class="n">compile</span><span class="p">(</span><span class="n">model</span><span class="p">,</span> <span class="n">inputs</span><span class="o">=</span><span class="p">[</span><span class="n">torch_tensorrt</span><span class="o">.</span><span class="n">Input</span><span class="p">((</span><span class="mi">1</span><span class="p">,</span> <span class="mi">3</span><span class="p">,</span> <span class="mi">32</span><span class="p">,</span> <span class="mi">32</span><span class="p">))],</span>
                                    <span class="n">enabled_precisions</span><span class="o">=</span><span class="p">{</span><span class="n">torch</span><span class="o">.</span><span class="n">float</span><span class="p">,</span> <span class="n">torch</span><span class="o">.</span><span class="n">half</span><span class="p">,</span> <span class="n">torch</span><span class="o">.</span><span class="n">int8</span><span class="p">},</span>
                                    <span class="n">calibrator</span><span class="o">=</span><span class="n">calibrator</span><span class="p">,</span>
                                    <span class="n">device</span><span class="o">=</span><span class="p">{</span>
                                         <span class="s2">&quot;device_type&quot;</span><span class="p">:</span> <span class="n">torch_tensorrt</span><span class="o">.</span><span class="n">DeviceType</span><span class="o">.</span><span class="n">GPU</span><span class="p">,</span>
                                         <span class="s2">&quot;gpu_id&quot;</span><span class="p">:</span> <span class="mi">0</span><span class="p">,</span>
                                         <span class="s2">&quot;dla_core&quot;</span><span class="p">:</span> <span class="mi">0</span><span class="p">,</span>
                                         <span class="s2">&quot;allow_gpu_fallback&quot;</span><span class="p">:</span> <span class="kc">False</span><span class="p">,</span>
                                         <span class="s2">&quot;disable_tf32&quot;</span><span class="p">:</span> <span class="kc">False</span>
                                     <span class="p">})</span>
</pre></div>
</div>
<p>In the cases where there is a pre-existing calibration cache file that users want to use, <code class="docutils literal notranslate"><span class="pre">CacheCalibrator</span></code> can be used without any dataloaders. The following example demonstrates how
to use <code class="docutils literal notranslate"><span class="pre">CacheCalibrator</span></code> in INT8 mode.</p>
<div class="highlight-python notranslate"><div class="highlight"><pre><span></span><span class="n">calibrator</span> <span class="o">=</span> <span class="n">torch_tensorrt</span><span class="o">.</span><span class="n">ptq</span><span class="o">.</span><span class="n">CacheCalibrator</span><span class="p">(</span><span class="s2">&quot;./calibration.cache&quot;</span><span class="p">)</span>

<span class="n">trt_mod</span> <span class="o">=</span> <span class="n">torch_tensorrt</span><span class="o">.</span><span class="n">compile</span><span class="p">(</span><span class="n">model</span><span class="p">,</span> <span class="n">inputs</span><span class="o">=</span><span class="p">[</span><span class="n">torch_tensorrt</span><span class="o">.</span><span class="n">Input</span><span class="p">([</span><span class="mi">1</span><span class="p">,</span> <span class="mi">3</span><span class="p">,</span> <span class="mi">32</span><span class="p">,</span> <span class="mi">32</span><span class="p">])],</span>
                                      <span class="n">enabled_precisions</span><span class="o">=</span><span class="p">{</span><span class="n">torch</span><span class="o">.</span><span class="n">float</span><span class="p">,</span> <span class="n">torch</span><span class="o">.</span><span class="n">half</span><span class="p">,</span> <span class="n">torch</span><span class="o">.</span><span class="n">int8</span><span class="p">},</span>
                                      <span class="n">calibrator</span><span class="o">=</span><span class="n">calibrator</span><span class="p">)</span>
</pre></div>
</div>
<p>If you already have an existing calibrator class (implemented directly using TensorRT API), you can directly set the calibrator field to your class which can be very convenient.
For a demo on how PTQ can be performed on a VGG network using Torch-TensorRT API, you can refer to <a class="reference external" href="https://github.com/pytorch/TensorRT/blob/master/tests/py/test_ptq_dataloader_calibrator.py">https://github.com/pytorch/TensorRT/blob/master/tests/py/test_ptq_dataloader_calibrator.py</a>
and <a class="reference external" href="https://github.com/pytorch/TensorRT/blob/master/tests/py/test_ptq_trt_calibrator.py">https://github.com/pytorch/TensorRT/blob/master/tests/py/test_ptq_trt_calibrator.py</a></p>
<section id="citations">
<h3>Citations<a class="headerlink" href="#citations" title="Permalink to this heading">¶</a></h3>
<p>Krizhevsky, A., &amp; Hinton, G. (2009). Learning multiple layers of features from tiny images.</p>
<p>Simonyan, K., &amp; Zisserman, A. (2014). Very deep convolutional networks for large-scale image recognition. arXiv preprint arXiv:1409.1556.</p>
</section>
</section>
</section>


             </article>
             
            </div>
            <footer>
  
    <div class="rst-footer-buttons" role="navigation" aria-label="footer navigation">
      
        <a href="saving_models.html" class="btn btn-neutral float-right" title="Saving models compiled with Torch-TensorRT" accesskey="n" rel="next">Next <img src="../_static/images/chevron-right-orange.svg" class="next-page"></a>
      
      
        <a href="dynamic_shapes.html" class="btn btn-neutral" title="Dynamic shapes with Torch-TensorRT" accesskey="p" rel="prev"><img src="../_static/images/chevron-right-orange.svg" class="previous-page"> Previous</a>
      
    </div>
  

  

    <hr>

  

  <div role="contentinfo">
    <p>
        &copy; Copyright 2022, NVIDIA Corporation.

    </p>
  </div>
    
      <div>
        Built with <a href="http://sphinx-doc.org/">Sphinx</a> using a <a href="https://github.com/rtfd/sphinx_rtd_theme">theme</a> provided by <a href="https://readthedocs.org">Read the Docs</a>.
      </div>
     

</footer>

          </div>
        </div>

        <div class="pytorch-content-right" id="pytorch-content-right">
          <div class="pytorch-right-menu" id="pytorch-right-menu">
            <div class="pytorch-side-scroll" id="pytorch-side-scroll-right">
              <ul>
<li><a class="reference internal" href="#">Post Training Quantization (PTQ)</a><ul>
<li><a class="reference internal" href="#how-to-create-your-own-ptq-application-in-c">How to create your own PTQ application in C++</a></li>
<li><a class="reference internal" href="#how-to-create-your-own-ptq-application-in-python">How to create your own PTQ application in Python</a><ul>
<li><a class="reference internal" href="#citations">Citations</a></li>
</ul>
</li>
</ul>
</li>
</ul>

            </div>
          </div>
        </div>
      </section>
    </div>

  


  

     
       <script id="documentation_options" data-url_root="../" src="../_static/documentation_options.js"></script>
         <script src="../_static/jquery.js"></script>
         <script src="../_static/underscore.js"></script>
         <script src="../_static/_sphinx_javascript_frameworks_compat.js"></script>
         <script src="../_static/doctools.js"></script>
         <script src="../_static/collapsible-lists/js/CollapsibleLists.compressed.js"></script>
         <script src="../_static/collapsible-lists/js/apply-collapsible-lists.js"></script>
         <script crossorigin="anonymous" integrity="sha256-Ae2Vz/4ePdIu6ZyI/5ZGsYnb+m0JlOmKPjt6XZ9JJkA=" src="https://cdnjs.cloudflare.com/ajax/libs/require.js/2.3.4/require.min.js"></script>
     

  

  <script type="text/javascript" src="../_static/js/vendor/popper.min.js"></script>
  <script type="text/javascript" src="../_static/js/vendor/bootstrap.min.js"></script>
  <script src="https://cdnjs.cloudflare.com/ajax/libs/list.js/1.5.0/list.min.js"></script>
  <script type="text/javascript" src="../_static/js/theme.js"></script>

  <script type="text/javascript">
      jQuery(function () {
          SphinxRtdTheme.Navigation.enable(true);
      });
  </script> 

  <!-- Begin Footer -->

  <div class="container-fluid docs-tutorials-resources" id="docs-tutorials-resources">
    <div class="container">
      <div class="row">
        <div class="col-md-4 text-center">
          <h2>Docs</h2>
          <p>Access comprehensive developer documentation for PyTorch</p>
          <a class="with-right-arrow" href="https://pytorch.org/docs/stable/index.html">View Docs</a>
        </div>

        <div class="col-md-4 text-center">
          <h2>Tutorials</h2>
          <p>Get in-depth tutorials for beginners and advanced developers</p>
          <a class="with-right-arrow" href="https://pytorch.org/tutorials">View Tutorials</a>
        </div>

        <div class="col-md-4 text-center">
          <h2>Resources</h2>
          <p>Find development resources and get your questions answered</p>
          <a class="with-right-arrow" href="https://pytorch.org/resources">View Resources</a>
        </div>
      </div>
    </div>
  </div>

  <footer class="site-footer">
    <div class="container footer-container">
      <div class="footer-logo-wrapper">
        <a href="https://pytorch.org/" class="footer-logo"></a>
      </div>

      <div class="footer-links-wrapper">
        <div class="footer-links-col">
          <ul>
            <li class="list-title"><a href="https://pytorch.org/">PyTorch</a></li>
            <li><a href="https://pytorch.org/get-started">Get Started</a></li>
            <li><a href="https://pytorch.org/features">Features</a></li>
            <li><a href="https://pytorch.org/ecosystem">Ecosystem</a></li>
            <li><a href="https://pytorch.org/blog/">Blog</a></li>
            <li><a href="https://github.com/pytorch/pytorch/blob/master/CONTRIBUTING.md">Contributing</a></li>
          </ul>
        </div>

        <div class="footer-links-col">
          <ul>
            <li class="list-title"><a href="https://pytorch.org/resources">Resources</a></li>
            <li><a href="https://pytorch.org/tutorials">Tutorials</a></li>
            <li><a href="https://pytorch.org/docs/stable/index.html">Docs</a></li>
            <li><a href="https://discuss.pytorch.org" target="_blank">Discuss</a></li>
            <li><a href="https://github.com/pytorch/pytorch/issues" target="_blank">Github Issues</a></li>
            <li><a href="https://pytorch.org/assets/brand-guidelines/PyTorch-Brand-Guidelines.pdf" target="_blank">Brand Guidelines</a></li>
          </ul>
        </div>

        <div class="footer-links-col">
          <ul>
            <li class="list-title">Stay up to date</li>
            <li><a href="https://www.facebook.com/pytorch" target="_blank">Facebook</a></li>
            <li><a href="https://twitter.com/pytorch" target="_blank">Twitter</a></li>
            <li><a href="https://www.youtube.com/pytorch" target="_blank">YouTube</a></li>
            <li><a href="https://www.linkedin.com/company/pytorch" target="_blank">LinkedIn</a></li>
          </ul>  
          </div>

        <div class="footer-links-col">
          <ul>
            <li class="list-title">PyTorch Podcasts</li>
            <li><a href="https://open.spotify.com/show/6UzHKeiy368jKfQMKKvJY5" target="_blank">Spotify</a></li>
            <li><a href="https://podcasts.apple.com/us/podcast/pytorch-developer-podcast/id1566080008" target="_blank">Apple</a></li>
            <li><a href="https://www.google.com/podcasts?feed=aHR0cHM6Ly9mZWVkcy5zaW1wbGVjYXN0LmNvbS9PQjVGa0lsOA%3D%3D" target="_blank">Google</a></li>
            <li><a href="https://music.amazon.com/podcasts/7a4e6f0e-26c2-49e9-a478-41bd244197d0/PyTorch-Developer-Podcast?" target="_blank">Amazon</a></li>
          </ul>
         </div>
        </div>
        
        <div class="privacy-policy">
          <ul>
            <li class="privacy-policy-links"><a href="https://www.linuxfoundation.org/terms/" target="_blank">Terms</a></li>
            <li class="privacy-policy-links">|</li>
            <li class="privacy-policy-links"><a href="https://www.linuxfoundation.org/privacy-policy/" target="_blank">Privacy</a></li>
          </ul>
        </div>
        <div class="copyright">
        <p>© Copyright The Linux Foundation. The PyTorch Foundation is a project of The Linux Foundation.
          For web site terms of use, trademark policy and other policies applicable to The PyTorch Foundation please see
          <a href="https://www.linuxfoundation.org/policies/">www.linuxfoundation.org/policies/</a>. The PyTorch Foundation supports the PyTorch open source
          project, which has been established as PyTorch Project a Series of LF Projects, LLC. For policies applicable to the PyTorch Project a Series of LF Projects, LLC,
          please see <a href="https://www.lfprojects.org/policies/">www.lfprojects.org/policies/</a>.</p>
      </div>
     </div>

  </footer>

  <div class="cookie-banner-wrapper">
  <div class="container">
    <p class="gdpr-notice">To analyze traffic and optimize your experience, we serve cookies on this site. By clicking or navigating, you agree to allow our usage of cookies. As the current maintainers of this site, Facebook’s Cookies Policy applies. Learn more, including about available controls: <a href="https://www.facebook.com/policies/cookies/">Cookies Policy</a>.</p>
    <img class="close-button" src="../_static/images/pytorch-x.svg">
  </div>
</div>

  <!-- End Footer -->

  <!-- Begin Mobile Menu -->

  <!-- Mobile navigation drawer: header (logo + close control) followed by the
       link sections. Shown/hidden by theme JS; presumably mobileMenu.bind()
       (see the inline script at the end of the page) attaches the handlers — verify. -->
  <div class="mobile-main-menu">
    <div class="container-fluid">
      <div class="container">
        <div class="mobile-main-menu-header-container">
          <a class="header-logo" href="https://pytorch.org/" aria-label="PyTorch"></a>
          <a class="main-menu-close-button" href="#" data-behavior="close-mobile-menu"></a>
        </div>
      </div>
    </div>

    <div class="mobile-main-menu-links-container">
      <div class="main-menu">
        <!-- NOTE(review): each <ul class="resources-mobile-menu-items"> below is a
             direct child of the outer <ul>, which is invalid HTML (a <ul> may only
             contain <li> children). The theme CSS/JS appears to depend on the
             title-<li>-followed-by-sibling-<ul> structure, so it is left unchanged
             here; confirm against the theme scripts before restructuring. -->
        <!-- NOTE(review): the href-less <a> elements are inert placeholders used as
             section titles; presumably theme JS toggles the adjacent list — verify. -->
        <ul>
           <li class="resources-mobile-menu-title">
             <a>Learn</a>
           </li>
           <ul class="resources-mobile-menu-items">
             <li>
               <a href="https://pytorch.org/get-started">Get Started</a>
             </li>
             <li>
               <a href="https://pytorch.org/tutorials">Tutorials</a>
             </li>
             <li>
               <a href="https://pytorch.org/tutorials/beginner/basics/intro.html">Learn the Basics</a>
             </li>
             <li>
               <a href="https://pytorch.org/tutorials/recipes/recipes_index.html">PyTorch Recipes</a>
             </li>
             <li>
               <a href="https://pytorch.org/tutorials/beginner/introyt.html">Introduction to PyTorch - YouTube Series</a>
             </li>
           </ul>
           <li class="resources-mobile-menu-title">
             <a>Ecosystem</a>
           </li>
           <ul class="resources-mobile-menu-items">
             <li>
               <a href="https://pytorch.org/ecosystem">Tools</a>
             </li>
             <li>
               <a href="https://pytorch.org/#community-module">Community</a>
             </li>
             <li>
               <a href="https://discuss.pytorch.org/">Forums</a>
             </li>
             <li>
               <a href="https://pytorch.org/resources">Developer Resources</a>
             </li>
             <li>
               <a href="https://pytorch.org/ecosystem/contributor-awards-2023">Contributor Awards - 2023</a>
             </li>
           </ul>

           <li class="resources-mobile-menu-title">
             <a>Edge</a>
           </li>

           <ul class="resources-mobile-menu-items">
             <li>
               <a href="https://pytorch.org/edge">About PyTorch Edge</a>
             </li>
             
             <li>
               <a href="https://pytorch.org/executorch-overview">ExecuTorch</a>
             </li>
           </ul>

           <li class="resources-mobile-menu-title">
             <a>Docs</a>
           </li>

           <ul class="resources-mobile-menu-items">
            <li>
              <a href="https://pytorch.org/docs/stable/index.html">PyTorch</a>
            </li>

            <li>
              <a href="https://pytorch.org/pytorch-domains">PyTorch Domains</a>
            </li>
          </ul>

          <li class="resources-mobile-menu-title">
            <a>Blog & News</a>
          </li>
            
           <ul class="resources-mobile-menu-items">
            <li>
              <a href="https://pytorch.org/blog/">PyTorch Blog</a>
            </li>
            <li>
              <a href="https://pytorch.org/community-blog">Community Blog</a>
            </li>

            <li>
              <a href="https://pytorch.org/videos">Videos</a>
            </li>

            <li>
              <a href="https://pytorch.org/community-stories">Community Stories</a>
            </li>
            <li>
              <a href="https://pytorch.org/events">Events</a>
            </li>
          </ul>
          
          <li class="resources-mobile-menu-title">
            <a>About</a>
          </li>

          <ul class="resources-mobile-menu-items">
            <li>
              <a href="https://pytorch.org/foundation">PyTorch Foundation</a>
            </li>
            <li>
              <a href="https://pytorch.org/governing-board">Governing Board</a>
            </li>
          </ul>
        </ul>
      </div>
    </div>
  </div>

  <!-- End Mobile Menu -->

  <!-- type="text/javascript" is the default for <script> and is omitted. -->
  <script src="../_static/js/vendor/anchor.min.js"></script>

  <script>
    // Initialize theme behaviors once the DOM is ready. jQuery ($) and the
    // handler objects below (mobileMenu, sideMenus, …) are provided by theme
    // scripts loaded earlier in the page.
    $(document).ready(function() {
      mobileMenu.bind();
      mobileTOC.bind();
      pytorchAnchors.bind();
      sideMenus.bind();
      scrollToAnchor.bind();
      highlightNavigation.bind();
      mainMenuDropdown.bind();
      filterTags.bind();

      // Add class to links that have code blocks, since we cannot create links in code blocks
      $("article.pytorch-article a span.pre").each(function() {
        $(this).closest("a").addClass("has-code");
      });
    });
  </script>
</body>
</html>