


<!DOCTYPE html>
<!--[if IE 8]><html class="no-js lt-ie9" lang="en" > <![endif]-->
<!--[if gt IE 8]><!--> <html class="no-js" lang="en" > <!--<![endif]-->
<head>
  <meta charset="utf-8">
  
  <meta name="viewport" content="width=device-width, initial-scale=1.0">
  
  <title>Quantization &mdash; PyTorch master documentation</title>
  

  
  
  
  
    <link rel="canonical" href="https://pytorch.org/docs/stable/quantization.html"/>
  

  

  
  
    

  

  <link rel="stylesheet" href="_static/css/theme.css" type="text/css" />
  <!-- <link rel="stylesheet" href="_static/pygments.css" type="text/css" /> -->
  <link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/katex@0.10.0-beta/dist/katex.min.css" type="text/css" />
  <link rel="stylesheet" href="_static/css/jit.css" type="text/css" />
  <link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/katex@0.11.1/dist/katex.min.css" type="text/css" />
  <link rel="stylesheet" href="_static/katex-math.css" type="text/css" />
    <link rel="index" title="Index" href="genindex.html" />
    <link rel="search" title="Search" href="search.html" />
    <link rel="next" title="Distributed RPC Framework" href="rpc/index.html" />
    <link rel="prev" title="torch.optim" href="optim.html" /> 

  
  <script src="_static/js/modernizr.min.js"></script>

  <!-- Preload the theme fonts -->

<link rel="preload" href="_static/fonts/FreightSans/freight-sans-book.woff2" as="font" type="font/woff2" crossorigin="anonymous">
<link rel="preload" href="_static/fonts/FreightSans/freight-sans-medium.woff2" as="font" type="font/woff2" crossorigin="anonymous">
<link rel="preload" href="_static/fonts/IBMPlexMono/IBMPlexMono-Medium.woff2" as="font" type="font/woff2" crossorigin="anonymous">
<link rel="preload" href="_static/fonts/FreightSans/freight-sans-bold.woff2" as="font" type="font/woff2" crossorigin="anonymous">
<link rel="preload" href="_static/fonts/FreightSans/freight-sans-medium-italic.woff2" as="font" type="font/woff2" crossorigin="anonymous">
<link rel="preload" href="_static/fonts/IBMPlexMono/IBMPlexMono-SemiBold.woff2" as="font" type="font/woff2" crossorigin="anonymous">

<!-- Preload the katex fonts -->

<link rel="preload" href="https://cdn.jsdelivr.net/npm/katex@0.10.0/dist/fonts/KaTeX_Math-Italic.woff2" as="font" type="font/woff2" crossorigin="anonymous">
<link rel="preload" href="https://cdn.jsdelivr.net/npm/katex@0.10.0/dist/fonts/KaTeX_Main-Regular.woff2" as="font" type="font/woff2" crossorigin="anonymous">
<link rel="preload" href="https://cdn.jsdelivr.net/npm/katex@0.10.0/dist/fonts/KaTeX_Main-Bold.woff2" as="font" type="font/woff2" crossorigin="anonymous">
<link rel="preload" href="https://cdn.jsdelivr.net/npm/katex@0.10.0/dist/fonts/KaTeX_Size1-Regular.woff2" as="font" type="font/woff2" crossorigin="anonymous">
<link rel="preload" href="https://cdn.jsdelivr.net/npm/katex@0.10.0/dist/fonts/KaTeX_Size4-Regular.woff2" as="font" type="font/woff2" crossorigin="anonymous">
<link rel="preload" href="https://cdn.jsdelivr.net/npm/katex@0.10.0/dist/fonts/KaTeX_Size2-Regular.woff2" as="font" type="font/woff2" crossorigin="anonymous">
<link rel="preload" href="https://cdn.jsdelivr.net/npm/katex@0.10.0/dist/fonts/KaTeX_Size3-Regular.woff2" as="font" type="font/woff2" crossorigin="anonymous">
<link rel="preload" href="https://cdn.jsdelivr.net/npm/katex@0.10.0/dist/fonts/KaTeX_Caligraphic-Regular.woff2" as="font" type="font/woff2" crossorigin="anonymous">
</head>

<div class="container-fluid header-holder tutorials-header" id="header-holder">
  <div class="container">
    <div class="header-container">
      <a class="header-logo" href="https://pytorch.org/" aria-label="PyTorch"></a>

      <div class="main-menu">
        <ul>
          <li>
            <a href="https://pytorch.org/get-started">Get Started</a>
          </li>

          <li>
            <div class="ecosystem-dropdown">
              <a id="dropdownMenuButton" data-toggle="ecosystem-dropdown">
                Ecosystem
              </a>
              <div class="ecosystem-dropdown-menu">
                <a class="nav-dropdown-item" href="https://pytorch.org/hub">
                  <span class="dropdown-title">Models (Beta)</span>
                  <p>Discover, publish, and reuse pre-trained models</p>
                </a>
                <a class="nav-dropdown-item" href="https://pytorch.org/ecosystem">
                  <span class="dropdown-title">Tools &amp; Libraries</span>
                  <p>Explore the ecosystem of tools and libraries</p>
                </a>
              </div>
            </div>
          </li>

          <li>
            <a href="https://pytorch.org/mobile">Mobile</a>
          </li>

          <li>
            <a href="https://pytorch.org/blog/">Blog</a>
          </li>

          <li>
            <a href="https://pytorch.org/tutorials">Tutorials</a>
          </li>

          <li class="active">
            <a href="https://pytorch.org/docs/stable/index.html">Docs</a>
          </li>

          <li>
            <div class="resources-dropdown">
              <a id="resourcesDropdownButton" data-toggle="resources-dropdown">
                Resources
              </a>
              <div class="resources-dropdown-menu">
                <a class="nav-dropdown-item" href="https://pytorch.org/resources">
                  <span class="dropdown-title">Developer Resources</span>
                  <p>Find resources and get questions answered</p>
                </a>
                <a class="nav-dropdown-item" href="https://pytorch.org/features">
                  <span class="dropdown-title">About</span>
                  <p>Learn about PyTorch’s features and capabilities</p>
                </a>
              </div>
            </div>
          </li>

          <li>
            <a href="https://github.com/pytorch/pytorch">Github</a>
          </li>
        </ul>
      </div>

      <a class="main-menu-open-button" href="#" data-behavior="open-mobile-menu"></a>
    </div>

  </div>
</div>


<body class="pytorch-body">

   

    

    <div class="table-of-contents-link-wrapper">
      <span>Table of Contents</span>
      <a href="#" class="toggle-table-of-contents" data-behavior="toggle-table-of-contents"></a>
    </div>

    <nav data-toggle="wy-nav-shift" class="pytorch-left-menu" id="pytorch-left-menu">
      <div class="pytorch-side-scroll">
        <div class="pytorch-menu pytorch-menu-vertical" data-spy="affix" role="navigation" aria-label="main navigation">
          <div class="pytorch-left-menu-search">
            

            
              
              
                <div class="version">
                  master (1.5.0)
                </div>
              
            

            


  


<div role="search">
  <form id="rtd-search-form" class="wy-form" action="search.html" method="get">
    <input type="text" name="q" placeholder="Search Docs" />
    <input type="hidden" name="check_keywords" value="yes" />
    <input type="hidden" name="area" value="default" />
  </form>
</div>

            
          </div>

          
<div>
  <a style="color:#F05732" href="https://pytorch.org/docs/stable/quantization.html">
    You are viewing unstable developer preview docs.
    Click here to view docs for latest stable release.
  </a>
</div>

            
            
              
            
            
              <p class="caption"><span class="caption-text">Notes</span></p>
<ul>
<li class="toctree-l1"><a class="reference internal" href="notes/amp_examples.html">Automatic Mixed Precision examples</a></li>
<li class="toctree-l1"><a class="reference internal" href="notes/autograd.html">Autograd mechanics</a></li>
<li class="toctree-l1"><a class="reference internal" href="notes/broadcasting.html">Broadcasting semantics</a></li>
<li class="toctree-l1"><a class="reference internal" href="notes/cpu_threading_torchscript_inference.html">CPU threading and TorchScript inference</a></li>
<li class="toctree-l1"><a class="reference internal" href="notes/cuda.html">CUDA semantics</a></li>
<li class="toctree-l1"><a class="reference internal" href="notes/ddp.html">Distributed Data Parallel</a></li>
<li class="toctree-l1"><a class="reference internal" href="notes/extending.html">Extending PyTorch</a></li>
<li class="toctree-l1"><a class="reference internal" href="notes/faq.html">Frequently Asked Questions</a></li>
<li class="toctree-l1"><a class="reference internal" href="notes/large_scale_deployments.html">Features for large-scale deployments</a></li>
<li class="toctree-l1"><a class="reference internal" href="notes/multiprocessing.html">Multiprocessing best practices</a></li>
<li class="toctree-l1"><a class="reference internal" href="notes/randomness.html">Reproducibility</a></li>
<li class="toctree-l1"><a class="reference internal" href="notes/serialization.html">Serialization semantics</a></li>
<li class="toctree-l1"><a class="reference internal" href="notes/windows.html">Windows FAQ</a></li>
</ul>
<p class="caption"><span class="caption-text">Language Bindings</span></p>
<ul>
<li class="toctree-l1"><a class="reference external" href="https://pytorch.org/cppdocs/">C++ API</a></li>
<li class="toctree-l1"><a class="reference internal" href="packages.html">Javadoc</a></li>
</ul>
<p class="caption"><span class="caption-text">Python API</span></p>
<ul class="current">
<li class="toctree-l1"><a class="reference internal" href="torch.html">torch</a></li>
<li class="toctree-l1"><a class="reference internal" href="nn.html">torch.nn</a></li>
<li class="toctree-l1"><a class="reference internal" href="nn.functional.html">torch.nn.functional</a></li>
<li class="toctree-l1"><a class="reference internal" href="tensors.html">torch.Tensor</a></li>
<li class="toctree-l1"><a class="reference internal" href="tensor_attributes.html">Tensor Attributes</a></li>
<li class="toctree-l1"><a class="reference internal" href="tensor_view.html">Tensor Views</a></li>
<li class="toctree-l1"><a class="reference internal" href="autograd.html">torch.autograd</a></li>
<li class="toctree-l1"><a class="reference internal" href="cuda.html">torch.cuda</a></li>
<li class="toctree-l1"><a class="reference internal" href="amp.html">torch.cuda.amp</a></li>
<li class="toctree-l1"><a class="reference internal" href="distributed.html">torch.distributed</a></li>
<li class="toctree-l1"><a class="reference internal" href="distributions.html">torch.distributions</a></li>
<li class="toctree-l1"><a class="reference internal" href="hub.html">torch.hub</a></li>
<li class="toctree-l1"><a class="reference internal" href="jit.html">torch.jit</a></li>
<li class="toctree-l1"><a class="reference internal" href="nn.init.html">torch.nn.init</a></li>
<li class="toctree-l1"><a class="reference internal" href="onnx.html">torch.onnx</a></li>
<li class="toctree-l1"><a class="reference internal" href="optim.html">torch.optim</a></li>
<li class="toctree-l1 current"><a class="current reference internal" href="#">Quantization</a></li>
<li class="toctree-l1"><a class="reference internal" href="rpc/index.html">Distributed RPC Framework</a></li>
<li class="toctree-l1"><a class="reference internal" href="random.html">torch.random</a></li>
<li class="toctree-l1"><a class="reference internal" href="sparse.html">torch.sparse</a></li>
<li class="toctree-l1"><a class="reference internal" href="storage.html">torch.Storage</a></li>
<li class="toctree-l1"><a class="reference internal" href="bottleneck.html">torch.utils.bottleneck</a></li>
<li class="toctree-l1"><a class="reference internal" href="checkpoint.html">torch.utils.checkpoint</a></li>
<li class="toctree-l1"><a class="reference internal" href="cpp_extension.html">torch.utils.cpp_extension</a></li>
<li class="toctree-l1"><a class="reference internal" href="data.html">torch.utils.data</a></li>
<li class="toctree-l1"><a class="reference internal" href="dlpack.html">torch.utils.dlpack</a></li>
<li class="toctree-l1"><a class="reference internal" href="model_zoo.html">torch.utils.model_zoo</a></li>
<li class="toctree-l1"><a class="reference internal" href="tensorboard.html">torch.utils.tensorboard</a></li>
<li class="toctree-l1"><a class="reference internal" href="type_info.html">Type Info</a></li>
<li class="toctree-l1"><a class="reference internal" href="named_tensor.html">Named Tensors</a></li>
<li class="toctree-l1"><a class="reference internal" href="name_inference.html">Named Tensors operator coverage</a></li>
<li class="toctree-l1"><a class="reference internal" href="__config__.html">torch.__config__</a></li>
</ul>
<p class="caption"><span class="caption-text">Libraries</span></p>
<ul>
<li class="toctree-l1"><a class="reference external" href="https://pytorch.org/audio">torchaudio</a></li>
<li class="toctree-l1"><a class="reference external" href="https://pytorch.org/text">torchtext</a></li>
<li class="toctree-l1"><a class="reference external" href="https://pytorch.org/elastic/">TorchElastic</a></li>
<li class="toctree-l1"><a class="reference external" href="https://pytorch.org/serve">TorchServe</a></li>
<li class="toctree-l1"><a class="reference external" href="http://pytorch.org/xla/">PyTorch on XLA Devices</a></li>
</ul>
<p class="caption"><span class="caption-text">Community</span></p>
<ul>
<li class="toctree-l1"><a class="reference internal" href="community/contribution_guide.html">PyTorch Contribution Guide</a></li>
<li class="toctree-l1"><a class="reference internal" href="community/governance.html">PyTorch Governance</a></li>
<li class="toctree-l1"><a class="reference internal" href="community/persons_of_interest.html">PyTorch Governance | Persons of Interest</a></li>
</ul>

            
          

        </div>
      </div>
    </nav>

    <div class="pytorch-container">
      <div class="pytorch-page-level-bar" id="pytorch-page-level-bar">
        <div class="pytorch-breadcrumbs-wrapper">
          















<div role="navigation" aria-label="breadcrumbs navigation">

  <ul class="pytorch-breadcrumbs">
    
      <li>
        <a href="index.html">
          
            Docs
          
        </a> &gt;
      </li>

        
      <li>Quantization</li>
    
    
      <li class="pytorch-breadcrumbs-aside">
        
            
            <a href="_sources/quantization.rst.txt" rel="nofollow"><img src="_static/images/view-page-source-icon.svg" alt="View page source"></a>
          
        
      </li>
    
  </ul>

  
</div>
        </div>

        <div class="pytorch-shortcuts-wrapper" id="pytorch-shortcuts-wrapper">
          Shortcuts
        </div>
      </div>

      <section data-toggle="wy-nav-shift" id="pytorch-content-wrap" class="pytorch-content-wrap">
        <div class="pytorch-content-left">

        
          
          <div class="rst-content">
          
            <div role="main" class="main-content" itemscope="itemscope" itemtype="http://schema.org/Article">
             <article itemprop="articleBody" id="pytorch-article" class="pytorch-article">
              
  <div class="section" id="quantization">
<span id="quantization-doc"></span><h1>Quantization<a class="headerlink" href="#quantization" title="Permalink to this headline">¶</a></h1>
<div class="section" id="introduction-to-quantization">
<h2>Introduction to Quantization<a class="headerlink" href="#introduction-to-quantization" title="Permalink to this headline">¶</a></h2>
<p>Quantization refers to techniques for performing computations and storing tensors at lower bitwidths than
floating point precision. A quantized model executes some or all of the operations on tensors with
integers rather than floating point values. This allows for a more
compact model representation and the use of high performance vectorized
operations on many hardware platforms. PyTorch supports INT8
quantization compared to typical FP32 models allowing for a 4x reduction in the model size and
a 4x reduction in memory bandwidth requirements. Hardware support for INT8 computations
is typically 2 to 4 times faster compared to FP32 compute. Quantization is primarily a technique
to speed up inference and only the forward pass is supported for quantized operators.</p>
<p>PyTorch supports multiple approaches to quantizing a deep learning model. In most cases the model is trained
in FP32 and then the model is converted to INT8. In addition, PyTorch also supports quantization aware
training, which models quantization errors in both the forward and backward passes using fake-quantization
modules. Note that the entire computation is carried out in floating point. At the end of quantization aware
training, PyTorch provides conversion functions to convert the trained model into lower precision.</p>
<p>At a lower level, PyTorch provides a way to represent quantized tensors and
perform operations with them. They can be used to directly construct models that
perform all or part of the computation in lower precision. Higher-level APIs are
provided that incorporate typical workflows of converting FP32 model to lower
precision with minimal accuracy loss.</p>
<p>Today, PyTorch supports the following backends for running quantized operators efficiently:</p>
<ul class="simple">
<li><p>x86 CPUs with AVX2 support or higher (without AVX2 some operations have inefficient implementations)</p></li>
<li><p>ARM CPUs (typically found in mobile/embedded devices)</p></li>
</ul>
<p>The corresponding implementation is chosen automatically based on the PyTorch build mode.</p>
<div class="admonition note">
<p class="admonition-title">Note</p>
<p>PyTorch 1.3 doesn’t provide quantized operator implementations on CUDA yet - this is direction of future work.
Move the model to CPU in order to test the quantized functionality.</p>
<p>Quantization-aware training (through <a class="reference internal" href="#torch.quantization.FakeQuantize" title="torch.quantization.FakeQuantize"><code class="xref py py-class docutils literal notranslate"><span class="pre">FakeQuantize</span></code></a>) supports both CPU and CUDA.</p>
</div>
<div class="admonition note">
<p class="admonition-title">Note</p>
<p>When preparing a quantized model, it is necessary to ensure that qconfig and the engine used for quantized computations match
the backend on which the model will be executed. Quantization currently supports two backends: fbgemm (for use on x86,
<a class="reference external" href="https://github.com/pytorch/FBGEMM">https://github.com/pytorch/FBGEMM</a>) and qnnpack (for use on the ARM QNNPACK library <a class="reference external" href="https://github.com/pytorch/QNNPACK">https://github.com/pytorch/QNNPACK</a>).
For example, if you are interested in quantizing a model to run on ARM, it is recommended to set the qconfig by calling:</p>
<p><code class="docutils literal notranslate"><span class="pre">qconfig</span> <span class="pre">=</span> <span class="pre">torch.quantization.get_default_qconfig('qnnpack')</span></code></p>
<p>for post training quantization and</p>
<p><code class="docutils literal notranslate"><span class="pre">qconfig</span> <span class="pre">=</span> <span class="pre">torch.quantization.get_default_qat_qconfig('qnnpack')</span></code></p>
<p>for quantization aware training.</p>
<p>In addition, the torch.backends.quantized.engine parameter should be set to match the backend. For using qnnpack for inference, the
backend is set to qnnpack as follows</p>
<p><code class="docutils literal notranslate"><span class="pre">torch.backends.quantized.engine</span> <span class="pre">=</span> <span class="pre">'qnnpack'</span></code></p>
</div>
</div>
<div class="section" id="quantized-tensors">
<h2>Quantized Tensors<a class="headerlink" href="#quantized-tensors" title="Permalink to this headline">¶</a></h2>
<p>PyTorch supports both per tensor and per channel asymmetric linear
quantization. Per tensor means that all the values within the tensor are
scaled the same way. Per channel means that for each dimension, typically
the channel dimension of a tensor, the values
in the tensor are scaled and offset by a different value (effectively
the scale and offset become vectors). This allows for lesser error in converting tensors
to quantized values.</p>
<p>The mapping is performed by converting the floating point tensors using</p>
<a class="reference internal image-reference" href="_images/math-quantizer-equation.png"><img alt="_images/math-quantizer-equation.png" src="_images/math-quantizer-equation.png" style="width: 40%;" /></a>
<p>Note that, we ensure that zero in floating point is represented with no error after quantization,
thereby ensuring that operations like padding do not cause additional quantization error.</p>
<p>In order to do quantization in PyTorch, we need to be able to represent
quantized data in Tensors. A Quantized Tensor allows for storing
quantized data (represented as int8/uint8/int32) along with quantization
parameters like scale and zero_point. Quantized Tensors allow for many
useful operations making quantized arithmetic easy, in addition to
allowing for serialization of data in a quantized format.</p>
</div>
<div class="section" id="operation-coverage">
<h2>Operation coverage<a class="headerlink" href="#operation-coverage" title="Permalink to this headline">¶</a></h2>
<p>Quantized Tensors support a limited subset of data manipulation methods of the regular
full-precision tensor. (see list below)</p>
<p>For NN operators included in PyTorch, we restrict support to:</p>
<blockquote>
<div><ol class="arabic simple">
<li><p>8 bit weights (data_type = qint8)</p></li>
<li><p>8 bit activations (data_type = quint8)</p></li>
</ol>
</div></blockquote>
<p>Note that operator implementations currently only
support per channel quantization for weights of the <strong>conv</strong> and <strong>linear</strong>
operators. Furthermore the minimum and the maximum of the input data is
mapped linearly to the minimum and the maximum of the quantized data
type such that zero is represented with no quantization error.</p>
<p>Additional data types and quantization schemes can be implemented through
the <a class="reference external" href="https://pytorch.org/tutorials/advanced/torch_script_custom_ops.html">custom operator mechanism</a>.</p>
<p>Many operations for quantized tensors are available under the same API as full
float version in <code class="docutils literal notranslate"><span class="pre">torch</span></code> or <code class="docutils literal notranslate"><span class="pre">torch.nn</span></code>. Quantized version of NN modules that
perform re-quantization are available in <code class="docutils literal notranslate"><span class="pre">torch.nn.quantized</span></code>. Those
operations explicitly take output quantization parameters (scale and zero_point) in
the operation signature.</p>
<p>In addition, we also support fused versions corresponding to common fusion patterns that impact quantization at:
torch.nn.intrinsic.quantized.</p>
<p>For quantization aware training, we support modules prepared for quantization aware training at
torch.nn.qat and torch.nn.intrinsic.qat.</p>
<p>Current quantized operation list is sufficient to cover typical CNN and RNN
models:</p>
<div class="section" id="quantized-torch-tensor-operations">
<h3>Quantized <code class="docutils literal notranslate"><span class="pre">torch.Tensor</span></code> operations<a class="headerlink" href="#quantized-torch-tensor-operations" title="Permalink to this headline">¶</a></h3>
<p>Operations that are available from the <code class="docutils literal notranslate"><span class="pre">torch</span></code> namespace or as methods on Tensor for quantized tensors:</p>
<ul class="simple">
<li><p><a class="reference internal" href="torch.html#torch.quantize_per_tensor" title="torch.quantize_per_tensor"><code class="xref py py-func docutils literal notranslate"><span class="pre">quantize_per_tensor()</span></code></a> - Convert float tensor to quantized tensor with per-tensor scale and zero point</p></li>
<li><p><a class="reference internal" href="torch.html#torch.quantize_per_channel" title="torch.quantize_per_channel"><code class="xref py py-func docutils literal notranslate"><span class="pre">quantize_per_channel()</span></code></a> - Convert float tensor to quantized tensor with per-channel scale and zero point</p></li>
<li><p>View-based operations like <a class="reference internal" href="tensors.html#torch.Tensor.view" title="torch.Tensor.view"><code class="xref py py-meth docutils literal notranslate"><span class="pre">view()</span></code></a>, <a class="reference internal" href="tensors.html#torch.Tensor.as_strided" title="torch.Tensor.as_strided"><code class="xref py py-meth docutils literal notranslate"><span class="pre">as_strided()</span></code></a>, <a class="reference internal" href="tensors.html#torch.Tensor.expand" title="torch.Tensor.expand"><code class="xref py py-meth docutils literal notranslate"><span class="pre">expand()</span></code></a>, <a class="reference internal" href="tensors.html#torch.Tensor.flatten" title="torch.Tensor.flatten"><code class="xref py py-meth docutils literal notranslate"><span class="pre">flatten()</span></code></a>, <a class="reference internal" href="tensors.html#torch.Tensor.select" title="torch.Tensor.select"><code class="xref py py-meth docutils literal notranslate"><span class="pre">select()</span></code></a>, python-style indexing, etc - work as on regular tensor (if quantization is not per-channel)</p></li>
<li><dl class="simple">
<dt>Comparators</dt><dd><ul>
<li><p><a class="reference internal" href="tensors.html#torch.Tensor.ne" title="torch.Tensor.ne"><code class="xref py py-meth docutils literal notranslate"><span class="pre">ne()</span></code></a> — Not equal</p></li>
<li><p><a class="reference internal" href="tensors.html#torch.Tensor.eq" title="torch.Tensor.eq"><code class="xref py py-meth docutils literal notranslate"><span class="pre">eq()</span></code></a> — Equal</p></li>
<li><p><a class="reference internal" href="tensors.html#torch.Tensor.ge" title="torch.Tensor.ge"><code class="xref py py-meth docutils literal notranslate"><span class="pre">ge()</span></code></a> — Greater or equal</p></li>
<li><p><a class="reference internal" href="tensors.html#torch.Tensor.le" title="torch.Tensor.le"><code class="xref py py-meth docutils literal notranslate"><span class="pre">le()</span></code></a> — Less or equal</p></li>
<li><p><a class="reference internal" href="tensors.html#torch.Tensor.gt" title="torch.Tensor.gt"><code class="xref py py-meth docutils literal notranslate"><span class="pre">gt()</span></code></a> — Greater</p></li>
<li><p><a class="reference internal" href="tensors.html#torch.Tensor.lt" title="torch.Tensor.lt"><code class="xref py py-meth docutils literal notranslate"><span class="pre">lt()</span></code></a> — Less</p></li>
</ul>
</dd>
</dl>
</li>
<li><p><a class="reference internal" href="tensors.html#torch.Tensor.copy_" title="torch.Tensor.copy_"><code class="xref py py-meth docutils literal notranslate"><span class="pre">copy_()</span></code></a> — Copies src to self in-place</p></li>
<li><p><a class="reference internal" href="tensors.html#torch.Tensor.clone" title="torch.Tensor.clone"><code class="xref py py-meth docutils literal notranslate"><span class="pre">clone()</span></code></a> —  Returns a deep copy of the passed-in tensor</p></li>
<li><p><a class="reference internal" href="tensors.html#torch.Tensor.dequantize" title="torch.Tensor.dequantize"><code class="xref py py-meth docutils literal notranslate"><span class="pre">dequantize()</span></code></a> — Convert quantized tensor to float tensor</p></li>
<li><p><a class="reference internal" href="tensors.html#torch.Tensor.equal" title="torch.Tensor.equal"><code class="xref py py-meth docutils literal notranslate"><span class="pre">equal()</span></code></a> — Compares two tensors, returns true if quantization parameters and all integer elements are the same</p></li>
<li><p><a class="reference internal" href="tensors.html#torch.Tensor.int_repr" title="torch.Tensor.int_repr"><code class="xref py py-meth docutils literal notranslate"><span class="pre">int_repr()</span></code></a> — Returns the underlying integer representation of the quantized tensor</p></li>
<li><p><a class="reference internal" href="tensors.html#torch.Tensor.max" title="torch.Tensor.max"><code class="xref py py-meth docutils literal notranslate"><span class="pre">max()</span></code></a> — Returns the maximum value of the tensor (reduction only)</p></li>
<li><p><a class="reference internal" href="tensors.html#torch.Tensor.mean" title="torch.Tensor.mean"><code class="xref py py-meth docutils literal notranslate"><span class="pre">mean()</span></code></a> — Mean function. Supported variants: reduction, dim, out</p></li>
<li><p><a class="reference internal" href="tensors.html#torch.Tensor.min" title="torch.Tensor.min"><code class="xref py py-meth docutils literal notranslate"><span class="pre">min()</span></code></a> — Returns the minimum value of the tensor (reduction only)</p></li>
<li><p><a class="reference internal" href="tensors.html#torch.Tensor.q_scale" title="torch.Tensor.q_scale"><code class="xref py py-meth docutils literal notranslate"><span class="pre">q_scale()</span></code></a> — Returns the scale of the per-tensor quantized tensor</p></li>
<li><p><a class="reference internal" href="tensors.html#torch.Tensor.q_zero_point" title="torch.Tensor.q_zero_point"><code class="xref py py-meth docutils literal notranslate"><span class="pre">q_zero_point()</span></code></a> — Returns the zero_point of the per-tensor quantized tensor</p></li>
<li><p><a class="reference internal" href="tensors.html#torch.Tensor.q_per_channel_scales" title="torch.Tensor.q_per_channel_scales"><code class="xref py py-meth docutils literal notranslate"><span class="pre">q_per_channel_scales()</span></code></a> — Returns the scales of the per-channel quantized tensor</p></li>
<li><p><a class="reference internal" href="tensors.html#torch.Tensor.q_per_channel_zero_points" title="torch.Tensor.q_per_channel_zero_points"><code class="xref py py-meth docutils literal notranslate"><span class="pre">q_per_channel_zero_points()</span></code></a> — Returns the zero points of the per-channel quantized tensor</p></li>
<li><p><a class="reference internal" href="tensors.html#torch.Tensor.q_per_channel_axis" title="torch.Tensor.q_per_channel_axis"><code class="xref py py-meth docutils literal notranslate"><span class="pre">q_per_channel_axis()</span></code></a> — Returns the channel axis of the per-channel quantized tensor</p></li>
<li><p><a class="reference internal" href="tensors.html#torch.Tensor.resize_" title="torch.Tensor.resize_"><code class="xref py py-meth docutils literal notranslate"><span class="pre">resize_()</span></code></a> — In-place resize</p></li>
<li><p><a class="reference internal" href="tensors.html#torch.Tensor.sort" title="torch.Tensor.sort"><code class="xref py py-meth docutils literal notranslate"><span class="pre">sort()</span></code></a> — Sorts the tensor</p></li>
<li><p><a class="reference internal" href="tensors.html#torch.Tensor.topk" title="torch.Tensor.topk"><code class="xref py py-meth docutils literal notranslate"><span class="pre">topk()</span></code></a> — Returns k largest values of a tensor</p></li>
</ul>
</div>
<div class="section" id="torch-nn-functional">
<h3><code class="docutils literal notranslate"><span class="pre">torch.nn.functional</span></code><a class="headerlink" href="#torch-nn-functional" title="Permalink to this headline">¶</a></h3>
<p>Basic activations are supported.</p>
<ul class="simple">
<li><p><a class="reference internal" href="nn.functional.html#torch.nn.functional.relu" title="torch.nn.functional.relu"><code class="xref py py-meth docutils literal notranslate"><span class="pre">relu()</span></code></a> — Rectified linear unit (copy)</p></li>
<li><p><a class="reference internal" href="nn.functional.html#torch.nn.functional.relu_" title="torch.nn.functional.relu_"><code class="xref py py-meth docutils literal notranslate"><span class="pre">relu_()</span></code></a> — Rectified linear unit (inplace)</p></li>
<li><p><a class="reference internal" href="nn.functional.html#torch.nn.functional.max_pool2d" title="torch.nn.functional.max_pool2d"><code class="xref py py-meth docutils literal notranslate"><span class="pre">max_pool2d()</span></code></a> - Maximum pooling</p></li>
<li><p><a class="reference internal" href="nn.functional.html#torch.nn.functional.adaptive_avg_pool2d" title="torch.nn.functional.adaptive_avg_pool2d"><code class="xref py py-meth docutils literal notranslate"><span class="pre">adaptive_avg_pool2d()</span></code></a> - Adaptive average pooling</p></li>
<li><p><a class="reference internal" href="nn.functional.html#torch.nn.functional.avg_pool2d" title="torch.nn.functional.avg_pool2d"><code class="xref py py-meth docutils literal notranslate"><span class="pre">avg_pool2d()</span></code></a> - Average pooling</p></li>
<li><p><a class="reference internal" href="nn.functional.html#torch.nn.functional.interpolate" title="torch.nn.functional.interpolate"><code class="xref py py-meth docutils literal notranslate"><span class="pre">interpolate()</span></code></a> - Interpolation</p></li>
<li><p><a class="reference internal" href="nn.functional.html#torch.nn.functional.upsample" title="torch.nn.functional.upsample"><code class="xref py py-meth docutils literal notranslate"><span class="pre">upsample()</span></code></a> - Upsampling</p></li>
<li><p><a class="reference internal" href="nn.functional.html#torch.nn.functional.upsample_bilinear" title="torch.nn.functional.upsample_bilinear"><code class="xref py py-meth docutils literal notranslate"><span class="pre">upsample_bilinear()</span></code></a> - Bilinear Upsampling</p></li>
<li><p><a class="reference internal" href="nn.functional.html#torch.nn.functional.upsample_nearest" title="torch.nn.functional.upsample_nearest"><code class="xref py py-meth docutils literal notranslate"><span class="pre">upsample_nearest()</span></code></a> - Upsampling Nearest</p></li>
</ul>
</div>
<div class="section" id="torch-nn-intrinsic">
<h3><code class="docutils literal notranslate"><span class="pre">torch.nn.intrinsic</span></code><a class="headerlink" href="#torch-nn-intrinsic" title="Permalink to this headline">¶</a></h3>
<p>Fused modules are provided for common patterns in CNNs. Combining several operations together (like convolution and relu) allows for better quantization accuracy.</p>
<ul class="simple">
<li><dl class="simple">
<dt><code class="docutils literal notranslate"><span class="pre">torch.nn.intrinsic</span></code> — float versions of the modules, can be swapped with quantized version 1 to 1</dt><dd><ul>
<li><p><a class="reference internal" href="#torch.nn.intrinsic.ConvBn2d" title="torch.nn.intrinsic.ConvBn2d"><code class="xref py py-class docutils literal notranslate"><span class="pre">ConvBn2d</span></code></a> — Conv2d + BatchNorm</p></li>
<li><p><a class="reference internal" href="#torch.nn.intrinsic.ConvBnReLU2d" title="torch.nn.intrinsic.ConvBnReLU2d"><code class="xref py py-class docutils literal notranslate"><span class="pre">ConvBnReLU2d</span></code></a> — Conv2d + BatchNorm + ReLU</p></li>
<li><p><a class="reference internal" href="#torch.nn.intrinsic.ConvReLU2d" title="torch.nn.intrinsic.ConvReLU2d"><code class="xref py py-class docutils literal notranslate"><span class="pre">ConvReLU2d</span></code></a> — Conv2d + ReLU</p></li>
<li><p><a class="reference internal" href="#torch.nn.intrinsic.ConvReLU3d" title="torch.nn.intrinsic.ConvReLU3d"><code class="xref py py-class docutils literal notranslate"><span class="pre">ConvReLU3d</span></code></a> — Conv3d + ReLU</p></li>
<li><p><a class="reference internal" href="#torch.nn.intrinsic.LinearReLU" title="torch.nn.intrinsic.LinearReLU"><code class="xref py py-class docutils literal notranslate"><span class="pre">LinearReLU</span></code></a> — Linear + ReLU</p></li>
</ul>
</dd>
</dl>
</li>
<li><dl class="simple">
<dt><code class="docutils literal notranslate"><span class="pre">torch.nn.intrinsic.qat</span></code> — versions of layers for quantization-aware training</dt><dd><ul>
<li><p><a class="reference internal" href="#torch.nn.intrinsic.qat.ConvBn2d" title="torch.nn.intrinsic.qat.ConvBn2d"><code class="xref py py-class docutils literal notranslate"><span class="pre">ConvBn2d</span></code></a> — Conv2d + BatchNorm</p></li>
<li><p><a class="reference internal" href="#torch.nn.intrinsic.qat.ConvBnReLU2d" title="torch.nn.intrinsic.qat.ConvBnReLU2d"><code class="xref py py-class docutils literal notranslate"><span class="pre">ConvBnReLU2d</span></code></a> — Conv2d + BatchNorm + ReLU</p></li>
<li><p><a class="reference internal" href="#torch.nn.intrinsic.qat.ConvReLU2d" title="torch.nn.intrinsic.qat.ConvReLU2d"><code class="xref py py-class docutils literal notranslate"><span class="pre">ConvReLU2d</span></code></a> — Conv2d + ReLU</p></li>
<li><p><a class="reference internal" href="#torch.nn.intrinsic.qat.LinearReLU" title="torch.nn.intrinsic.qat.LinearReLU"><code class="xref py py-class docutils literal notranslate"><span class="pre">LinearReLU</span></code></a> — Linear + ReLU</p></li>
</ul>
</dd>
</dl>
</li>
<li><dl class="simple">
<dt><code class="docutils literal notranslate"><span class="pre">torch.nn.intrinsic.quantized</span></code> — quantized version of fused layers for inference (no BatchNorm variants as it’s usually folded into convolution for inference)</dt><dd><ul>
<li><p><a class="reference internal" href="#torch.nn.intrinsic.quantized.LinearReLU" title="torch.nn.intrinsic.quantized.LinearReLU"><code class="xref py py-class docutils literal notranslate"><span class="pre">LinearReLU</span></code></a> — Linear + ReLU</p></li>
<li><p><a class="reference internal" href="#torch.nn.intrinsic.quantized.ConvReLU2d" title="torch.nn.intrinsic.quantized.ConvReLU2d"><code class="xref py py-class docutils literal notranslate"><span class="pre">ConvReLU2d</span></code></a> — 2D Convolution + ReLU</p></li>
<li><p><a class="reference internal" href="#torch.nn.intrinsic.quantized.ConvReLU3d" title="torch.nn.intrinsic.quantized.ConvReLU3d"><code class="xref py py-class docutils literal notranslate"><span class="pre">ConvReLU3d</span></code></a> — 3D Convolution + ReLU</p></li>
</ul>
</dd>
</dl>
</li>
</ul>
</div>
<div class="section" id="torch-nn-qat">
<h3><code class="docutils literal notranslate"><span class="pre">torch.nn.qat</span></code><a class="headerlink" href="#torch-nn-qat" title="Permalink to this headline">¶</a></h3>
<p>Layers for the quantization-aware training</p>
<ul class="simple">
<li><p><a class="reference internal" href="#torch.nn.qat.Linear" title="torch.nn.qat.Linear"><code class="xref py py-class docutils literal notranslate"><span class="pre">Linear</span></code></a> — Linear (fully-connected) layer</p></li>
<li><p><a class="reference internal" href="#torch.nn.qat.Conv2d" title="torch.nn.qat.Conv2d"><code class="xref py py-class docutils literal notranslate"><span class="pre">Conv2d</span></code></a> — 2D convolution</p></li>
</ul>
</div>
<div class="section" id="torch-quantization">
<h3><code class="docutils literal notranslate"><span class="pre">torch.quantization</span></code><a class="headerlink" href="#torch-quantization" title="Permalink to this headline">¶</a></h3>
<ul class="simple">
<li><dl class="simple">
<dt>Functions for quantization</dt><dd><ul>
<li><p><a class="reference internal" href="#torch.quantization.add_observer_" title="torch.quantization.add_observer_"><code class="xref py py-func docutils literal notranslate"><span class="pre">add_observer_()</span></code></a> — Adds observer for the leaf modules (if quantization configuration is provided)</p></li>
<li><p><a class="reference internal" href="#torch.quantization.add_quant_dequant" title="torch.quantization.add_quant_dequant"><code class="xref py py-func docutils literal notranslate"><span class="pre">add_quant_dequant()</span></code></a> — Wraps the leaf child module using <a class="reference internal" href="#torch.quantization.QuantWrapper" title="torch.quantization.QuantWrapper"><code class="xref py py-class docutils literal notranslate"><span class="pre">QuantWrapper</span></code></a></p></li>
<li><p><a class="reference internal" href="#torch.quantization.convert" title="torch.quantization.convert"><code class="xref py py-func docutils literal notranslate"><span class="pre">convert()</span></code></a> — Converts float module with observers into its quantized counterpart. Must have quantization configuration</p></li>
<li><p><a class="reference internal" href="#torch.quantization.get_observer_dict" title="torch.quantization.get_observer_dict"><code class="xref py py-func docutils literal notranslate"><span class="pre">get_observer_dict()</span></code></a> — Traverses the module children and collects all observers into a <code class="docutils literal notranslate"><span class="pre">dict</span></code></p></li>
<li><p><a class="reference internal" href="#torch.quantization.prepare" title="torch.quantization.prepare"><code class="xref py py-func docutils literal notranslate"><span class="pre">prepare()</span></code></a> — Prepares a copy of a model for quantization</p></li>
<li><p><a class="reference internal" href="#torch.quantization.prepare_qat" title="torch.quantization.prepare_qat"><code class="xref py py-func docutils literal notranslate"><span class="pre">prepare_qat()</span></code></a> — Prepares a copy of a model for quantization aware training</p></li>
<li><p><a class="reference internal" href="#torch.quantization.propagate_qconfig_" title="torch.quantization.propagate_qconfig_"><code class="xref py py-func docutils literal notranslate"><span class="pre">propagate_qconfig_()</span></code></a> — Propagates quantization configurations through the module hierarchy and assign them to each leaf module</p></li>
<li><p><a class="reference internal" href="#torch.quantization.quantize" title="torch.quantization.quantize"><code class="xref py py-func docutils literal notranslate"><span class="pre">quantize()</span></code></a> — Converts a float module to quantized version</p></li>
<li><p><a class="reference internal" href="#torch.quantization.quantize_dynamic" title="torch.quantization.quantize_dynamic"><code class="xref py py-func docutils literal notranslate"><span class="pre">quantize_dynamic()</span></code></a> — Converts a float module to dynamically quantized version</p></li>
<li><p><a class="reference internal" href="#torch.quantization.quantize_qat" title="torch.quantization.quantize_qat"><code class="xref py py-func docutils literal notranslate"><span class="pre">quantize_qat()</span></code></a> — Converts a float module to quantized version used in quantization aware training</p></li>
<li><p><a class="reference internal" href="#torch.quantization.swap_module" title="torch.quantization.swap_module"><code class="xref py py-func docutils literal notranslate"><span class="pre">swap_module()</span></code></a> — Swaps the module with its quantized counterpart (if quantizable and if it has an observer)</p></li>
</ul>
</dd>
</dl>
</li>
<li><p><a class="reference internal" href="#torch.quantization.default_eval_fn" title="torch.quantization.default_eval_fn"><code class="xref py py-func docutils literal notranslate"><span class="pre">default_eval_fn()</span></code></a> — Default evaluation function used by the <a class="reference internal" href="#torch.quantization.quantize" title="torch.quantization.quantize"><code class="xref py py-func docutils literal notranslate"><span class="pre">torch.quantization.quantize()</span></code></a></p></li>
<li><p><a class="reference internal" href="#torch.quantization.fuse_modules" title="torch.quantization.fuse_modules"><code class="xref py py-func docutils literal notranslate"><span class="pre">fuse_modules()</span></code></a></p></li>
<li><p><a class="reference internal" href="#torch.quantization.FakeQuantize" title="torch.quantization.FakeQuantize"><code class="xref py py-class docutils literal notranslate"><span class="pre">FakeQuantize</span></code></a> — Module for simulating the quantization/dequantization at training time</p></li>
<li><dl class="simple">
<dt>Default Observers. The rest of observers are available from <code class="docutils literal notranslate"><span class="pre">torch.quantization.observer</span></code></dt><dd><ul>
<li><p><code class="xref py py-attr docutils literal notranslate"><span class="pre">default_observer</span></code> — Same as <code class="docutils literal notranslate"><span class="pre">MinMaxObserver.with_args(reduce_range=True)</span></code></p></li>
<li><p><code class="xref py py-attr docutils literal notranslate"><span class="pre">default_weight_observer</span></code> — Same as <code class="docutils literal notranslate"><span class="pre">MinMaxObserver.with_args(dtype=torch.qint8,</span> <span class="pre">qscheme=torch.per_tensor_symmetric)</span></code></p></li>
<li><p><code class="xref py py-class docutils literal notranslate"><span class="pre">Observer</span></code> — Abstract base class for observers</p></li>
</ul>
</dd>
</dl>
</li>
<li><dl class="simple">
<dt>Quantization configurations</dt><dd><ul>
<li><p><a class="reference internal" href="#torch.quantization.QConfig" title="torch.quantization.QConfig"><code class="xref py py-class docutils literal notranslate"><span class="pre">QConfig</span></code></a> — Quantization configuration class</p></li>
<li><p><code class="xref py py-attr docutils literal notranslate"><span class="pre">default_qconfig</span></code> — Same as <code class="docutils literal notranslate"><span class="pre">QConfig(activation=default_observer,</span> <span class="pre">weight=default_weight_observer)</span></code> (See <code class="xref py py-class docutils literal notranslate"><span class="pre">QConfig</span></code>)</p></li>
<li><p><code class="xref py py-attr docutils literal notranslate"><span class="pre">default_qat_qconfig</span></code> — Same as <code class="docutils literal notranslate"><span class="pre">QConfig(activation=default_fake_quant,</span> <span class="pre">weight=default_weight_fake_quant)</span></code> (See <code class="xref py py-class docutils literal notranslate"><span class="pre">QConfig</span></code>)</p></li>
<li><p><code class="xref py py-attr docutils literal notranslate"><span class="pre">default_dynamic_qconfig</span></code> — Same as <code class="docutils literal notranslate"><span class="pre">QConfigDynamic(weight=default_weight_observer)</span></code> (See <code class="xref py py-class docutils literal notranslate"><span class="pre">QConfigDynamic</span></code>)</p></li>
<li><p><code class="xref py py-attr docutils literal notranslate"><span class="pre">float16_dynamic_qconfig</span></code> — Same as <code class="docutils literal notranslate"><span class="pre">QConfigDynamic(weight=NoopObserver.with_args(dtype=torch.float16))</span></code> (See <code class="xref py py-class docutils literal notranslate"><span class="pre">QConfigDynamic</span></code>)</p></li>
</ul>
</dd>
</dl>
</li>
<li><dl class="simple">
<dt>Stubs</dt><dd><ul>
<li><p><a class="reference internal" href="#torch.quantization.DeQuantStub" title="torch.quantization.DeQuantStub"><code class="xref py py-class docutils literal notranslate"><span class="pre">DeQuantStub</span></code></a> - placeholder module for dequantize() operation in float-valued models</p></li>
<li><p><a class="reference internal" href="#torch.quantization.QuantStub" title="torch.quantization.QuantStub"><code class="xref py py-class docutils literal notranslate"><span class="pre">QuantStub</span></code></a> - placeholder module for quantize() operation in float-valued models</p></li>
<li><p><a class="reference internal" href="#torch.quantization.QuantWrapper" title="torch.quantization.QuantWrapper"><code class="xref py py-class docutils literal notranslate"><span class="pre">QuantWrapper</span></code></a> — wraps the module to be quantized. Inserts the <a class="reference internal" href="#torch.quantization.QuantStub" title="torch.quantization.QuantStub"><code class="xref py py-class docutils literal notranslate"><span class="pre">QuantStub</span></code></a> and <a class="reference internal" href="#torch.quantization.DeQuantStub" title="torch.quantization.DeQuantStub"><code class="xref py py-class docutils literal notranslate"><span class="pre">DeQuantStub</span></code></a></p></li>
</ul>
</dd>
</dl>
</li>
</ul>
<p>Observers for computing the quantization parameters</p>
<ul class="simple">
<li><p><a class="reference internal" href="#torch.quantization.MinMaxObserver" title="torch.quantization.MinMaxObserver"><code class="xref py py-class docutils literal notranslate"><span class="pre">MinMaxObserver</span></code></a> — Derives the quantization parameters from the running minimum and maximum of the observed tensor inputs (per tensor variant)</p></li>
<li><p><a class="reference internal" href="#torch.quantization.MovingAverageMinMaxObserver" title="torch.quantization.MovingAverageMinMaxObserver"><code class="xref py py-class docutils literal notranslate"><span class="pre">MovingAverageMinMaxObserver</span></code></a> — Derives the quantization parameters from the running averages of the minimums and maximums of the observed tensor inputs (per tensor variant)</p></li>
<li><p><a class="reference internal" href="#torch.quantization.PerChannelMinMaxObserver" title="torch.quantization.PerChannelMinMaxObserver"><code class="xref py py-class docutils literal notranslate"><span class="pre">PerChannelMinMaxObserver</span></code></a> — Derives the quantization parameters from the running minimum and maximum of the observed tensor inputs (per channel variant)</p></li>
<li><p><a class="reference internal" href="#torch.quantization.MovingAveragePerChannelMinMaxObserver" title="torch.quantization.MovingAveragePerChannelMinMaxObserver"><code class="xref py py-class docutils literal notranslate"><span class="pre">MovingAveragePerChannelMinMaxObserver</span></code></a> — Derives the quantization parameters from the running averages of the minimums and maximums of the observed tensor inputs (per channel variant)</p></li>
<li><p><a class="reference internal" href="#torch.quantization.HistogramObserver" title="torch.quantization.HistogramObserver"><code class="xref py py-class docutils literal notranslate"><span class="pre">HistogramObserver</span></code></a> — Derives the quantization parameters by creating a histogram of running minimums and maximums.</p></li>
<li><dl class="simple">
<dt>Observers that do not compute the quantization parameters:</dt><dd><ul>
<li><p><a class="reference internal" href="#torch.quantization.RecordingObserver" title="torch.quantization.RecordingObserver"><code class="xref py py-class docutils literal notranslate"><span class="pre">RecordingObserver</span></code></a> — Records all incoming tensors. Used for debugging only.</p></li>
<li><p><a class="reference internal" href="#torch.quantization.NoopObserver" title="torch.quantization.NoopObserver"><code class="xref py py-class docutils literal notranslate"><span class="pre">NoopObserver</span></code></a> — Pass-through observer. Used for situation when there are no quantization parameters (i.e. quantization to <code class="docutils literal notranslate"><span class="pre">float16</span></code>)</p></li>
</ul>
</dd>
</dl>
</li>
</ul>
</div>
<div class="section" id="torch-nn-quantized">
<h3><code class="docutils literal notranslate"><span class="pre">torch.nn.quantized</span></code><a class="headerlink" href="#torch-nn-quantized" title="Permalink to this headline">¶</a></h3>
<p>Quantized version of standard NN layers.</p>
<ul class="simple">
<li><p><a class="reference internal" href="#torch.nn.quantized.Quantize" title="torch.nn.quantized.Quantize"><code class="xref py py-class docutils literal notranslate"><span class="pre">Quantize</span></code></a> — Quantization layer, used to automatically replace <a class="reference internal" href="#torch.quantization.QuantStub" title="torch.quantization.QuantStub"><code class="xref py py-class docutils literal notranslate"><span class="pre">QuantStub</span></code></a></p></li>
<li><p><a class="reference internal" href="#torch.nn.quantized.DeQuantize" title="torch.nn.quantized.DeQuantize"><code class="xref py py-class docutils literal notranslate"><span class="pre">DeQuantize</span></code></a> — Dequantization layer, used to replace <a class="reference internal" href="#torch.quantization.DeQuantStub" title="torch.quantization.DeQuantStub"><code class="xref py py-class docutils literal notranslate"><span class="pre">DeQuantStub</span></code></a></p></li>
<li><p><a class="reference internal" href="#torch.nn.quantized.FloatFunctional" title="torch.nn.quantized.FloatFunctional"><code class="xref py py-class docutils literal notranslate"><span class="pre">FloatFunctional</span></code></a> — Wrapper class to make stateless float operations stateful so that they can be replaced with quantized versions</p></li>
<li><p><a class="reference internal" href="#torch.nn.quantized.QFunctional" title="torch.nn.quantized.QFunctional"><code class="xref py py-class docutils literal notranslate"><span class="pre">QFunctional</span></code></a> — Wrapper class for quantized versions of stateless operations like <code class="docutils literal notranslate"><span class="pre">torch.add</span></code></p></li>
<li><p><a class="reference internal" href="#torch.nn.quantized.Conv2d" title="torch.nn.quantized.Conv2d"><code class="xref py py-class docutils literal notranslate"><span class="pre">Conv2d</span></code></a> — 2D convolution</p></li>
<li><p><a class="reference internal" href="#torch.nn.quantized.Conv3d" title="torch.nn.quantized.Conv3d"><code class="xref py py-class docutils literal notranslate"><span class="pre">Conv3d</span></code></a> — 3D convolution</p></li>
<li><p><a class="reference internal" href="#torch.nn.quantized.Linear" title="torch.nn.quantized.Linear"><code class="xref py py-class docutils literal notranslate"><span class="pre">Linear</span></code></a> — Linear (fully-connected) layer</p></li>
<li><p><a class="reference internal" href="nn.html#torch.nn.MaxPool2d" title="torch.nn.MaxPool2d"><code class="xref py py-class docutils literal notranslate"><span class="pre">MaxPool2d</span></code></a> — 2D max pooling</p></li>
<li><p><a class="reference internal" href="#torch.nn.quantized.ReLU" title="torch.nn.quantized.ReLU"><code class="xref py py-class docutils literal notranslate"><span class="pre">ReLU</span></code></a> — Rectified linear unit</p></li>
<li><p><a class="reference internal" href="#torch.nn.quantized.ReLU6" title="torch.nn.quantized.ReLU6"><code class="xref py py-class docutils literal notranslate"><span class="pre">ReLU6</span></code></a> — Rectified linear unit with cut-off at quantized representation of 6</p></li>
</ul>
</div>
<div class="section" id="torch-nn-quantized-dynamic">
<h3><code class="docutils literal notranslate"><span class="pre">torch.nn.quantized.dynamic</span></code><a class="headerlink" href="#torch-nn-quantized-dynamic" title="Permalink to this headline">¶</a></h3>
<p>Layers used in dynamically quantized models (i.e. quantized only on weights)</p>
<ul class="simple">
<li><p><a class="reference internal" href="#torch.nn.quantized.dynamic.Linear" title="torch.nn.quantized.dynamic.Linear"><code class="xref py py-class docutils literal notranslate"><span class="pre">Linear</span></code></a> — Linear (fully-connected) layer</p></li>
<li><p><a class="reference internal" href="#torch.nn.quantized.dynamic.LSTM" title="torch.nn.quantized.dynamic.LSTM"><code class="xref py py-class docutils literal notranslate"><span class="pre">LSTM</span></code></a> — Long-Short Term Memory RNN module</p></li>
</ul>
</div>
<div class="section" id="torch-nn-quantized-functional">
<h3><code class="docutils literal notranslate"><span class="pre">torch.nn.quantized.functional</span></code><a class="headerlink" href="#torch-nn-quantized-functional" title="Permalink to this headline">¶</a></h3>
<p>Functional versions of quantized NN layers (many of them accept explicit quantization output parameters)</p>
<ul class="simple">
<li><p><a class="reference internal" href="#torch.nn.quantized.functional.adaptive_avg_pool2d" title="torch.nn.quantized.functional.adaptive_avg_pool2d"><code class="xref py py-func docutils literal notranslate"><span class="pre">adaptive_avg_pool2d()</span></code></a> — 2D adaptive average pooling</p></li>
<li><p><a class="reference internal" href="#torch.nn.quantized.functional.avg_pool2d" title="torch.nn.quantized.functional.avg_pool2d"><code class="xref py py-func docutils literal notranslate"><span class="pre">avg_pool2d()</span></code></a> — 2D average pooling</p></li>
<li><p><a class="reference internal" href="#torch.nn.quantized.functional.conv2d" title="torch.nn.quantized.functional.conv2d"><code class="xref py py-func docutils literal notranslate"><span class="pre">conv2d()</span></code></a> — 2D convolution</p></li>
<li><p><a class="reference internal" href="#torch.nn.quantized.functional.conv3d" title="torch.nn.quantized.functional.conv3d"><code class="xref py py-func docutils literal notranslate"><span class="pre">conv3d()</span></code></a> — 3D convolution</p></li>
<li><p><a class="reference internal" href="#torch.nn.quantized.functional.interpolate" title="torch.nn.quantized.functional.interpolate"><code class="xref py py-func docutils literal notranslate"><span class="pre">interpolate()</span></code></a> — Down-/up- sampler</p></li>
<li><p><a class="reference internal" href="#torch.nn.quantized.functional.linear" title="torch.nn.quantized.functional.linear"><code class="xref py py-func docutils literal notranslate"><span class="pre">linear()</span></code></a> — Linear (fully-connected) op</p></li>
<li><p><a class="reference internal" href="#torch.nn.quantized.functional.max_pool2d" title="torch.nn.quantized.functional.max_pool2d"><code class="xref py py-func docutils literal notranslate"><span class="pre">max_pool2d()</span></code></a> — 2D max pooling</p></li>
<li><p><a class="reference internal" href="#torch.nn.quantized.functional.relu" title="torch.nn.quantized.functional.relu"><code class="xref py py-func docutils literal notranslate"><span class="pre">relu()</span></code></a> — Rectified linear unit</p></li>
<li><p><a class="reference internal" href="#torch.nn.quantized.functional.upsample" title="torch.nn.quantized.functional.upsample"><code class="xref py py-func docutils literal notranslate"><span class="pre">upsample()</span></code></a> — Upsampler. Will be deprecated in favor of <a class="reference internal" href="#torch.nn.quantized.functional.interpolate" title="torch.nn.quantized.functional.interpolate"><code class="xref py py-func docutils literal notranslate"><span class="pre">interpolate()</span></code></a></p></li>
<li><p><a class="reference internal" href="#torch.nn.quantized.functional.upsample_bilinear" title="torch.nn.quantized.functional.upsample_bilinear"><code class="xref py py-func docutils literal notranslate"><span class="pre">upsample_bilinear()</span></code></a> — Bilinear upsampler. Will be deprecated in favor of <a class="reference internal" href="#torch.nn.quantized.functional.interpolate" title="torch.nn.quantized.functional.interpolate"><code class="xref py py-func docutils literal notranslate"><span class="pre">interpolate()</span></code></a></p></li>
<li><p><a class="reference internal" href="#torch.nn.quantized.functional.upsample_nearest" title="torch.nn.quantized.functional.upsample_nearest"><code class="xref py py-func docutils literal notranslate"><span class="pre">upsample_nearest()</span></code></a> — Nearest neighbor upsampler. Will be deprecated in favor of <a class="reference internal" href="#torch.nn.quantized.functional.interpolate" title="torch.nn.quantized.functional.interpolate"><code class="xref py py-func docutils literal notranslate"><span class="pre">interpolate()</span></code></a></p></li>
</ul>
</div>
<div class="section" id="quantized-dtypes-and-quantization-schemes">
<h3>Quantized dtypes and quantization schemes<a class="headerlink" href="#quantized-dtypes-and-quantization-schemes" title="Permalink to this headline">¶</a></h3>
<ul class="simple">
<li><dl class="simple">
<dt><code class="xref py py-attr docutils literal notranslate"><span class="pre">torch.qscheme</span></code> — Type to describe the quantization scheme of a tensor. Supported types:</dt><dd><ul>
<li><p><code class="xref py py-attr docutils literal notranslate"><span class="pre">torch.per_tensor_affine</span></code> — per tensor, asymmetric</p></li>
<li><p><code class="xref py py-attr docutils literal notranslate"><span class="pre">torch.per_channel_affine</span></code> — per channel, asymmetric</p></li>
<li><p><code class="xref py py-attr docutils literal notranslate"><span class="pre">torch.per_tensor_symmetric</span></code> — per tensor, symmetric</p></li>
<li><p><code class="xref py py-attr docutils literal notranslate"><span class="pre">torch.per_channel_symmetric</span></code> — per channel, symmetric</p></li>
</ul>
</dd>
</dl>
</li>
<li><dl class="simple">
<dt><code class="docutils literal notranslate"><span class="pre">torch.dtype</span></code> — Type to describe the data. Supported types:</dt><dd><ul>
<li><p><code class="xref py py-attr docutils literal notranslate"><span class="pre">torch.quint8</span></code> — 8-bit unsigned integer</p></li>
<li><p><code class="xref py py-attr docutils literal notranslate"><span class="pre">torch.qint8</span></code> — 8-bit signed integer</p></li>
<li><p><code class="xref py py-attr docutils literal notranslate"><span class="pre">torch.qint32</span></code> — 32-bit signed integer</p></li>
</ul>
</dd>
</dl>
</li>
</ul>
</div>
</div>
<div class="section" id="quantization-workflows">
<h2>Quantization Workflows<a class="headerlink" href="#quantization-workflows" title="Permalink to this headline">¶</a></h2>
<p>PyTorch provides three approaches to quantize models.</p>
<ol class="arabic">
<li><p>Post Training Dynamic Quantization: This is the simplest to apply form of
quantization where the weights are quantized ahead of time but the
activations are dynamically quantized  during inference. This is used
for situations where the model execution time is dominated by loading
weights from memory rather than computing the matrix multiplications.
This is true for LSTM and Transformer type models with small
batch size. Applying dynamic quantization to a whole model can be
done with a single call to <a class="reference internal" href="#torch.quantization.quantize_dynamic" title="torch.quantization.quantize_dynamic"><code class="xref py py-func docutils literal notranslate"><span class="pre">torch.quantization.quantize_dynamic()</span></code></a>.
See the <a class="reference external" href="https://pytorch.org/tutorials/#quantization-experimental">quantization tutorials</a></p></li>
<li><p>Post Training Static Quantization: This is the most commonly used form of
quantization where the weights are quantized ahead of time and the
scale factor and bias for the activation tensors is pre-computed
based on observing the behavior of the model during a calibration
process. Post Training Quantization is typically used when both memory bandwidth and compute
savings are important with CNNs being a typical use case.
The general process for doing post training quantization is:</p>
<ol class="arabic simple">
<li><p>Prepare the model:
a. Specify where the activations are quantized and dequantized explicitly by adding QuantStub and DeQuantStub modules.
b. Ensure that modules are not reused.
c. Convert any operations that require requantization into modules</p></li>
<li><p>Fuse operations like conv + relu or conv+batchnorm + relu together to improve both model accuracy and performance.</p></li>
<li><p>Specify the configuration of the quantization methods — such as
selecting symmetric or asymmetric quantization and MinMax or
L2Norm calibration techniques.</p></li>
<li><p>Use the <a class="reference internal" href="#torch.quantization.prepare" title="torch.quantization.prepare"><code class="xref py py-func docutils literal notranslate"><span class="pre">torch.quantization.prepare()</span></code></a> to insert modules
that will observe activation tensors during calibration</p></li>
<li><p>Calibrate the model by running inference against a calibration
dataset</p></li>
<li><p>Finally, convert the model itself with the
torch.quantization.convert() method. This does several things: it
quantizes the weights, computes and stores the scale and bias
values to be used with each activation tensor, and replaces key
operators with quantized implementations.</p></li>
</ol>
<p>See the <a class="reference external" href="https://pytorch.org/tutorials/#quantization-experimental">quantization tutorials</a></p>
</li>
<li><p>Quantization Aware Training: In the rare cases where post training
quantization does not provide adequate accuracy training can be done
with simulated quantization using the <a class="reference internal" href="#torch.quantization.FakeQuantize" title="torch.quantization.FakeQuantize"><code class="xref py py-class docutils literal notranslate"><span class="pre">torch.quantization.FakeQuantize</span></code></a>. Computations
will take place in FP32 but with values clamped and rounded to
simulate the effects of INT8 quantization. The sequence of steps is
very similar.</p>
<ol class="arabic simple">
<li><p>Steps (1) and (2) are identical.</p></li>
</ol>
<ol class="arabic simple" start="3">
<li><p>Specify the configuration of the fake quantization methods — such as
selecting symmetric or asymmetric quantization and MinMax or Moving Average
or L2Norm calibration techniques.</p></li>
<li><p>Use the <a class="reference internal" href="#torch.quantization.prepare_qat" title="torch.quantization.prepare_qat"><code class="xref py py-func docutils literal notranslate"><span class="pre">torch.quantization.prepare_qat()</span></code></a> to insert modules
that will simulate quantization during training.</p></li>
<li><p>Train or fine tune the model.</p></li>
<li><p>Identical to step (6) for post training quantization</p></li>
</ol>
<p>See the <a class="reference external" href="https://pytorch.org/tutorials/#quantization-experimental">quantization tutorials</a></p>
</li>
</ol>
<p>While default implementations of observers to select the scale factor and bias
based on observed tensor data are provided, developers can provide their own
quantization functions. Quantization can be applied selectively to different
parts of the model or configured differently for different parts of the model.</p>
<p>We also provide support for per channel quantization for <strong>conv2d()</strong>,
<strong>conv3d()</strong> and <strong>linear()</strong></p>
<p>Quantization workflows work by adding (e.g. adding observers as
<code class="docutils literal notranslate"><span class="pre">.observer</span></code> submodule) or replacing (e.g. converting <code class="docutils literal notranslate"><span class="pre">nn.Conv2d</span></code> to
<code class="docutils literal notranslate"><span class="pre">nn.quantized.Conv2d</span></code>) submodules in the model’s module hierarchy. It
means that the model stays a regular <code class="docutils literal notranslate"><span class="pre">nn.Module</span></code>-based instance throughout the
process and thus can work with the rest of PyTorch APIs.</p>
</div>
<div class="section" id="model-preparation-for-quantization">
<h2>Model Preparation for Quantization<a class="headerlink" href="#model-preparation-for-quantization" title="Permalink to this headline">¶</a></h2>
<p>It is necessary to currently make some modifications to the model definition
prior to quantization. This is because currently quantization works on a module
by module basis. Specifically, for all quantization techniques, the user needs to:</p>
<ol class="arabic simple">
<li><p>Convert any operations that require output requantization (and thus have additional parameters) from functionals to module form.</p></li>
<li><p>Specify which parts of the model need to be quantized either by assigning <code class="docutils literal notranslate"><span class="pre">.qconfig</span></code> attributes on submodules or by specifying <code class="docutils literal notranslate"><span class="pre">qconfig_dict</span></code></p></li>
</ol>
<p>For static quantization techniques which quantize activations, the user needs to do the following in addition:</p>
<ol class="arabic simple">
<li><p>Specify where activations are quantized and de-quantized. This is done using <a class="reference internal" href="#torch.quantization.QuantStub" title="torch.quantization.QuantStub"><code class="xref py py-class docutils literal notranslate"><span class="pre">QuantStub</span></code></a> and <a class="reference internal" href="#torch.quantization.DeQuantStub" title="torch.quantization.DeQuantStub"><code class="xref py py-class docutils literal notranslate"><span class="pre">DeQuantStub</span></code></a> modules.</p></li>
<li><p>Use <a class="reference internal" href="#torch.nn.quantized.FloatFunctional" title="torch.nn.quantized.FloatFunctional"><code class="xref py py-class docutils literal notranslate"><span class="pre">torch.nn.quantized.FloatFunctional</span></code></a> to wrap tensor operations that require special handling for quantization into modules. Examples
are operations like <code class="docutils literal notranslate"><span class="pre">add</span></code> and <code class="docutils literal notranslate"><span class="pre">cat</span></code> which require special handling to determine output quantization parameters.</p></li>
<li><p>Fuse modules: combine operations/modules into a single module to obtain higher accuracy and performance. This is done using the
<a class="reference internal" href="#torch.quantization.fuse_modules" title="torch.quantization.fuse_modules"><code class="xref py py-func docutils literal notranslate"><span class="pre">torch.quantization.fuse_modules()</span></code></a> API, which takes in lists of modules to be fused. We currently support the following fusions:
[Conv, Relu], [Conv, BatchNorm], [Conv, BatchNorm, Relu], [Linear, Relu]</p></li>
</ol>
</div>
<div class="section" id="id1">
<h2>torch.quantization<a class="headerlink" href="#id1" title="Permalink to this headline">¶</a></h2>
<span class="target" id="module-torch.quantization"></span><p>This module implements the functions you call
directly to convert your model from FP32 to quantized form. For
example the <a class="reference internal" href="#torch.quantization.prepare" title="torch.quantization.prepare"><code class="xref py py-func docutils literal notranslate"><span class="pre">prepare()</span></code></a> is used in post training
quantization to prepare your model for the calibration step and
<a class="reference internal" href="#torch.quantization.convert" title="torch.quantization.convert"><code class="xref py py-func docutils literal notranslate"><span class="pre">convert()</span></code></a> actually converts the weights to int8 and
replaces the operations with their quantized counterparts. There are
other helper functions for things like quantizing the input to your
model and performing critical fusions like conv+relu.</p>
<div class="section" id="top-level-quantization-apis">
<h3>Top-level quantization APIs<a class="headerlink" href="#top-level-quantization-apis" title="Permalink to this headline">¶</a></h3>
<dl class="function">
<dt id="torch.quantization.quantize">
<code class="sig-prename descclassname">torch.quantization.</code><code class="sig-name descname">quantize</code><span class="sig-paren">(</span><em class="sig-param">model</em>, <em class="sig-param">run_fn</em>, <em class="sig-param">run_args</em>, <em class="sig-param">mapping=None</em>, <em class="sig-param">inplace=False</em><span class="sig-paren">)</span><a class="reference internal" href="_modules/torch/quantization/quantize.html#quantize"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#torch.quantization.quantize" title="Permalink to this definition">¶</a></dt>
<dd><p>Converts a float model to quantized model.</p>
<p>First it will prepare the model for calibration or training, then it calls
<cite>run_fn</cite> which will run the calibration step or training step,
after that we will call <cite>convert</cite> which will convert the model to a
quantized model.</p>
<dl class="field-list simple">
<dt class="field-odd">Parameters</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>model</strong> – input model</p></li>
<li><p><strong>run_fn</strong> – a function for evaluating the prepared model, can be a
function that simply runs the prepared model or a training loop</p></li>
<li><p><strong>run_args</strong> – positional arguments for <cite>run_fn</cite></p></li>
<li><p><strong>inplace</strong> – carry out model transformations in-place, the original module is mutated</p></li>
<li><p><strong>mapping</strong> – correspondence between original module types and quantized counterparts</p></li>
</ul>
</dd>
<dt class="field-even">Returns</dt>
<dd class="field-even"><p>Quantized model.</p>
</dd>
</dl>
</dd></dl>

<dl class="function">
<dt id="torch.quantization.quantize_dynamic">
<code class="sig-prename descclassname">torch.quantization.</code><code class="sig-name descname">quantize_dynamic</code><span class="sig-paren">(</span><em class="sig-param">model</em>, <em class="sig-param">qconfig_spec=None</em>, <em class="sig-param">dtype=torch.qint8</em>, <em class="sig-param">mapping=None</em>, <em class="sig-param">inplace=False</em><span class="sig-paren">)</span><a class="reference internal" href="_modules/torch/quantization/quantize.html#quantize_dynamic"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#torch.quantization.quantize_dynamic" title="Permalink to this definition">¶</a></dt>
<dd><p>Converts a float model to dynamic (i.e. weights-only) quantized model.</p>
<p>Replaces specified modules with dynamic weight-only quantized versions and output the quantized model.</p>
<p>For simplest usage provide <cite>dtype</cite> argument that can be float16 or qint8. Weight-only quantization
by default is performed for layers with large weights size - i.e. Linear and RNN variants.</p>
<p>Fine grained control is possible with <cite>qconfig</cite> and <cite>mapping</cite> that act similarly to <cite>quantize()</cite>.
If <cite>qconfig</cite> is provided, the <cite>dtype</cite> argument is ignored.</p>
<dl class="field-list simple">
<dt class="field-odd">Parameters</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>module</strong> – input model</p></li>
<li><p><strong>qconfig_spec</strong> – <p>Either:</p>
<ul>
<li><p>A dictionary that maps from name or type of submodule to quantization
configuration, qconfig applies to all submodules of a given
module unless qconfig for the submodules are specified (when the
submodule already has qconfig attribute). Entries in the dictionary
need to be QConfigDynamic instances.</p></li>
<li><p>A set of types and/or submodule names to apply dynamic quantization to,
in which case the <cite>dtype</cite> argument is used to specify the bit-width
</ul>
</p></li>
<li><p><strong>inplace</strong> – carry out model transformations in-place, the original module is mutated</p></li>
<li><p><strong>mapping</strong> – maps type of a submodule to a type of corresponding dynamically quantized version
with which the submodule needs to be replaced</p></li>
</ul>
</dd>
</dl>
</dd></dl>

<dl class="function">
<dt id="torch.quantization.quantize_qat">
<code class="sig-prename descclassname">torch.quantization.</code><code class="sig-name descname">quantize_qat</code><span class="sig-paren">(</span><em class="sig-param">model</em>, <em class="sig-param">run_fn</em>, <em class="sig-param">run_args</em>, <em class="sig-param">inplace=False</em><span class="sig-paren">)</span><a class="reference internal" href="_modules/torch/quantization/quantize.html#quantize_qat"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#torch.quantization.quantize_qat" title="Permalink to this definition">¶</a></dt>
<dd><p>Do quantization aware training and output a quantized model</p>
<dl class="field-list simple">
<dt class="field-odd">Parameters</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>model</strong> – input model</p></li>
<li><p><strong>run_fn</strong> – a function for evaluating the prepared model, can be a
function that simply runs the prepared model or a training
loop</p></li>
<li><p><strong>run_args</strong> – positional arguments for <cite>run_fn</cite></p></li>
</ul>
</dd>
<dt class="field-even">Returns</dt>
<dd class="field-even"><p>Quantized model.</p>
</dd>
</dl>
</dd></dl>

<dl class="function">
<dt id="torch.quantization.prepare">
<code class="sig-prename descclassname">torch.quantization.</code><code class="sig-name descname">prepare</code><span class="sig-paren">(</span><em class="sig-param">model</em>, <em class="sig-param">inplace=False</em><span class="sig-paren">)</span><a class="reference internal" href="_modules/torch/quantization/quantize.html#prepare"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#torch.quantization.prepare" title="Permalink to this definition">¶</a></dt>
<dd><p>Prepares a copy of the model for quantization calibration or quantization-aware training.</p>
<p>Quantization configuration should be assigned preemptively
to individual submodules in <cite>.qconfig</cite> attribute.</p>
<p>The model will be attached with observer or fake quant modules, and qconfig
will be propagated.</p>
<dl class="field-list simple">
<dt class="field-odd">Parameters</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>model</strong> – input model to be modified in-place</p></li>
<li><p><strong>inplace</strong> – carry out model transformations in-place, the original module is mutated</p></li>
</ul>
</dd>
</dl>
</dd></dl>

<dl class="function">
<dt id="torch.quantization.prepare_qat">
<code class="sig-prename descclassname">torch.quantization.</code><code class="sig-name descname">prepare_qat</code><span class="sig-paren">(</span><em class="sig-param">model</em>, <em class="sig-param">mapping=None</em>, <em class="sig-param">inplace=False</em><span class="sig-paren">)</span><a class="reference internal" href="_modules/torch/quantization/quantize.html#prepare_qat"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#torch.quantization.prepare_qat" title="Permalink to this definition">¶</a></dt>
<dd><p>Prepares a copy of the model for quantization calibration or
quantization-aware training and converts it to quantized version.</p>
<p>Quantization configuration should be assigned preemptively
to individual submodules in <cite>.qconfig</cite> attribute.</p>
<dl class="field-list simple">
<dt class="field-odd">Parameters</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>model</strong> – input model to be modified in-place</p></li>
<li><p><strong>mapping</strong> – dictionary that maps float modules to quantized modules to be
replaced.</p></li>
<li><p><strong>inplace</strong> – carry out model transformations in-place, the original module
is mutated</p></li>
</ul>
</dd>
</dl>
</dd></dl>

<dl class="function">
<dt id="torch.quantization.convert">
<code class="sig-prename descclassname">torch.quantization.</code><code class="sig-name descname">convert</code><span class="sig-paren">(</span><em class="sig-param">module</em>, <em class="sig-param">mapping=None</em>, <em class="sig-param">inplace=False</em><span class="sig-paren">)</span><a class="reference internal" href="_modules/torch/quantization/quantize.html#convert"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#torch.quantization.convert" title="Permalink to this definition">¶</a></dt>
<dd><p>Converts the float module with observers (where we can get quantization
parameters) to a quantized module.</p>
<dl class="field-list simple">
<dt class="field-odd">Parameters</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>module</strong> – calibrated module with observers</p></li>
<li><p><strong>mapping</strong> – a dictionary that maps from float module type to quantized
module type, can be overwritten to allow swapping user defined
Modules</p></li>
<li><p><strong>inplace</strong> – carry out model transformations in-place, the original module
is mutated</p></li>
</ul>
</dd>
</dl>
</dd></dl>

<dl class="class">
<dt id="torch.quantization.QConfig">
<em class="property">class </em><code class="sig-prename descclassname">torch.quantization.</code><code class="sig-name descname">QConfig</code><a class="reference internal" href="_modules/torch/quantization/qconfig.html#QConfig"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#torch.quantization.QConfig" title="Permalink to this definition">¶</a></dt>
<dd><p>Describes how to quantize a layer or a part of the network by providing
settings (observer classes) for activations and weights respectively.</p>
<p>Note that QConfig needs to contain observer <strong>classes</strong> (like MinMaxObserver) or a callable that returns
instances on invocation, not the concrete observer instances themselves.
Quantization preparation function will instantiate observers multiple times for each of the layers.</p>
<p>Observer classes have usually reasonable default arguments, but they can be overwritten with <cite>with_args</cite>
method (that behaves like functools.partial):</p>
<blockquote>
<div><p>my_qconfig = QConfig(activation=MinMaxObserver.with_args(dtype=torch.qint8),
weight=default_observer.with_args(dtype=torch.qint8))</p>
</div></blockquote>
</dd></dl>

<dl class="class">
<dt id="torch.quantization.QConfigDynamic">
<em class="property">class </em><code class="sig-prename descclassname">torch.quantization.</code><code class="sig-name descname">QConfigDynamic</code><a class="reference internal" href="_modules/torch/quantization/qconfig.html#QConfigDynamic"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#torch.quantization.QConfigDynamic" title="Permalink to this definition">¶</a></dt>
<dd><p>Describes how to dynamically quantize a layer or a part of the network by providing
settings (observer classes) for weights.</p>
<p>It’s like QConfig, but for dynamic quantization.</p>
<p>Note that QConfigDynamic needs to contain observer <strong>classes</strong> (like MinMaxObserver) or a callable that returns
instances on invocation, not the concrete observer instances themselves.
Quantization function will instantiate observers multiple times for each of the layers.</p>
<p>Observer classes have usually reasonable default arguments, but they can be overwritten with <cite>with_args</cite>
method (that behaves like functools.partial):</p>
<blockquote>
<div><p>my_qconfig = QConfigDynamic(weight=default_observer.with_args(dtype=torch.qint8))</p>
</div></blockquote>
</dd></dl>

</div>
<div class="section" id="preparing-model-for-quantization">
<h3>Preparing model for quantization<a class="headerlink" href="#preparing-model-for-quantization" title="Permalink to this headline">¶</a></h3>
<dl class="function">
<dt id="torch.quantization.fuse_modules">
<code class="sig-prename descclassname">torch.quantization.</code><code class="sig-name descname">fuse_modules</code><span class="sig-paren">(</span><em class="sig-param">model</em>, <em class="sig-param">modules_to_fuse</em>, <em class="sig-param">inplace=False</em>, <em class="sig-param">fuser_func=&lt;function fuse_known_modules&gt;</em><span class="sig-paren">)</span><a class="reference internal" href="_modules/torch/quantization/fuse_modules.html#fuse_modules"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#torch.quantization.fuse_modules" title="Permalink to this definition">¶</a></dt>
<dd><p>Fuses a list of modules into a single module</p>
<p>Fuses only the following sequence of modules:</p>
<ul class="simple">
<li><p>conv, bn</p></li>
<li><p>conv, bn, relu</p></li>
<li><p>conv, relu</p></li>
<li><p>linear, relu</p></li>
</ul>
<p>All other sequences are left unchanged.
For these sequences, replaces the first item in the list
with the fused module, replacing the rest of the modules
with identity.</p>
<dl class="field-list simple">
<dt class="field-odd">Parameters</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>model</strong> – Model containing the modules to be fused</p></li>
<li><p><strong>modules_to_fuse</strong> – list of list of module names to fuse. Can also be a list
of strings if there is only a single list of modules to fuse.</p></li>
<li><p><strong>inplace</strong> – bool specifying if fusion happens in place on the model, by default
a new model is returned</p></li>
<li><p><strong>fuser_func</strong> – Function that takes in a list of modules and outputs a list of fused modules
of the same length. For example,
fuser_func([convModule, BNModule]) returns the list [ConvBNModule, nn.Identity()]
Defaults to torch.quantization.fuse_known_modules</p></li>
</ul>
</dd>
<dt class="field-even">Returns</dt>
<dd class="field-even"><p>model with fused modules. A new copy is created if inplace=False.</p>
</dd>
</dl>
<p>Examples:</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="gp">&gt;&gt;&gt; </span><span class="n">m</span> <span class="o">=</span> <span class="n">myModel</span><span class="p">()</span>
<span class="gp">&gt;&gt;&gt; </span><span class="c1"># m is a module containing  the sub-modules below</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">modules_to_fuse</span> <span class="o">=</span> <span class="p">[</span> <span class="p">[</span><span class="s1">&#39;conv1&#39;</span><span class="p">,</span> <span class="s1">&#39;bn1&#39;</span><span class="p">,</span> <span class="s1">&#39;relu1&#39;</span><span class="p">],</span> <span class="p">[</span><span class="s1">&#39;submodule.conv&#39;</span><span class="p">,</span> <span class="s1">&#39;submodule.relu&#39;</span><span class="p">]]</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">fused_m</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">quantization</span><span class="o">.</span><span class="n">fuse_modules</span><span class="p">(</span><span class="n">m</span><span class="p">,</span> <span class="n">modules_to_fuse</span><span class="p">)</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">output</span> <span class="o">=</span> <span class="n">fused_m</span><span class="p">(</span><span class="nb">input</span><span class="p">)</span>

<span class="gp">&gt;&gt;&gt; </span><span class="n">m</span> <span class="o">=</span> <span class="n">myModel</span><span class="p">()</span>
<span class="gp">&gt;&gt;&gt; </span><span class="c1"># Alternately provide a single list of modules to fuse</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">modules_to_fuse</span> <span class="o">=</span> <span class="p">[</span><span class="s1">&#39;conv1&#39;</span><span class="p">,</span> <span class="s1">&#39;bn1&#39;</span><span class="p">,</span> <span class="s1">&#39;relu1&#39;</span><span class="p">]</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">fused_m</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">quantization</span><span class="o">.</span><span class="n">fuse_modules</span><span class="p">(</span><span class="n">m</span><span class="p">,</span> <span class="n">modules_to_fuse</span><span class="p">)</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">output</span> <span class="o">=</span> <span class="n">fused_m</span><span class="p">(</span><span class="nb">input</span><span class="p">)</span>
</pre></div>
</div>
</dd></dl>

<dl class="class">
<dt id="torch.quantization.QuantStub">
<em class="property">class </em><code class="sig-prename descclassname">torch.quantization.</code><code class="sig-name descname">QuantStub</code><span class="sig-paren">(</span><em class="sig-param">qconfig=None</em><span class="sig-paren">)</span><a class="reference internal" href="_modules/torch/quantization/stubs.html#QuantStub"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#torch.quantization.QuantStub" title="Permalink to this definition">¶</a></dt>
<dd><p>Quantize stub module. Before calibration, this is the same as an observer;
it will be swapped for <cite>nnq.Quantize</cite> in <cite>convert</cite>.</p>
<dl class="field-list simple">
<dt class="field-odd">Parameters</dt>
<dd class="field-odd"><p><strong>qconfig</strong> – quantization configuration for the tensor,
if qconfig is not provided, we will get qconfig from parent modules</p>
</dd>
</dl>
</dd></dl>

<dl class="class">
<dt id="torch.quantization.DeQuantStub">
<em class="property">class </em><code class="sig-prename descclassname">torch.quantization.</code><code class="sig-name descname">DeQuantStub</code><a class="reference internal" href="_modules/torch/quantization/stubs.html#DeQuantStub"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#torch.quantization.DeQuantStub" title="Permalink to this definition">¶</a></dt>
<dd><p>Dequantize stub module. Before calibration, this is the same as identity;
it will be swapped for <cite>nnq.DeQuantize</cite> in <cite>convert</cite>.</p>
</dd></dl>

<dl class="class">
<dt id="torch.quantization.QuantWrapper">
<em class="property">class </em><code class="sig-prename descclassname">torch.quantization.</code><code class="sig-name descname">QuantWrapper</code><span class="sig-paren">(</span><em class="sig-param">module</em><span class="sig-paren">)</span><a class="reference internal" href="_modules/torch/quantization/stubs.html#QuantWrapper"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#torch.quantization.QuantWrapper" title="Permalink to this definition">¶</a></dt>
<dd><p>A wrapper class that wraps the input module, adds QuantStub and
DeQuantStub, and surrounds the call to the module with calls to the quant and
dequant modules.</p>
<p>This is used by the <cite>quantization</cite> utility functions to add the quant and
dequant modules, before <cite>convert</cite> function <cite>QuantStub</cite> will just be observer,
it observes the input tensor, after <cite>convert</cite>, <cite>QuantStub</cite>
will be swapped to <cite>nnq.Quantize</cite> which does actual quantization. Similarly
for <cite>DeQuantStub</cite>.</p>
</dd></dl>

<dl class="function">
<dt id="torch.quantization.add_quant_dequant">
<code class="sig-prename descclassname">torch.quantization.</code><code class="sig-name descname">add_quant_dequant</code><span class="sig-paren">(</span><em class="sig-param">module</em><span class="sig-paren">)</span><a class="reference internal" href="_modules/torch/quantization/quantize.html#add_quant_dequant"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#torch.quantization.add_quant_dequant" title="Permalink to this definition">¶</a></dt>
<dd><p>Wrap the leaf child module in QuantWrapper if it has a valid qconfig
Note that this function will modify the children of module inplace and it
can return a new module which wraps the input module as well.</p>
<dl class="field-list simple">
<dt class="field-odd">Parameters</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>module</strong> – input module with qconfig attributes for all the leaf modules
that we want to quantize</p></li>
</ul>
</dd>
<dt class="field-even">Returns</dt>
<dd class="field-even"><p>Either the inplace modified module with submodules wrapped in
<cite>QuantWrapper</cite> based on qconfig or a new <cite>QuantWrapper</cite> module which
wraps the input module, the latter case only happens when the input
module is a leaf module and we want to quantize it.</p>
</dd>
</dl>
</dd></dl>

</div>
<div class="section" id="utility-functions">
<h3>Utility functions<a class="headerlink" href="#utility-functions" title="Permalink to this headline">¶</a></h3>
<dl class="function">
<dt id="torch.quantization.add_observer_">
<code class="sig-prename descclassname">torch.quantization.</code><code class="sig-name descname">add_observer_</code><span class="sig-paren">(</span><em class="sig-param">module</em><span class="sig-paren">)</span><a class="reference internal" href="_modules/torch/quantization/quantize.html#add_observer_"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#torch.quantization.add_observer_" title="Permalink to this definition">¶</a></dt>
<dd><p>Add an observer for each leaf child of the module.</p>
<p>This function inserts an observer module into every leaf child module that
has a valid qconfig attribute.</p>
<dl class="field-list simple">
<dt class="field-odd">Parameters</dt>
<dd class="field-odd"><p><strong>module</strong> – input module with qconfig attributes for all the leaf modules that we want to quantize</p>
</dd>
<dt class="field-even">Returns</dt>
<dd class="field-even"><p>None, module is modified inplace with added observer modules and forward_hooks</p>
</dd>
</dl>
</dd></dl>

<dl class="function">
<dt id="torch.quantization.swap_module">
<code class="sig-prename descclassname">torch.quantization.</code><code class="sig-name descname">swap_module</code><span class="sig-paren">(</span><em class="sig-param">mod</em>, <em class="sig-param">mapping</em><span class="sig-paren">)</span><a class="reference internal" href="_modules/torch/quantization/quantize.html#swap_module"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#torch.quantization.swap_module" title="Permalink to this definition">¶</a></dt>
<dd><p>Swaps the module if it has a quantized counterpart and it has an
<cite>observer</cite> attached.</p>
<dl class="field-list simple">
<dt class="field-odd">Parameters</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>mod</strong> – input module</p></li>
<li><p><strong>mapping</strong> – a dictionary that maps from nn module to nnq module</p></li>
</ul>
</dd>
<dt class="field-even">Returns</dt>
<dd class="field-even"><p>The corresponding quantized module of <cite>mod</cite></p>
</dd>
</dl>
</dd></dl>

<dl class="function">
<dt id="torch.quantization.propagate_qconfig_">
<code class="sig-prename descclassname">torch.quantization.</code><code class="sig-name descname">propagate_qconfig_</code><span class="sig-paren">(</span><em class="sig-param">module</em>, <em class="sig-param">qconfig_dict=None</em><span class="sig-paren">)</span><a class="reference internal" href="_modules/torch/quantization/quantize.html#propagate_qconfig_"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#torch.quantization.propagate_qconfig_" title="Permalink to this definition">¶</a></dt>
<dd><p>Propagate qconfig through the module hierarchy and assign <cite>qconfig</cite>
attribute on each leaf module</p>
<dl class="field-list simple">
<dt class="field-odd">Parameters</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>module</strong> – input module</p></li>
<li><p><strong>qconfig_dict</strong> – dictionary that maps from name or type of submodule to
quantization configuration, qconfig applies to all submodules of a
given module unless qconfig for the submodules are specified (when
the submodule already has qconfig attribute)</p></li>
</ul>
</dd>
<dt class="field-even">Returns</dt>
<dd class="field-even"><p>None, module is modified inplace with qconfig attached</p>
</dd>
</dl>
</dd></dl>

<dl class="function">
<dt id="torch.quantization.default_eval_fn">
<code class="sig-prename descclassname">torch.quantization.</code><code class="sig-name descname">default_eval_fn</code><span class="sig-paren">(</span><em class="sig-param">model</em>, <em class="sig-param">calib_data</em><span class="sig-paren">)</span><a class="reference internal" href="_modules/torch/quantization.html#default_eval_fn"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#torch.quantization.default_eval_fn" title="Permalink to this definition">¶</a></dt>
<dd><p>Default evaluation function: takes a torch.utils.data.Dataset or a list of
input Tensors and runs the model on the dataset</p>
</dd></dl>

</div>
<div class="section" id="observers">
<h3>Observers<a class="headerlink" href="#observers" title="Permalink to this headline">¶</a></h3>
<dl class="class">
<dt id="torch.quantization.MinMaxObserver">
<em class="property">class </em><code class="sig-prename descclassname">torch.quantization.</code><code class="sig-name descname">MinMaxObserver</code><span class="sig-paren">(</span><em class="sig-param">dtype=torch.quint8</em>, <em class="sig-param">qscheme=torch.per_tensor_affine</em>, <em class="sig-param">reduce_range=False</em><span class="sig-paren">)</span><a class="reference internal" href="_modules/torch/quantization/observer.html#MinMaxObserver"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#torch.quantization.MinMaxObserver" title="Permalink to this definition">¶</a></dt>
<dd><p>Observer module for computing the quantization parameters based on the
running min and max values.</p>
<p>This observer uses the tensor min/max statistics to compute the quantization
parameters. The module records the running minimum and maximum of incoming
tensors, and uses this statistic to compute the quantization parameters.</p>
<dl class="field-list simple">
<dt class="field-odd">Parameters</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>dtype</strong> – Quantized data type</p></li>
<li><p><strong>qscheme</strong> – Quantization scheme to be used</p></li>
<li><p><strong>reduce_range</strong> – Reduces the range of the quantized data type by 1 bit</p></li>
</ul>
</dd>
</dl>
<p>Given running min/max as <span class="math"><span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><msub><mi>x</mi><mtext>min</mtext></msub></mrow><annotation encoding="application/x-tex">x_\text{min}</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:0.58056em;vertical-align:-0.15em;"></span><span class="mord"><span class="mord mathdefault">x</span><span class="msupsub"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:0.31750199999999995em;"><span style="top:-2.5500000000000003em;margin-left:0em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord text mtight"><span class="mord mtight">min</span></span></span></span></span><span class="vlist-s">​</span></span><span class="vlist-r"><span class="vlist" style="height:0.15em;"><span></span></span></span></span></span></span></span></span></span>

</span> and <span class="math"><span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><msub><mi>x</mi><mtext>max</mtext></msub></mrow><annotation encoding="application/x-tex">x_\text{max}</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:0.58056em;vertical-align:-0.15em;"></span><span class="mord"><span class="mord mathdefault">x</span><span class="msupsub"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:0.151392em;"><span style="top:-2.5500000000000003em;margin-left:0em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord text mtight"><span class="mord mtight">max</span></span></span></span></span><span class="vlist-s">​</span></span><span class="vlist-r"><span class="vlist" style="height:0.15em;"><span></span></span></span></span></span></span></span></span></span>

</span>,
scale <span class="math"><span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><mi>s</mi></mrow><annotation encoding="application/x-tex">s</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:0.43056em;vertical-align:0em;"></span><span class="mord mathdefault">s</span></span></span></span>

</span> and zero point <span class="math"><span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><mi>z</mi></mrow><annotation encoding="application/x-tex">z</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:0.43056em;vertical-align:0em;"></span><span class="mord mathdefault" style="margin-right:0.04398em;">z</span></span></span></span>

</span> are computed as:</p>
<p>The running minimum/maximum <span class="math"><span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><msub><mi>x</mi><mtext>min/max</mtext></msub></mrow><annotation encoding="application/x-tex">x_\text{min/max}</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:0.7857599999999999em;vertical-align:-0.3551999999999999em;"></span><span class="mord"><span class="mord mathdefault">x</span><span class="msupsub"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:0.34480000000000005em;"><span style="top:-2.5198em;margin-left:0em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord text mtight"><span class="mord mtight">min/max</span></span></span></span></span><span class="vlist-s">​</span></span><span class="vlist-r"><span class="vlist" style="height:0.3551999999999999em;"><span></span></span></span></span></span></span></span></span></span>

</span> is computed as:</p>
<div class="math">
<span class="katex-display"><span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mtable rowspacing="0.15999999999999992em" columnalign="left left" columnspacing="1em"><mtr><mtd><mstyle scriptlevel="0" displaystyle="false"><msub><mi>x</mi><mtext>min</mtext></msub></mstyle></mtd><mtd><mstyle scriptlevel="0" displaystyle="false"><mrow><mo>=</mo><mrow><mo fence="true">{</mo><mtable rowspacing="0.3599999999999999em" columnalign="left left" columnspacing="1em"><mtr><mtd><mstyle scriptlevel="0" displaystyle="false"><mrow><mi>min</mi><mo>⁡</mo><mo stretchy="false">(</mo><mi>X</mi><mo stretchy="false">)</mo></mrow></mstyle></mtd><mtd><mstyle scriptlevel="0" displaystyle="false"><mrow><mtext>if </mtext><msub><mi>x</mi><mtext>min</mtext></msub><mo>=</mo><mtext>None</mtext></mrow></mstyle></mtd></mtr><mtr><mtd><mstyle scriptlevel="0" displaystyle="false"><mrow><mi>min</mi><mo>⁡</mo><mrow><mo fence="true">(</mo><msub><mi>x</mi><mtext>min</mtext></msub><mo separator="true">,</mo><mi>min</mi><mo>⁡</mo><mo stretchy="false">(</mo><mi>X</mi><mo stretchy="false">)</mo><mo fence="true">)</mo></mrow></mrow></mstyle></mtd><mtd><mstyle scriptlevel="0" displaystyle="false"><mtext>otherwise</mtext></mstyle></mtd></mtr></mtable></mrow></mrow></mstyle></mtd></mtr><mtr><mtd><mstyle scriptlevel="0" displaystyle="false"><msub><mi>x</mi><mtext>max</mtext></msub></mstyle></mtd><mtd><mstyle scriptlevel="0" displaystyle="false"><mrow><mo>=</mo><mrow><mo fence="true">{</mo><mtable rowspacing="0.3599999999999999em" columnalign="left left" columnspacing="1em"><mtr><mtd><mstyle scriptlevel="0" displaystyle="false"><mrow><mi>max</mi><mo>⁡</mo><mo stretchy="false">(</mo><mi>X</mi><mo stretchy="false">)</mo></mrow></mstyle></mtd><mtd><mstyle scriptlevel="0" displaystyle="false"><mrow><mtext>if </mtext><msub><mi>x</mi><mtext>max</mtext></msub><mo>=</mo><mtext>None</mtext></mrow></mstyle></mtd></mtr><mtr><mtd><mstyle scriptlevel="0" 
displaystyle="false"><mrow><mi>max</mi><mo>⁡</mo><mrow><mo fence="true">(</mo><msub><mi>x</mi><mtext>max</mtext></msub><mo separator="true">,</mo><mi>max</mi><mo>⁡</mo><mo stretchy="false">(</mo><mi>X</mi><mo stretchy="false">)</mo><mo fence="true">)</mo></mrow></mrow></mstyle></mtd><mtd><mstyle scriptlevel="0" displaystyle="false"><mtext>otherwise</mtext></mstyle></mtd></mtr></mtable></mrow></mrow></mstyle></mtd></mtr></mtable><annotation encoding="application/x-tex">\begin{array}{ll}
x_\text{min} &amp;= \begin{cases}
    \min(X) &amp; \text{if~}x_\text{min} = \text{None} \\
    \min\left(x_\text{min}, \min(X)\right) &amp; \text{otherwise}
\end{cases}\\
x_\text{max} &amp;= \begin{cases}
    \max(X) &amp; \text{if~}x_\text{max} = \text{None} \\
    \max\left(x_\text{max}, \max(X)\right) &amp; \text{otherwise}
\end{cases}\\
\end{array}</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:6.0000599999999995em;vertical-align:-2.7500299999999998em;"></span><span class="mord"><span class="mtable"><span class="arraycolsep" style="width:0.5em;"></span><span class="col-align-l"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:3.2500299999999998em;"><span style="top:-5.25003em;"><span class="pstrut" style="height:3.75em;"></span><span class="mord"><span class="mord"><span class="mord mathdefault">x</span><span class="msupsub"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:0.31750199999999995em;"><span style="top:-2.5500000000000003em;margin-left:0em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord text mtight"><span class="mord mtight">min</span></span></span></span></span><span class="vlist-s">​</span></span><span class="vlist-r"><span class="vlist" style="height:0.15em;"><span></span></span></span></span></span></span></span></span><span style="top:-2.25em;"><span class="pstrut" style="height:3.75em;"></span><span class="mord"><span class="mord"><span class="mord mathdefault">x</span><span class="msupsub"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:0.151392em;"><span style="top:-2.5500000000000003em;margin-left:0em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord text mtight"><span class="mord mtight">max</span></span></span></span></span><span class="vlist-s">​</span></span><span class="vlist-r"><span class="vlist" style="height:0.15em;"><span></span></span></span></span></span></span></span></span></span><span class="vlist-s">​</span></span><span class="vlist-r"><span class="vlist" 
style="height:2.7500299999999998em;"><span></span></span></span></span></span><span class="arraycolsep" style="width:0.5em;"></span><span class="arraycolsep" style="width:0.5em;"></span><span class="col-align-l"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:3.2500299999999998em;"><span style="top:-5.25003em;"><span class="pstrut" style="height:3.75em;"></span><span class="mord"><span class="mrel">=</span><span class="mspace" style="margin-right:0.2777777777777778em;"></span><span class="minner"><span class="mopen delimcenter" style="top:0em;"><span class="delimsizing size4">{</span></span><span class="mord"><span class="mtable"><span class="col-align-l"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:1.69em;"><span style="top:-3.69em;"><span class="pstrut" style="height:3.008em;"></span><span class="mord"><span class="mop">min</span><span class="mopen">(</span><span class="mord mathdefault" style="margin-right:0.07847em;">X</span><span class="mclose">)</span></span></span><span style="top:-2.25em;"><span class="pstrut" style="height:3.008em;"></span><span class="mord"><span class="mop">min</span><span class="mspace" style="margin-right:0.16666666666666666em;"></span><span class="minner"><span class="mopen delimcenter" style="top:0em;">(</span><span class="mord"><span class="mord mathdefault">x</span><span class="msupsub"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:0.31750199999999995em;"><span style="top:-2.5500000000000003em;margin-left:0em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord text mtight"><span class="mord mtight">min</span></span></span></span></span><span class="vlist-s">​</span></span><span class="vlist-r"><span class="vlist" style="height:0.15em;"><span></span></span></span></span></span></span><span class="mpunct">,</span><span 
class="mspace" style="margin-right:0.16666666666666666em;"></span><span class="mop">min</span><span class="mopen">(</span><span class="mord mathdefault" style="margin-right:0.07847em;">X</span><span class="mclose">)</span><span class="mclose delimcenter" style="top:0em;">)</span></span></span></span></span><span class="vlist-s">​</span></span><span class="vlist-r"><span class="vlist" style="height:1.19em;"><span></span></span></span></span></span><span class="arraycolsep" style="width:1em;"></span><span class="col-align-l"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:1.69em;"><span style="top:-3.69em;"><span class="pstrut" style="height:3.008em;"></span><span class="mord"><span class="mord text"><span class="mord">if</span><span class="mord nobreak"> </span></span><span class="mord"><span class="mord mathdefault">x</span><span class="msupsub"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:0.31750199999999995em;"><span style="top:-2.5500000000000003em;margin-left:0em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord text mtight"><span class="mord mtight">min</span></span></span></span></span><span class="vlist-s">​</span></span><span class="vlist-r"><span class="vlist" style="height:0.15em;"><span></span></span></span></span></span></span><span class="mspace" style="margin-right:0.2777777777777778em;"></span><span class="mrel">=</span><span class="mspace" style="margin-right:0.2777777777777778em;"></span><span class="mord text"><span class="mord">None</span></span></span></span><span style="top:-2.25em;"><span class="pstrut" style="height:3.008em;"></span><span class="mord"><span class="mord text"><span class="mord">otherwise</span></span></span></span></span><span class="vlist-s">​</span></span><span class="vlist-r"><span class="vlist" 
style="height:1.19em;"><span></span></span></span></span></span></span></span><span class="mclose nulldelimiter"></span></span></span></span><span style="top:-2.25em;"><span class="pstrut" style="height:3.75em;"></span><span class="mord"><span class="mrel">=</span><span class="mspace" style="margin-right:0.2777777777777778em;"></span><span class="minner"><span class="mopen delimcenter" style="top:0em;"><span class="delimsizing size4">{</span></span><span class="mord"><span class="mtable"><span class="col-align-l"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:1.69em;"><span style="top:-3.69em;"><span class="pstrut" style="height:3.008em;"></span><span class="mord"><span class="mop">max</span><span class="mopen">(</span><span class="mord mathdefault" style="margin-right:0.07847em;">X</span><span class="mclose">)</span></span></span><span style="top:-2.25em;"><span class="pstrut" style="height:3.008em;"></span><span class="mord"><span class="mop">max</span><span class="mspace" style="margin-right:0.16666666666666666em;"></span><span class="minner"><span class="mopen delimcenter" style="top:0em;">(</span><span class="mord"><span class="mord mathdefault">x</span><span class="msupsub"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:0.151392em;"><span style="top:-2.5500000000000003em;margin-left:0em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord text mtight"><span class="mord mtight">max</span></span></span></span></span><span class="vlist-s">​</span></span><span class="vlist-r"><span class="vlist" style="height:0.15em;"><span></span></span></span></span></span></span><span class="mpunct">,</span><span class="mspace" style="margin-right:0.16666666666666666em;"></span><span class="mop">max</span><span class="mopen">(</span><span class="mord mathdefault" style="margin-right:0.07847em;">X</span><span 
class="mclose">)</span><span class="mclose delimcenter" style="top:0em;">)</span></span></span></span></span><span class="vlist-s">​</span></span><span class="vlist-r"><span class="vlist" style="height:1.19em;"><span></span></span></span></span></span><span class="arraycolsep" style="width:1em;"></span><span class="col-align-l"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:1.69em;"><span style="top:-3.69em;"><span class="pstrut" style="height:3.008em;"></span><span class="mord"><span class="mord text"><span class="mord">if</span><span class="mord nobreak"> </span></span><span class="mord"><span class="mord mathdefault">x</span><span class="msupsub"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:0.151392em;"><span style="top:-2.5500000000000003em;margin-left:0em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord text mtight"><span class="mord mtight">max</span></span></span></span></span><span class="vlist-s">​</span></span><span class="vlist-r"><span class="vlist" style="height:0.15em;"><span></span></span></span></span></span></span><span class="mspace" style="margin-right:0.2777777777777778em;"></span><span class="mrel">=</span><span class="mspace" style="margin-right:0.2777777777777778em;"></span><span class="mord text"><span class="mord">None</span></span></span></span><span style="top:-2.25em;"><span class="pstrut" style="height:3.008em;"></span><span class="mord"><span class="mord text"><span class="mord">otherwise</span></span></span></span></span><span class="vlist-s">​</span></span><span class="vlist-r"><span class="vlist" style="height:1.19em;"><span></span></span></span></span></span></span></span><span class="mclose nulldelimiter"></span></span></span></span></span><span class="vlist-s">​</span></span><span class="vlist-r"><span class="vlist" 
style="height:2.7500299999999998em;"><span></span></span></span></span></span><span class="arraycolsep" style="width:0.5em;"></span></span></span></span></span></span></span>

</div><p>where <span class="math"><span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><mi>X</mi></mrow><annotation encoding="application/x-tex">X</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:0.68333em;vertical-align:0em;"></span><span class="mord mathdefault" style="margin-right:0.07847em;">X</span></span></span></span>

</span> is the observed tensor.</p>
<p>The scale <span class="math"><span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><mi>s</mi></mrow><annotation encoding="application/x-tex">s</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:0.43056em;vertical-align:0em;"></span><span class="mord mathdefault">s</span></span></span></span>

</span> and zero point <span class="math"><span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><mi>z</mi></mrow><annotation encoding="application/x-tex">z</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:0.43056em;vertical-align:0em;"></span><span class="mord mathdefault" style="margin-right:0.04398em;">z</span></span></span></span>

</span> are then computed as:</p>
<div class="math">
<span class="katex-display"><span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mtable rowspacing="0.24999999999999992em" columnalign="right left" columnspacing="0em"><mtr><mtd><mstyle scriptlevel="0" displaystyle="true"><mtext>if Symmetric:</mtext></mstyle></mtd><mtd><mstyle scriptlevel="0" displaystyle="true"><mrow></mrow></mstyle></mtd></mtr><mtr><mtd><mstyle scriptlevel="0" displaystyle="true"><mrow></mrow></mstyle></mtd><mtd><mstyle scriptlevel="0" displaystyle="true"><mrow><mrow></mrow><mi>s</mi><mo>=</mo><mn>2</mn><mi>max</mi><mo>⁡</mo><mo stretchy="false">(</mo><mi mathvariant="normal">∣</mi><msub><mi>x</mi><mtext>min</mtext></msub><mi mathvariant="normal">∣</mi><mo separator="true">,</mo><msub><mi>x</mi><mtext>max</mtext></msub><mo stretchy="false">)</mo><mi mathvariant="normal">/</mi><mrow><mo fence="true">(</mo><msub><mi>Q</mi><mtext>max</mtext></msub><mo>−</mo><msub><mi>Q</mi><mtext>min</mtext></msub><mo fence="true">)</mo></mrow></mrow></mstyle></mtd></mtr><mtr><mtd><mstyle scriptlevel="0" displaystyle="true"><mrow></mrow></mstyle></mtd><mtd><mstyle scriptlevel="0" displaystyle="true"><mrow><mrow></mrow><mi>z</mi><mo>=</mo><mrow><mo fence="true">{</mo><mtable rowspacing="0.3599999999999999em" columnalign="left left" columnspacing="1em"><mtr><mtd><mstyle scriptlevel="0" displaystyle="false"><mn>0</mn></mstyle></mtd><mtd><mstyle scriptlevel="0" displaystyle="false"><mtext>if dtype is qint8</mtext></mstyle></mtd></mtr><mtr><mtd><mstyle scriptlevel="0" displaystyle="false"><mn>128</mn></mstyle></mtd><mtd><mstyle scriptlevel="0" displaystyle="false"><mtext>otherwise</mtext></mstyle></mtd></mtr></mtable></mrow></mrow></mstyle></mtd></mtr><mtr><mtd><mstyle scriptlevel="0" displaystyle="true"><mtext>Otherwise:</mtext></mstyle></mtd><mtd><mstyle scriptlevel="0" displaystyle="true"><mrow></mrow></mstyle></mtd></mtr><mtr><mtd><mstyle scriptlevel="0" 
displaystyle="true"><mrow></mrow></mstyle></mtd><mtd><mstyle scriptlevel="0" displaystyle="true"><mrow><mrow></mrow><mi>s</mi><mo>=</mo><mrow><mo fence="true">(</mo><msub><mi>x</mi><mtext>max</mtext></msub><mo>−</mo><msub><mi>x</mi><mtext>min</mtext></msub><mo fence="true">)</mo></mrow><mi mathvariant="normal">/</mi><mrow><mo fence="true">(</mo><msub><mi>Q</mi><mtext>max</mtext></msub><mo>−</mo><msub><mi>Q</mi><mtext>min</mtext></msub><mo fence="true">)</mo></mrow></mrow></mstyle></mtd></mtr><mtr><mtd><mstyle scriptlevel="0" displaystyle="true"><mrow></mrow></mstyle></mtd><mtd><mstyle scriptlevel="0" displaystyle="true"><mrow><mrow></mrow><mi>z</mi><mo>=</mo><msub><mi>Q</mi><mtext>min</mtext></msub><mo>−</mo><mtext>round</mtext><mo stretchy="false">(</mo><msub><mi>x</mi><mtext>min</mtext></msub><mi mathvariant="normal">/</mi><mi>s</mi><mo stretchy="false">)</mo></mrow></mstyle></mtd></mtr></mtable><annotation encoding="application/x-tex">\begin{aligned}
    \text{if Symmetric:}&amp;\\
    &amp;s = 2 \max(|x_\text{min}|, x_\text{max}) /
        \left( Q_\text{max} - Q_\text{min} \right) \\
    &amp;z = \begin{cases}
        0 &amp; \text{if dtype is qint8} \\
        128 &amp; \text{otherwise}
    \end{cases}\\
    \text{Otherwise:}&amp;\\
        &amp;s = \left( x_\text{max} - x_\text{min}  \right ) /
            \left( Q_\text{max} - Q_\text{min} \right ) \\
        &amp;z = Q_\text{min} - \text{round}(x_\text{min} / s)
\end{aligned}</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:10.80003em;vertical-align:-5.150015em;"></span><span class="mord"><span class="mtable"><span class="col-align-r"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:5.650015em;"><span style="top:-8.560015em;"><span class="pstrut" style="height:3.75em;"></span><span class="mord"><span class="mord text"><span class="mord">if Symmetric:</span></span></span></span><span style="top:-7.060014999999999em;"><span class="pstrut" style="height:3.75em;"></span><span class="mord"></span></span><span style="top:-4.650015em;"><span class="pstrut" style="height:3.75em;"></span><span class="mord"></span></span><span style="top:-2.259985em;"><span class="pstrut" style="height:3.75em;"></span><span class="mord"><span class="mord text"><span class="mord">Otherwise:</span></span></span></span><span style="top:-0.7599850000000004em;"><span class="pstrut" style="height:3.75em;"></span><span class="mord"></span></span><span style="top:0.7400149999999996em;"><span class="pstrut" style="height:3.75em;"></span><span class="mord"></span></span></span><span class="vlist-s">​</span></span><span class="vlist-r"><span class="vlist" style="height:5.150015em;"><span></span></span></span></span></span><span class="col-align-l"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:5.650015em;"><span style="top:-8.560015em;"><span class="pstrut" style="height:3.75em;"></span><span class="mord"><span class="mord"></span></span></span><span style="top:-7.060014999999999em;"><span class="pstrut" style="height:3.75em;"></span><span class="mord"><span class="mord"></span><span class="mord mathdefault">s</span><span class="mspace" style="margin-right:0.2777777777777778em;"></span><span class="mrel">=</span><span class="mspace" style="margin-right:0.2777777777777778em;"></span><span 
class="mord">2</span><span class="mspace" style="margin-right:0.16666666666666666em;"></span><span class="mop">max</span><span class="mopen">(</span><span class="mord">∣</span><span class="mord"><span class="mord mathdefault">x</span><span class="msupsub"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:0.31750199999999995em;"><span style="top:-2.5500000000000003em;margin-left:0em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord text mtight"><span class="mord mtight">min</span></span></span></span></span><span class="vlist-s">​</span></span><span class="vlist-r"><span class="vlist" style="height:0.15em;"><span></span></span></span></span></span></span><span class="mord">∣</span><span class="mpunct">,</span><span class="mspace" style="margin-right:0.16666666666666666em;"></span><span class="mord"><span class="mord mathdefault">x</span><span class="msupsub"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:0.151392em;"><span style="top:-2.5500000000000003em;margin-left:0em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord text mtight"><span class="mord mtight">max</span></span></span></span></span><span class="vlist-s">​</span></span><span class="vlist-r"><span class="vlist" style="height:0.15em;"><span></span></span></span></span></span></span><span class="mclose">)</span><span class="mord">/</span><span class="mspace" style="margin-right:0.16666666666666666em;"></span><span class="minner"><span class="mopen delimcenter" style="top:0em;">(</span><span class="mord"><span class="mord mathdefault">Q</span><span class="msupsub"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:0.151392em;"><span style="top:-2.5500000000000003em;margin-left:0em;margin-right:0.05em;"><span class="pstrut" 
style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord text mtight"><span class="mord mtight">max</span></span></span></span></span><span class="vlist-s">​</span></span><span class="vlist-r"><span class="vlist" style="height:0.15em;"><span></span></span></span></span></span></span><span class="mspace" style="margin-right:0.2222222222222222em;"></span><span class="mbin">−</span><span class="mspace" style="margin-right:0.2222222222222222em;"></span><span class="mord"><span class="mord mathdefault">Q</span><span class="msupsub"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:0.31750199999999995em;"><span style="top:-2.5500000000000003em;margin-left:0em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord text mtight"><span class="mord mtight">min</span></span></span></span></span><span class="vlist-s">​</span></span><span class="vlist-r"><span class="vlist" style="height:0.15em;"><span></span></span></span></span></span></span><span class="mclose delimcenter" style="top:0em;">)</span></span></span></span><span style="top:-4.650015em;"><span class="pstrut" style="height:3.75em;"></span><span class="mord"><span class="mord"></span><span class="mord mathdefault" style="margin-right:0.04398em;">z</span><span class="mspace" style="margin-right:0.2777777777777778em;"></span><span class="mrel">=</span><span class="mspace" style="margin-right:0.2777777777777778em;"></span><span class="minner"><span class="mopen delimcenter" style="top:0em;"><span class="delimsizing size4">{</span></span><span class="mord"><span class="mtable"><span class="col-align-l"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:1.69em;"><span style="top:-3.69em;"><span class="pstrut" style="height:3.008em;"></span><span class="mord"><span class="mord">0</span></span></span><span style="top:-2.25em;"><span 
class="pstrut" style="height:3.008em;"></span><span class="mord"><span class="mord">1</span><span class="mord">2</span><span class="mord">8</span></span></span></span><span class="vlist-s">​</span></span><span class="vlist-r"><span class="vlist" style="height:1.19em;"><span></span></span></span></span></span><span class="arraycolsep" style="width:1em;"></span><span class="col-align-l"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:1.69em;"><span style="top:-3.69em;"><span class="pstrut" style="height:3.008em;"></span><span class="mord"><span class="mord text"><span class="mord">if dtype is qint8</span></span></span></span><span style="top:-2.25em;"><span class="pstrut" style="height:3.008em;"></span><span class="mord"><span class="mord text"><span class="mord">otherwise</span></span></span></span></span><span class="vlist-s">​</span></span><span class="vlist-r"><span class="vlist" style="height:1.19em;"><span></span></span></span></span></span></span></span><span class="mclose nulldelimiter"></span></span></span></span><span style="top:-2.259985em;"><span class="pstrut" style="height:3.75em;"></span><span class="mord"><span class="mord"></span></span></span><span style="top:-0.7599850000000004em;"><span class="pstrut" style="height:3.75em;"></span><span class="mord"><span class="mord"></span><span class="mord mathdefault">s</span><span class="mspace" style="margin-right:0.2777777777777778em;"></span><span class="mrel">=</span><span class="mspace" style="margin-right:0.2777777777777778em;"></span><span class="minner"><span class="mopen delimcenter" style="top:0em;">(</span><span class="mord"><span class="mord mathdefault">x</span><span class="msupsub"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:0.151392em;"><span style="top:-2.5500000000000003em;margin-left:0em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span 
class="mord text mtight"><span class="mord mtight">max</span></span></span></span></span><span class="vlist-s">​</span></span><span class="vlist-r"><span class="vlist" style="height:0.15em;"><span></span></span></span></span></span></span><span class="mspace" style="margin-right:0.2222222222222222em;"></span><span class="mbin">−</span><span class="mspace" style="margin-right:0.2222222222222222em;"></span><span class="mord"><span class="mord mathdefault">x</span><span class="msupsub"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:0.31750199999999995em;"><span style="top:-2.5500000000000003em;margin-left:0em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord text mtight"><span class="mord mtight">min</span></span></span></span></span><span class="vlist-s">​</span></span><span class="vlist-r"><span class="vlist" style="height:0.15em;"><span></span></span></span></span></span></span><span class="mclose delimcenter" style="top:0em;">)</span></span><span class="mspace" style="margin-right:0.16666666666666666em;"></span><span class="mord">/</span><span class="mspace" style="margin-right:0.16666666666666666em;"></span><span class="minner"><span class="mopen delimcenter" style="top:0em;">(</span><span class="mord"><span class="mord mathdefault">Q</span><span class="msupsub"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:0.151392em;"><span style="top:-2.5500000000000003em;margin-left:0em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord text mtight"><span class="mord mtight">max</span></span></span></span></span><span class="vlist-s">​</span></span><span class="vlist-r"><span class="vlist" style="height:0.15em;"><span></span></span></span></span></span></span><span class="mspace" 
style="margin-right:0.2222222222222222em;"></span><span class="mbin">−</span><span class="mspace" style="margin-right:0.2222222222222222em;"></span><span class="mord"><span class="mord mathdefault">Q</span><span class="msupsub"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:0.31750199999999995em;"><span style="top:-2.5500000000000003em;margin-left:0em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord text mtight"><span class="mord mtight">min</span></span></span></span></span><span class="vlist-s">​</span></span><span class="vlist-r"><span class="vlist" style="height:0.15em;"><span></span></span></span></span></span></span><span class="mclose delimcenter" style="top:0em;">)</span></span></span></span><span style="top:0.7400149999999996em;"><span class="pstrut" style="height:3.75em;"></span><span class="mord"><span class="mord"></span><span class="mord mathdefault" style="margin-right:0.04398em;">z</span><span class="mspace" style="margin-right:0.2777777777777778em;"></span><span class="mrel">=</span><span class="mspace" style="margin-right:0.2777777777777778em;"></span><span class="mord"><span class="mord mathdefault">Q</span><span class="msupsub"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:0.31750199999999995em;"><span style="top:-2.5500000000000003em;margin-left:0em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord text mtight"><span class="mord mtight">min</span></span></span></span></span><span class="vlist-s">​</span></span><span class="vlist-r"><span class="vlist" style="height:0.15em;"><span></span></span></span></span></span></span><span class="mspace" style="margin-right:0.2222222222222222em;"></span><span class="mbin">−</span><span class="mspace" style="margin-right:0.2222222222222222em;"></span><span 
class="mord text"><span class="mord">round</span></span><span class="mopen">(</span><span class="mord"><span class="mord mathdefault">x</span><span class="msupsub"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:0.31750199999999995em;"><span style="top:-2.5500000000000003em;margin-left:0em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord text mtight"><span class="mord mtight">min</span></span></span></span></span><span class="vlist-s">​</span></span><span class="vlist-r"><span class="vlist" style="height:0.15em;"><span></span></span></span></span></span></span><span class="mord">/</span><span class="mord mathdefault">s</span><span class="mclose">)</span></span></span></span><span class="vlist-s">​</span></span><span class="vlist-r"><span class="vlist" style="height:5.150015em;"><span></span></span></span></span></span></span></span></span></span></span></span>

</div><p>where <span class="math"><span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><msub><mi>Q</mi><mtext>min</mtext></msub></mrow><annotation encoding="application/x-tex">Q_\text{min}</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:0.8777699999999999em;vertical-align:-0.19444em;"></span><span class="mord"><span class="mord mathdefault">Q</span><span class="msupsub"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:0.31750199999999995em;"><span style="top:-2.5500000000000003em;margin-left:0em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord text mtight"><span class="mord mtight">min</span></span></span></span></span><span class="vlist-s">​</span></span><span class="vlist-r"><span class="vlist" style="height:0.15em;"><span></span></span></span></span></span></span></span></span></span>

</span> and <span class="math"><span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><msub><mi>Q</mi><mtext>max</mtext></msub></mrow><annotation encoding="application/x-tex">Q_\text{max}</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:0.8777699999999999em;vertical-align:-0.19444em;"></span><span class="mord"><span class="mord mathdefault">Q</span><span class="msupsub"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:0.151392em;"><span style="top:-2.5500000000000003em;margin-left:0em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord text mtight"><span class="mord mtight">max</span></span></span></span></span><span class="vlist-s">​</span></span><span class="vlist-r"><span class="vlist" style="height:0.15em;"><span></span></span></span></span></span></span></span></span></span>

</span> are the minimum and
maximum of the quantized data type.</p>
<div class="admonition warning">
<p class="admonition-title">Warning</p>
<p>Only works with <code class="docutils literal notranslate"><span class="pre">torch.per_tensor_symmetric</span></code> quantization scheme</p>
</div>
<div class="admonition warning">
<p class="admonition-title">Warning</p>
<p><code class="xref py py-attr docutils literal notranslate"><span class="pre">dtype</span></code> can only take <code class="docutils literal notranslate"><span class="pre">torch.qint8</span></code> or <code class="docutils literal notranslate"><span class="pre">torch.quint8</span></code>.</p>
</div>
<div class="admonition note">
<p class="admonition-title">Note</p>
<p>If the running minimum equals the running maximum, the scale
and zero_point are set to 1.0 and 0.</p>
</div>
</dd></dl>

<dl class="class">
<dt id="torch.quantization.MovingAverageMinMaxObserver">
<em class="property">class </em><code class="sig-prename descclassname">torch.quantization.</code><code class="sig-name descname">MovingAverageMinMaxObserver</code><span class="sig-paren">(</span><em class="sig-param">averaging_constant=0.01</em>, <em class="sig-param">dtype=torch.quint8</em>, <em class="sig-param">qscheme=torch.per_tensor_affine</em>, <em class="sig-param">reduce_range=False</em><span class="sig-paren">)</span><a class="reference internal" href="_modules/torch/quantization/observer.html#MovingAverageMinMaxObserver"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#torch.quantization.MovingAverageMinMaxObserver" title="Permalink to this definition">¶</a></dt>
<dd><p>Observer module for computing the quantization parameters based on the
moving average of the min and max values.</p>
<p>This observer computes the quantization parameters based on the moving
averages of minimums and maximums of the incoming tensors. The module
records the average minimum and maximum of incoming tensors, and uses this
statistic to compute the quantization parameters.</p>
<dl class="field-list simple">
<dt class="field-odd">Parameters</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>averaging_constant</strong> – Averaging constant for min/max.</p></li>
<li><p><strong>dtype</strong> – Quantized data type</p></li>
<li><p><strong>qscheme</strong> – Quantization scheme to be used</p></li>
<li><p><strong>reduce_range</strong> – Reduces the range of the quantized data type by 1 bit</p></li>
</ul>
</dd>
</dl>
<p>The moving average min/max is computed as follows</p>
<div class="math">
<span class="katex-display"><span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mtable rowspacing="0.15999999999999992em" columnalign="left left" columnspacing="1em"><mtr><mtd><mstyle scriptlevel="0" displaystyle="false"><mrow><msub><mi>x</mi><mtext>min</mtext></msub><mo>=</mo><mrow><mo fence="true">{</mo><mtable rowspacing="0.3599999999999999em" columnalign="left left" columnspacing="1em"><mtr><mtd><mstyle scriptlevel="0" displaystyle="false"><mrow><mi>min</mi><mo>⁡</mo><mo stretchy="false">(</mo><mi>X</mi><mo stretchy="false">)</mo></mrow></mstyle></mtd><mtd><mstyle scriptlevel="0" displaystyle="false"><mrow><mtext>if </mtext><msub><mi>x</mi><mtext>min</mtext></msub><mo>=</mo><mtext>None</mtext></mrow></mstyle></mtd></mtr><mtr><mtd><mstyle scriptlevel="0" displaystyle="false"><mrow><mo stretchy="false">(</mo><mn>1</mn><mo>−</mo><mi>c</mi><mo stretchy="false">)</mo><msub><mi>x</mi><mtext>min</mtext></msub><mo>+</mo><mi>c</mi><mi>min</mi><mo>⁡</mo><mo stretchy="false">(</mo><mi>X</mi><mo stretchy="false">)</mo></mrow></mstyle></mtd><mtd><mstyle scriptlevel="0" displaystyle="false"><mtext>otherwise</mtext></mstyle></mtd></mtr></mtable></mrow></mrow></mstyle></mtd></mtr><mtr><mtd><mstyle scriptlevel="0" displaystyle="false"><mrow><msub><mi>x</mi><mtext>max</mtext></msub><mo>=</mo><mrow><mo fence="true">{</mo><mtable rowspacing="0.3599999999999999em" columnalign="left left" columnspacing="1em"><mtr><mtd><mstyle scriptlevel="0" displaystyle="false"><mrow><mi>max</mi><mo>⁡</mo><mo stretchy="false">(</mo><mi>X</mi><mo stretchy="false">)</mo></mrow></mstyle></mtd><mtd><mstyle scriptlevel="0" displaystyle="false"><mrow><mtext>if </mtext><msub><mi>x</mi><mtext>max</mtext></msub><mo>=</mo><mtext>None</mtext></mrow></mstyle></mtd></mtr><mtr><mtd><mstyle scriptlevel="0" displaystyle="false"><mrow><mo stretchy="false">(</mo><mn>1</mn><mo>−</mo><mi>c</mi><mo 
stretchy="false">)</mo><msub><mi>x</mi><mtext>max</mtext></msub><mo>+</mo><mi>c</mi><mi>max</mi><mo>⁡</mo><mo stretchy="false">(</mo><mi>X</mi><mo stretchy="false">)</mo></mrow></mstyle></mtd><mtd><mstyle scriptlevel="0" displaystyle="false"><mtext>otherwise</mtext></mstyle></mtd></mtr></mtable></mrow></mrow></mstyle></mtd></mtr></mtable><annotation encoding="application/x-tex">\begin{array}{ll}
        x_\text{min} = \begin{cases}
            \min(X) &amp; \text{if~}x_\text{min} = \text{None} \\
            (1 - c) x_\text{min} + c \min(X) &amp; \text{otherwise}
        \end{cases}\\
        x_\text{max} = \begin{cases}
            \max(X) &amp; \text{if~}x_\text{max} = \text{None} \\
            (1 - c) x_\text{max} + c \max(X) &amp; \text{otherwise}
        \end{cases}\\
\end{array}</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:6.0000599999999995em;vertical-align:-2.7500299999999998em;"></span><span class="mord"><span class="mtable"><span class="arraycolsep" style="width:0.5em;"></span><span class="col-align-l"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:3.2500299999999998em;"><span style="top:-5.25003em;"><span class="pstrut" style="height:3.75em;"></span><span class="mord"><span class="mord"><span class="mord mathdefault">x</span><span class="msupsub"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:0.31750199999999995em;"><span style="top:-2.5500000000000003em;margin-left:0em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord text mtight"><span class="mord mtight">min</span></span></span></span></span><span class="vlist-s">​</span></span><span class="vlist-r"><span class="vlist" style="height:0.15em;"><span></span></span></span></span></span></span><span class="mspace" style="margin-right:0.2777777777777778em;"></span><span class="mrel">=</span><span class="mspace" style="margin-right:0.2777777777777778em;"></span><span class="minner"><span class="mopen delimcenter" style="top:0em;"><span class="delimsizing size4">{</span></span><span class="mord"><span class="mtable"><span class="col-align-l"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:1.69em;"><span style="top:-3.69em;"><span class="pstrut" style="height:3.008em;"></span><span class="mord"><span class="mop">min</span><span class="mopen">(</span><span class="mord mathdefault" style="margin-right:0.07847em;">X</span><span class="mclose">)</span></span></span><span style="top:-2.25em;"><span class="pstrut" style="height:3.008em;"></span><span class="mord"><span 
class="mopen">(</span><span class="mord">1</span><span class="mspace" style="margin-right:0.2222222222222222em;"></span><span class="mbin">−</span><span class="mspace" style="margin-right:0.2222222222222222em;"></span><span class="mord mathdefault">c</span><span class="mclose">)</span><span class="mord"><span class="mord mathdefault">x</span><span class="msupsub"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:0.31750199999999995em;"><span style="top:-2.5500000000000003em;margin-left:0em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord text mtight"><span class="mord mtight">min</span></span></span></span></span><span class="vlist-s">​</span></span><span class="vlist-r"><span class="vlist" style="height:0.15em;"><span></span></span></span></span></span></span><span class="mspace" style="margin-right:0.2222222222222222em;"></span><span class="mbin">+</span><span class="mspace" style="margin-right:0.2222222222222222em;"></span><span class="mord mathdefault">c</span><span class="mspace" style="margin-right:0.16666666666666666em;"></span><span class="mop">min</span><span class="mopen">(</span><span class="mord mathdefault" style="margin-right:0.07847em;">X</span><span class="mclose">)</span></span></span></span><span class="vlist-s">​</span></span><span class="vlist-r"><span class="vlist" style="height:1.19em;"><span></span></span></span></span></span><span class="arraycolsep" style="width:1em;"></span><span class="col-align-l"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:1.69em;"><span style="top:-3.69em;"><span class="pstrut" style="height:3.008em;"></span><span class="mord"><span class="mord text"><span class="mord">if</span><span class="mord nobreak"> </span></span><span class="mord"><span class="mord mathdefault">x</span><span class="msupsub"><span class="vlist-t vlist-t2"><span class="vlist-r"><span 
class="vlist" style="height:0.31750199999999995em;"><span style="top:-2.5500000000000003em;margin-left:0em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord text mtight"><span class="mord mtight">min</span></span></span></span></span><span class="vlist-s">​</span></span><span class="vlist-r"><span class="vlist" style="height:0.15em;"><span></span></span></span></span></span></span><span class="mspace" style="margin-right:0.2777777777777778em;"></span><span class="mrel">=</span><span class="mspace" style="margin-right:0.2777777777777778em;"></span><span class="mord text"><span class="mord">None</span></span></span></span><span style="top:-2.25em;"><span class="pstrut" style="height:3.008em;"></span><span class="mord"><span class="mord text"><span class="mord">otherwise</span></span></span></span></span><span class="vlist-s">​</span></span><span class="vlist-r"><span class="vlist" style="height:1.19em;"><span></span></span></span></span></span></span></span><span class="mclose nulldelimiter"></span></span></span></span><span style="top:-2.25em;"><span class="pstrut" style="height:3.75em;"></span><span class="mord"><span class="mord"><span class="mord mathdefault">x</span><span class="msupsub"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:0.151392em;"><span style="top:-2.5500000000000003em;margin-left:0em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord text mtight"><span class="mord mtight">max</span></span></span></span></span><span class="vlist-s">​</span></span><span class="vlist-r"><span class="vlist" style="height:0.15em;"><span></span></span></span></span></span></span><span class="mspace" style="margin-right:0.2777777777777778em;"></span><span class="mrel">=</span><span class="mspace" style="margin-right:0.2777777777777778em;"></span><span 
class="minner"><span class="mopen delimcenter" style="top:0em;"><span class="delimsizing size4">{</span></span><span class="mord"><span class="mtable"><span class="col-align-l"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:1.69em;"><span style="top:-3.69em;"><span class="pstrut" style="height:3.008em;"></span><span class="mord"><span class="mop">max</span><span class="mopen">(</span><span class="mord mathdefault" style="margin-right:0.07847em;">X</span><span class="mclose">)</span></span></span><span style="top:-2.25em;"><span class="pstrut" style="height:3.008em;"></span><span class="mord"><span class="mopen">(</span><span class="mord">1</span><span class="mspace" style="margin-right:0.2222222222222222em;"></span><span class="mbin">−</span><span class="mspace" style="margin-right:0.2222222222222222em;"></span><span class="mord mathdefault">c</span><span class="mclose">)</span><span class="mord"><span class="mord mathdefault">x</span><span class="msupsub"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:0.151392em;"><span style="top:-2.5500000000000003em;margin-left:0em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord text mtight"><span class="mord mtight">max</span></span></span></span></span><span class="vlist-s">​</span></span><span class="vlist-r"><span class="vlist" style="height:0.15em;"><span></span></span></span></span></span></span><span class="mspace" style="margin-right:0.2222222222222222em;"></span><span class="mbin">+</span><span class="mspace" style="margin-right:0.2222222222222222em;"></span><span class="mord mathdefault">c</span><span class="mspace" style="margin-right:0.16666666666666666em;"></span><span class="mop">max</span><span class="mopen">(</span><span class="mord mathdefault" style="margin-right:0.07847em;">X</span><span class="mclose">)</span></span></span></span><span 
class="vlist-s">​</span></span><span class="vlist-r"><span class="vlist" style="height:1.19em;"><span></span></span></span></span></span><span class="arraycolsep" style="width:1em;"></span><span class="col-align-l"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:1.69em;"><span style="top:-3.69em;"><span class="pstrut" style="height:3.008em;"></span><span class="mord"><span class="mord text"><span class="mord">if</span><span class="mord nobreak"> </span></span><span class="mord"><span class="mord mathdefault">x</span><span class="msupsub"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:0.151392em;"><span style="top:-2.5500000000000003em;margin-left:0em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord text mtight"><span class="mord mtight">max</span></span></span></span></span><span class="vlist-s">​</span></span><span class="vlist-r"><span class="vlist" style="height:0.15em;"><span></span></span></span></span></span></span><span class="mspace" style="margin-right:0.2777777777777778em;"></span><span class="mrel">=</span><span class="mspace" style="margin-right:0.2777777777777778em;"></span><span class="mord text"><span class="mord">None</span></span></span></span><span style="top:-2.25em;"><span class="pstrut" style="height:3.008em;"></span><span class="mord"><span class="mord text"><span class="mord">otherwise</span></span></span></span></span><span class="vlist-s">​</span></span><span class="vlist-r"><span class="vlist" style="height:1.19em;"><span></span></span></span></span></span></span></span><span class="mclose nulldelimiter"></span></span></span></span></span><span class="vlist-s">​</span></span><span class="vlist-r"><span class="vlist" style="height:2.7500299999999998em;"><span></span></span></span></span></span><span class="arraycolsep" 
style="width:0.5em;"></span></span></span></span></span></span></span>

</div><p>where <span class="math"><span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><msub><mi>x</mi><mtext>min/max</mtext></msub></mrow><annotation encoding="application/x-tex">x_\text{min/max}</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:0.7857599999999999em;vertical-align:-0.3551999999999999em;"></span><span class="mord"><span class="mord mathdefault">x</span><span class="msupsub"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:0.34480000000000005em;"><span style="top:-2.5198em;margin-left:0em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord text mtight"><span class="mord mtight">min/max</span></span></span></span></span><span class="vlist-s">​</span></span><span class="vlist-r"><span class="vlist" style="height:0.3551999999999999em;"><span></span></span></span></span></span></span></span></span></span>

</span> is the running average min/max, <span class="math"><span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><mi>X</mi></mrow><annotation encoding="application/x-tex">X</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:0.68333em;vertical-align:0em;"></span><span class="mord mathdefault" style="margin-right:0.07847em;">X</span></span></span></span>

</span> is the incoming tensor, and <span class="math"><span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><mi>c</mi></mrow><annotation encoding="application/x-tex">c</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:0.43056em;vertical-align:0em;"></span><span class="mord mathdefault">c</span></span></span></span>

</span> is the <code class="docutils literal notranslate"><span class="pre">averaging_constant</span></code>.</p>
<p>The scale and zero point are then computed as in
<code class="xref py py-class docutils literal notranslate"><span class="pre">MinMaxObserver</span></code>.</p>
<div class="admonition note">
<p class="admonition-title">Note</p>
<p>Only works with <code class="docutils literal notranslate"><span class="pre">torch.per_tensor_affine</span></code> quantization scheme.</p>
</div>
<div class="admonition note">
<p class="admonition-title">Note</p>
<p>If the running minimum equals the running maximum, the scale
and zero_point are set to 1.0 and 0.</p>
</div>
</dd></dl>

<dl class="class">
<dt id="torch.quantization.PerChannelMinMaxObserver">
<em class="property">class </em><code class="sig-prename descclassname">torch.quantization.</code><code class="sig-name descname">PerChannelMinMaxObserver</code><span class="sig-paren">(</span><em class="sig-param">ch_axis=0</em>, <em class="sig-param">dtype=torch.quint8</em>, <em class="sig-param">qscheme=torch.per_channel_affine</em>, <em class="sig-param">reduce_range=False</em><span class="sig-paren">)</span><a class="reference internal" href="_modules/torch/quantization/observer.html#PerChannelMinMaxObserver"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#torch.quantization.PerChannelMinMaxObserver" title="Permalink to this definition">¶</a></dt>
<dd><p>Observer module for computing the quantization parameters based on the
running per channel min and max values.</p>
<p>This observer uses the tensor min/max statistics to compute the per channel
quantization parameters. The module records the running minimum and maximum
of incoming tensors, and uses this statistic to compute the quantization
parameters.</p>
<dl class="field-list simple">
<dt class="field-odd">Parameters</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>ch_axis</strong> – Channel axis</p></li>
<li><p><strong>dtype</strong> – Quantized data type</p></li>
<li><p><strong>qscheme</strong> – Quantization scheme to be used</p></li>
<li><p><strong>reduce_range</strong> – Reduces the range of the quantized data type by 1 bit</p></li>
</ul>
</dd>
</dl>
<p>The quantization parameters are computed the same way as in
<code class="xref py py-class docutils literal notranslate"><span class="pre">MinMaxObserver</span></code>, with the difference
that the running min/max values are stored per channel.
Scales and zero points are thus computed per channel as well.</p>
<div class="admonition note">
<p class="admonition-title">Note</p>
<p>If the running minimum equals the running maximum, the scales
and zero_points are set to 1.0 and 0.</p>
</div>
</dd></dl>

<dl class="class">
<dt id="torch.quantization.MovingAveragePerChannelMinMaxObserver">
<em class="property">class </em><code class="sig-prename descclassname">torch.quantization.</code><code class="sig-name descname">MovingAveragePerChannelMinMaxObserver</code><span class="sig-paren">(</span><em class="sig-param">averaging_constant=0.01</em>, <em class="sig-param">ch_axis=0</em>, <em class="sig-param">dtype=torch.quint8</em>, <em class="sig-param">qscheme=torch.per_channel_affine</em>, <em class="sig-param">reduce_range=False</em><span class="sig-paren">)</span><a class="reference internal" href="_modules/torch/quantization/observer.html#MovingAveragePerChannelMinMaxObserver"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#torch.quantization.MovingAveragePerChannelMinMaxObserver" title="Permalink to this definition">¶</a></dt>
<dd><p>Observer module for computing the quantization parameters based on the
running per channel min and max values.</p>
<p>This observer uses the tensor min/max statistics to compute the per channel
quantization parameters. The module records the running minimum and maximum
of incoming tensors, and uses this statistic to compute the quantization
parameters.</p>
<dl class="field-list simple">
<dt class="field-odd">Parameters</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>averaging_constant</strong> – Averaging constant for min/max.</p></li>
<li><p><strong>ch_axis</strong> – Channel axis</p></li>
<li><p><strong>dtype</strong> – Quantized data type</p></li>
<li><p><strong>qscheme</strong> – Quantization scheme to be used</p></li>
<li><p><strong>reduce_range</strong> – Reduces the range of the quantized data type by 1 bit</p></li>
</ul>
</dd>
</dl>
<p>The quantization parameters are computed the same way as in
<code class="xref py py-class docutils literal notranslate"><span class="pre">MovingAverageMinMaxObserver</span></code>, with the
difference that the running min/max values are stored per channel.
Scales and zero points are thus computed per channel as well.</p>
<div class="admonition note">
<p class="admonition-title">Note</p>
<p>If the running minimum equals the running maximum, the scales
and zero_points are set to 1.0 and 0.</p>
</div>
</dd></dl>

<dl class="class">
<dt id="torch.quantization.HistogramObserver">
<em class="property">class </em><code class="sig-prename descclassname">torch.quantization.</code><code class="sig-name descname">HistogramObserver</code><span class="sig-paren">(</span><em class="sig-param">bins=2048</em>, <em class="sig-param">upsample_rate=128</em>, <em class="sig-param">dtype=torch.quint8</em>, <em class="sig-param">qscheme=torch.per_tensor_affine</em>, <em class="sig-param">reduce_range=False</em><span class="sig-paren">)</span><a class="reference internal" href="_modules/torch/quantization/observer.html#HistogramObserver"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#torch.quantization.HistogramObserver" title="Permalink to this definition">¶</a></dt>
<dd><p>The module records the running histogram of tensor values along with
min/max values. <code class="docutils literal notranslate"><span class="pre">calculate_qparams</span></code> will calculate scale and zero_point.</p>
<dl class="field-list simple">
<dt class="field-odd">Parameters</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>bins</strong> – Number of bins to use for the histogram</p></li>
<li><p><strong>upsample_rate</strong> – Factor by which the histograms are upsampled, this is
used to interpolate histograms with varying ranges across observations</p></li>
<li><p><strong>dtype</strong> – Quantized data type</p></li>
<li><p><strong>qscheme</strong> – Quantization scheme to be used</p></li>
<li><p><strong>reduce_range</strong> – Reduces the range of the quantized data type by 1 bit</p></li>
</ul>
</dd>
</dl>
<p>The scale and zero point are computed as follows:</p>
<ol class="arabic simple">
<li><dl class="simple">
<dt>Create the histogram of the incoming inputs.</dt><dd><p>The histogram is computed continuously, and the ranges per bin change
with every new tensor observed.</p>
</dd>
</dl>
</li>
<li><dl class="simple">
<dt>Search the distribution in the histogram for optimal min/max values.</dt><dd><p>The search for the min/max values ensures the minimization of the
quantization error with respect to the floating point model.</p>
</dd>
</dl>
</li>
<li><dl class="simple">
<dt>Compute the scale and zero point the same way as in the</dt><dd><p><a class="reference internal" href="#torch.quantization.MinMaxObserver" title="torch.quantization.MinMaxObserver"><code class="xref py py-class docutils literal notranslate"><span class="pre">MinMaxObserver</span></code></a></p>
</dd>
</dl>
</li>
</ol>
</dd></dl>

<dl class="class">
<dt id="torch.quantization.FakeQuantize">
<em class="property">class </em><code class="sig-prename descclassname">torch.quantization.</code><code class="sig-name descname">FakeQuantize</code><span class="sig-paren">(</span><em class="sig-param">observer=&lt;class 'torch.quantization.observer.MovingAverageMinMaxObserver'&gt;</em>, <em class="sig-param">quant_min=0</em>, <em class="sig-param">quant_max=255</em>, <em class="sig-param">**observer_kwargs</em><span class="sig-paren">)</span><a class="reference internal" href="_modules/torch/quantization/fake_quantize.html#FakeQuantize"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#torch.quantization.FakeQuantize" title="Permalink to this definition">¶</a></dt>
<dd><p>Simulate the quantize and dequantize operations in training time.
The output of this module is given by</p>
<p>x_out = (clamp(round(x/scale + zero_point), quant_min, quant_max)-zero_point)*scale</p>
<ul class="simple">
<li><p><code class="xref py py-attr docutils literal notranslate"><span class="pre">scale</span></code> defines the scale factor used for quantization.</p></li>
<li><p><code class="xref py py-attr docutils literal notranslate"><span class="pre">zero_point</span></code> specifies the quantized value to which 0 in floating point maps</p></li>
<li><p><code class="xref py py-attr docutils literal notranslate"><span class="pre">quant_min</span></code> specifies the minimum allowable quantized value.</p></li>
<li><p><code class="xref py py-attr docutils literal notranslate"><span class="pre">quant_max</span></code> specifies the maximum allowable quantized value.</p></li>
<li><p><code class="xref py py-attr docutils literal notranslate"><span class="pre">fake_quant_enable</span></code> controls the application of fake quantization on tensors, note that
statistics can still be updated.</p></li>
<li><p><code class="xref py py-attr docutils literal notranslate"><span class="pre">observer_enable</span></code> controls statistics collection on tensors</p></li>
<li><dl class="simple">
<dt><code class="xref py py-attr docutils literal notranslate"><span class="pre">dtype</span></code> specifies the quantized dtype that is being emulated with fake-quantization,</dt><dd><p>allowable values are torch.qint8 and torch.quint8. The values of quant_min and
quant_max should be chosen to be consistent with the dtype</p>
</dd>
</dl>
</li>
</ul>
<dl class="field-list simple">
<dt class="field-odd">Parameters</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>observer</strong> (<em>module</em>) – Module for observing statistics on input tensors and calculating scale
and zero-point.</p></li>
<li><p><strong>quant_min</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.8)"><em>int</em></a>) – The minimum allowable quantized value.</p></li>
<li><p><strong>quant_max</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.8)"><em>int</em></a>) – The maximum allowable quantized value.</p></li>
<li><p><strong>observer_kwargs</strong> (<em>optional</em>) – Arguments for the observer module</p></li>
</ul>
</dd>
<dt class="field-even">Variables</dt>
<dd class="field-even"><p><strong>~FakeQuantize.observer</strong> (<a class="reference internal" href="nn.html#torch.nn.Module" title="torch.nn.Module"><em>Module</em></a>) – User provided module that collects statistics on the input tensor and
provides a method to calculate scale and zero-point.</p>
</dd>
</dl>
</dd></dl>

<dl class="class">
<dt id="torch.quantization.NoopObserver">
<em class="property">class </em><code class="sig-prename descclassname">torch.quantization.</code><code class="sig-name descname">NoopObserver</code><span class="sig-paren">(</span><em class="sig-param">dtype=torch.float16</em><span class="sig-paren">)</span><a class="reference internal" href="_modules/torch/quantization/observer.html#NoopObserver"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#torch.quantization.NoopObserver" title="Permalink to this definition">¶</a></dt>
<dd><p>Observer that doesn’t do anything and just passes its configuration to the
quantized module’s <code class="docutils literal notranslate"><span class="pre">.from_float()</span></code>.</p>
<p>Primarily used for quantization to float16 which doesn’t require determining
ranges.</p>
<dl class="field-list simple">
<dt class="field-odd">Parameters</dt>
<dd class="field-odd"><p><strong>dtype</strong> – Quantized data type</p>
</dd>
</dl>
</dd></dl>

</div>
<div class="section" id="debugging-utilities">
<h3>Debugging utilities<a class="headerlink" href="#debugging-utilities" title="Permalink to this headline">¶</a></h3>
<dl class="function">
<dt id="torch.quantization.get_observer_dict">
<code class="sig-prename descclassname">torch.quantization.</code><code class="sig-name descname">get_observer_dict</code><span class="sig-paren">(</span><em class="sig-param">mod</em>, <em class="sig-param">target_dict</em>, <em class="sig-param">prefix=''</em><span class="sig-paren">)</span><a class="reference internal" href="_modules/torch/quantization/quantize.html#get_observer_dict"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#torch.quantization.get_observer_dict" title="Permalink to this definition">¶</a></dt>
<dd><p>Traverse the modules and save all observers into a dict.
This is mainly used for quantization accuracy debugging
:param mod: the top module we want to save all observers
:param prefix: the prefix for the current module
:param target_dict: the dictionary used to save all the observers</p>
</dd></dl>

<dl class="class">
<dt id="torch.quantization.RecordingObserver">
<em class="property">class </em><code class="sig-prename descclassname">torch.quantization.</code><code class="sig-name descname">RecordingObserver</code><span class="sig-paren">(</span><em class="sig-param">**kwargs</em><span class="sig-paren">)</span><a class="reference internal" href="_modules/torch/quantization/observer.html#RecordingObserver"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#torch.quantization.RecordingObserver" title="Permalink to this definition">¶</a></dt>
<dd><p>The module is mainly for debug and records the tensor values during runtime.</p>
<dl class="field-list simple">
<dt class="field-odd">Parameters</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>dtype</strong> – Quantized data type</p></li>
<li><p><strong>qscheme</strong> – Quantization scheme to be used</p></li>
<li><p><strong>reduce_range</strong> – Reduces the range of the quantized data type by 1 bit</p></li>
</ul>
</dd>
</dl>
</dd></dl>

</div>
</div>
<div class="section" id="id2">
<h2>torch.nn.intrinsic<a class="headerlink" href="#id2" title="Permalink to this headline">¶</a></h2>
<p>This module implements the combined (fused) modules conv + relu which can then be quantized.</p>
<span class="target" id="module-torch.nn.intrinsic"></span><div class="section" id="convbn2d">
<h3>ConvBn2d<a class="headerlink" href="#convbn2d" title="Permalink to this headline">¶</a></h3>
<dl class="class">
<dt id="torch.nn.intrinsic.ConvBn2d">
<em class="property">class </em><code class="sig-prename descclassname">torch.nn.intrinsic.</code><code class="sig-name descname">ConvBn2d</code><span class="sig-paren">(</span><em class="sig-param">conv</em>, <em class="sig-param">bn</em><span class="sig-paren">)</span><a class="reference internal" href="_modules/torch/nn/intrinsic/modules/fused.html#ConvBn2d"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#torch.nn.intrinsic.ConvBn2d" title="Permalink to this definition">¶</a></dt>
<dd><p>This is a sequential container which calls the Conv 2d and Batch Norm 2d modules.
During quantization this will be replaced with the corresponding fused module.</p>
</dd></dl>

</div>
<div class="section" id="convbnrelu2d">
<h3>ConvBnReLU2d<a class="headerlink" href="#convbnrelu2d" title="Permalink to this headline">¶</a></h3>
<dl class="class">
<dt id="torch.nn.intrinsic.ConvBnReLU2d">
<em class="property">class </em><code class="sig-prename descclassname">torch.nn.intrinsic.</code><code class="sig-name descname">ConvBnReLU2d</code><span class="sig-paren">(</span><em class="sig-param">conv</em>, <em class="sig-param">bn</em>, <em class="sig-param">relu</em><span class="sig-paren">)</span><a class="reference internal" href="_modules/torch/nn/intrinsic/modules/fused.html#ConvBnReLU2d"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#torch.nn.intrinsic.ConvBnReLU2d" title="Permalink to this definition">¶</a></dt>
<dd><p>This is a sequential container which calls the Conv 2d, Batch Norm 2d, and ReLU modules.
During quantization this will be replaced with the corresponding fused module.</p>
</dd></dl>

</div>
<div class="section" id="convrelu2d">
<h3>ConvReLU2d<a class="headerlink" href="#convrelu2d" title="Permalink to this headline">¶</a></h3>
<dl class="class">
<dt id="torch.nn.intrinsic.ConvReLU2d">
<em class="property">class </em><code class="sig-prename descclassname">torch.nn.intrinsic.</code><code class="sig-name descname">ConvReLU2d</code><span class="sig-paren">(</span><em class="sig-param">conv</em>, <em class="sig-param">relu</em><span class="sig-paren">)</span><a class="reference internal" href="_modules/torch/nn/intrinsic/modules/fused.html#ConvReLU2d"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#torch.nn.intrinsic.ConvReLU2d" title="Permalink to this definition">¶</a></dt>
<dd><p>This is a sequential container which calls the Conv 2d and ReLU modules.
During quantization this will be replaced with the corresponding fused module.</p>
</dd></dl>

</div>
<div class="section" id="convrelu3d">
<h3>ConvReLU3d<a class="headerlink" href="#convrelu3d" title="Permalink to this headline">¶</a></h3>
<dl class="class">
<dt id="torch.nn.intrinsic.ConvReLU3d">
<em class="property">class </em><code class="sig-prename descclassname">torch.nn.intrinsic.</code><code class="sig-name descname">ConvReLU3d</code><span class="sig-paren">(</span><em class="sig-param">conv</em>, <em class="sig-param">relu</em><span class="sig-paren">)</span><a class="reference internal" href="_modules/torch/nn/intrinsic/modules/fused.html#ConvReLU3d"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#torch.nn.intrinsic.ConvReLU3d" title="Permalink to this definition">¶</a></dt>
<dd><p>This is a sequential container which calls the Conv 3d and ReLU modules.
During quantization this will be replaced with the corresponding fused module.</p>
</dd></dl>

</div>
<div class="section" id="linearrelu">
<h3>LinearReLU<a class="headerlink" href="#linearrelu" title="Permalink to this headline">¶</a></h3>
<dl class="class">
<dt id="torch.nn.intrinsic.LinearReLU">
<em class="property">class </em><code class="sig-prename descclassname">torch.nn.intrinsic.</code><code class="sig-name descname">LinearReLU</code><span class="sig-paren">(</span><em class="sig-param">linear</em>, <em class="sig-param">relu</em><span class="sig-paren">)</span><a class="reference internal" href="_modules/torch/nn/intrinsic/modules/fused.html#LinearReLU"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#torch.nn.intrinsic.LinearReLU" title="Permalink to this definition">¶</a></dt>
<dd><p>This is a sequential container which calls the Linear and ReLU modules.
During quantization this will be replaced with the corresponding fused module.</p>
</dd></dl>

</div>
</div>
<div class="section" id="torch-nn-instrinsic-qat">
<h2>torch.nn.intrinsic.qat<a class="headerlink" href="#torch-nn-instrinsic-qat" title="Permalink to this headline">¶</a></h2>
<p>This module implements the versions of those fused operations needed for quantization aware training.</p>
<span class="target" id="module-torch.nn.intrinsic.qat"></span><div class="section" id="id3">
<h3>ConvBn2d<a class="headerlink" href="#id3" title="Permalink to this headline">¶</a></h3>
<dl class="class">
<dt id="torch.nn.intrinsic.qat.ConvBn2d">
<em class="property">class </em><code class="sig-prename descclassname">torch.nn.intrinsic.qat.</code><code class="sig-name descname">ConvBn2d</code><span class="sig-paren">(</span><em class="sig-param">in_channels</em>, <em class="sig-param">out_channels</em>, <em class="sig-param">kernel_size</em>, <em class="sig-param">stride=1</em>, <em class="sig-param">padding=0</em>, <em class="sig-param">dilation=1</em>, <em class="sig-param">groups=1</em>, <em class="sig-param">padding_mode='zeros'</em>, <em class="sig-param">eps=1e-05</em>, <em class="sig-param">momentum=0.1</em>, <em class="sig-param">freeze_bn=False</em>, <em class="sig-param">qconfig=None</em><span class="sig-paren">)</span><a class="reference internal" href="_modules/torch/nn/intrinsic/qat/modules/conv_fused.html#ConvBn2d"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#torch.nn.intrinsic.qat.ConvBn2d" title="Permalink to this definition">¶</a></dt>
<dd><p>A ConvBn2d module is a module fused from Conv2d and BatchNorm2d,
attached with FakeQuantize modules for both output activation and weight,
used in quantization aware training.</p>
<p>We combined the interface of <a class="reference internal" href="nn.html#torch.nn.Conv2d" title="torch.nn.Conv2d"><code class="xref py py-class docutils literal notranslate"><span class="pre">torch.nn.Conv2d</span></code></a> and
<a class="reference internal" href="nn.html#torch.nn.BatchNorm2d" title="torch.nn.BatchNorm2d"><code class="xref py py-class docutils literal notranslate"><span class="pre">torch.nn.BatchNorm2d</span></code></a>.</p>
<p>Implementation details: <a class="reference external" href="https://arxiv.org/pdf/1806.08342.pdf">https://arxiv.org/pdf/1806.08342.pdf</a> section 3.2.2</p>
<p>Similar to <a class="reference internal" href="nn.html#torch.nn.Conv2d" title="torch.nn.Conv2d"><code class="xref py py-class docutils literal notranslate"><span class="pre">torch.nn.Conv2d</span></code></a>, with FakeQuantize modules initialized
to default.</p>
<dl class="field-list simple">
<dt class="field-odd">Variables</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>~ConvBn2d.freeze_bn</strong> – </p></li>
<li><p><strong>~ConvBn2d.activation_post_process</strong> – fake quant module for output activation</p></li>
<li><p><strong>~ConvBn2d.weight_fake_quant</strong> – fake quant module for weight</p></li>
</ul>
</dd>
</dl>
</dd></dl>

</div>
<div class="section" id="id4">
<h3>ConvBnReLU2d<a class="headerlink" href="#id4" title="Permalink to this headline">¶</a></h3>
<dl class="class">
<dt id="torch.nn.intrinsic.qat.ConvBnReLU2d">
<em class="property">class </em><code class="sig-prename descclassname">torch.nn.intrinsic.qat.</code><code class="sig-name descname">ConvBnReLU2d</code><span class="sig-paren">(</span><em class="sig-param">in_channels</em>, <em class="sig-param">out_channels</em>, <em class="sig-param">kernel_size</em>, <em class="sig-param">stride=1</em>, <em class="sig-param">padding=0</em>, <em class="sig-param">dilation=1</em>, <em class="sig-param">groups=1</em>, <em class="sig-param">padding_mode='zeros'</em>, <em class="sig-param">eps=1e-05</em>, <em class="sig-param">momentum=0.1</em>, <em class="sig-param">freeze_bn=False</em>, <em class="sig-param">qconfig=None</em><span class="sig-paren">)</span><a class="reference internal" href="_modules/torch/nn/intrinsic/qat/modules/conv_fused.html#ConvBnReLU2d"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#torch.nn.intrinsic.qat.ConvBnReLU2d" title="Permalink to this definition">¶</a></dt>
<dd><p>A ConvBnReLU2d module is a module fused from Conv2d, BatchNorm2d and ReLU,
attached with FakeQuantize modules for both output activation and weight,
used in quantization aware training.</p>
<p>We combined the interface of <a class="reference internal" href="nn.html#torch.nn.Conv2d" title="torch.nn.Conv2d"><code class="xref py py-class docutils literal notranslate"><span class="pre">torch.nn.Conv2d</span></code></a> and
<a class="reference internal" href="nn.html#torch.nn.BatchNorm2d" title="torch.nn.BatchNorm2d"><code class="xref py py-class docutils literal notranslate"><span class="pre">torch.nn.BatchNorm2d</span></code></a> and <a class="reference internal" href="nn.html#torch.nn.ReLU" title="torch.nn.ReLU"><code class="xref py py-class docutils literal notranslate"><span class="pre">torch.nn.ReLU</span></code></a>.</p>
<p>Implementation details: <a class="reference external" href="https://arxiv.org/pdf/1806.08342.pdf">https://arxiv.org/pdf/1806.08342.pdf</a></p>
<p>Similar to <cite>torch.nn.Conv2d</cite>, with FakeQuantize modules initialized to
default.</p>
<dl class="field-list simple">
<dt class="field-odd">Variables</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>~ConvBnReLU2d.observer</strong> – fake quant module for output activation, it’s called observer
to align with post training flow</p></li>
<li><p><strong>~ConvBnReLU2d.weight_fake_quant</strong> – fake quant module for weight</p></li>
</ul>
</dd>
</dl>
</dd></dl>

</div>
<div class="section" id="id5">
<h3>ConvReLU2d<a class="headerlink" href="#id5" title="Permalink to this headline">¶</a></h3>
<dl class="class">
<dt id="torch.nn.intrinsic.qat.ConvReLU2d">
<em class="property">class </em><code class="sig-prename descclassname">torch.nn.intrinsic.qat.</code><code class="sig-name descname">ConvReLU2d</code><span class="sig-paren">(</span><em class="sig-param">in_channels</em>, <em class="sig-param">out_channels</em>, <em class="sig-param">kernel_size</em>, <em class="sig-param">stride=1</em>, <em class="sig-param">padding=0</em>, <em class="sig-param">dilation=1</em>, <em class="sig-param">groups=1</em>, <em class="sig-param">bias=True</em>, <em class="sig-param">padding_mode='zeros'</em>, <em class="sig-param">qconfig=None</em><span class="sig-paren">)</span><a class="reference internal" href="_modules/torch/nn/intrinsic/qat/modules/conv_fused.html#ConvReLU2d"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#torch.nn.intrinsic.qat.ConvReLU2d" title="Permalink to this definition">¶</a></dt>
<dd><p>A ConvReLU2d module is a fused module of Conv2d and ReLU, attached with
FakeQuantize modules for both output activation and weight for
quantization aware training.</p>
<p>We combined the interface of <a class="reference internal" href="nn.html#torch.nn.Conv2d" title="torch.nn.Conv2d"><code class="xref py py-class docutils literal notranslate"><span class="pre">Conv2d</span></code></a> and
<a class="reference internal" href="nn.html#torch.nn.ReLU" title="torch.nn.ReLU"><code class="xref py py-class docutils literal notranslate"><span class="pre">ReLU</span></code></a>.</p>
<dl class="field-list simple">
<dt class="field-odd">Variables</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>~ConvReLU2d.activation_post_process</strong> – fake quant module for output activation</p></li>
<li><p><strong>~ConvReLU2d.weight_fake_quant</strong> – fake quant module for weight</p></li>
</ul>
</dd>
</dl>
</dd></dl>

</div>
<div class="section" id="id6">
<h3>LinearReLU<a class="headerlink" href="#id6" title="Permalink to this headline">¶</a></h3>
<dl class="class">
<dt id="torch.nn.intrinsic.qat.LinearReLU">
<em class="property">class </em><code class="sig-prename descclassname">torch.nn.intrinsic.qat.</code><code class="sig-name descname">LinearReLU</code><span class="sig-paren">(</span><em class="sig-param">in_features</em>, <em class="sig-param">out_features</em>, <em class="sig-param">bias=True</em>, <em class="sig-param">qconfig=None</em><span class="sig-paren">)</span><a class="reference internal" href="_modules/torch/nn/intrinsic/qat/modules/linear_relu.html#LinearReLU"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#torch.nn.intrinsic.qat.LinearReLU" title="Permalink to this definition">¶</a></dt>
<dd><p>A LinearReLU module fused from Linear and ReLU modules, attached with
FakeQuantize modules for output activation and weight, used in
quantization aware training.</p>
<p>We adopt the same interface as <a class="reference internal" href="nn.html#torch.nn.Linear" title="torch.nn.Linear"><code class="xref py py-class docutils literal notranslate"><span class="pre">torch.nn.Linear</span></code></a>.</p>
<p>Similar to <cite>torch.nn.intrinsic.LinearReLU</cite>, with FakeQuantize modules initialized to
default.</p>
<dl class="field-list simple">
<dt class="field-odd">Variables</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>~LinearReLU.activation_post_process</strong> – fake quant module for output activation</p></li>
<li><p><strong>~LinearReLU.weight</strong> – fake quant module for weight</p></li>
</ul>
</dd>
</dl>
<p>Examples:</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="gp">&gt;&gt;&gt; </span><span class="n">m</span> <span class="o">=</span> <span class="n">nn</span><span class="o">.</span><span class="n">qat</span><span class="o">.</span><span class="n">LinearReLU</span><span class="p">(</span><span class="mi">20</span><span class="p">,</span> <span class="mi">30</span><span class="p">)</span>
<span class="gp">&gt;&gt;&gt; </span><span class="nb">input</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">randn</span><span class="p">(</span><span class="mi">128</span><span class="p">,</span> <span class="mi">20</span><span class="p">)</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">output</span> <span class="o">=</span> <span class="n">m</span><span class="p">(</span><span class="nb">input</span><span class="p">)</span>
<span class="gp">&gt;&gt;&gt; </span><span class="nb">print</span><span class="p">(</span><span class="n">output</span><span class="o">.</span><span class="n">size</span><span class="p">())</span>
<span class="go">torch.Size([128, 30])</span>
</pre></div>
</div>
</dd></dl>

</div>
</div>
<div class="section" id="torch-nn-intrinsic-quantized">
<h2>torch.nn.intrinsic.quantized<a class="headerlink" href="#torch-nn-intrinsic-quantized" title="Permalink to this headline">¶</a></h2>
<p>This module implements the quantized implementations of fused operations like conv + relu.</p>
<span class="target" id="module-torch.nn.intrinsic.quantized"></span><div class="section" id="id7">
<h3>ConvReLU2d<a class="headerlink" href="#id7" title="Permalink to this headline">¶</a></h3>
<dl class="class">
<dt id="torch.nn.intrinsic.quantized.ConvReLU2d">
<em class="property">class </em><code class="sig-prename descclassname">torch.nn.intrinsic.quantized.</code><code class="sig-name descname">ConvReLU2d</code><span class="sig-paren">(</span><em class="sig-param">in_channels</em>, <em class="sig-param">out_channels</em>, <em class="sig-param">kernel_size</em>, <em class="sig-param">stride=1</em>, <em class="sig-param">padding=0</em>, <em class="sig-param">dilation=1</em>, <em class="sig-param">groups=1</em>, <em class="sig-param">bias=True</em>, <em class="sig-param">padding_mode='zeros'</em><span class="sig-paren">)</span><a class="reference internal" href="_modules/torch/nn/intrinsic/quantized/modules/conv_relu.html#ConvReLU2d"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#torch.nn.intrinsic.quantized.ConvReLU2d" title="Permalink to this definition">¶</a></dt>
<dd><p>A ConvReLU2d module is a fused module of Conv2d and ReLU</p>
<p>We adopt the same interface as <a class="reference internal" href="#torch.nn.quantized.Conv2d" title="torch.nn.quantized.Conv2d"><code class="xref py py-class docutils literal notranslate"><span class="pre">torch.nn.quantized.Conv2d</span></code></a>.</p>
<dl class="field-list simple">
<dt class="field-odd">Variables</dt>
<dd class="field-odd"><p><strong>as torch.nn.quantized.Conv2d</strong> (<em>Same</em>) – </p>
</dd>
</dl>
</dd></dl>

</div>
<div class="section" id="id8">
<h3>ConvReLU3d<a class="headerlink" href="#id8" title="Permalink to this headline">¶</a></h3>
<dl class="class">
<dt id="torch.nn.intrinsic.quantized.ConvReLU3d">
<em class="property">class </em><code class="sig-prename descclassname">torch.nn.intrinsic.quantized.</code><code class="sig-name descname">ConvReLU3d</code><span class="sig-paren">(</span><em class="sig-param">in_channels</em>, <em class="sig-param">out_channels</em>, <em class="sig-param">kernel_size</em>, <em class="sig-param">stride=1</em>, <em class="sig-param">padding=0</em>, <em class="sig-param">dilation=1</em>, <em class="sig-param">groups=1</em>, <em class="sig-param">bias=True</em>, <em class="sig-param">padding_mode='zeros'</em><span class="sig-paren">)</span><a class="reference internal" href="_modules/torch/nn/intrinsic/quantized/modules/conv_relu.html#ConvReLU3d"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#torch.nn.intrinsic.quantized.ConvReLU3d" title="Permalink to this definition">¶</a></dt>
<dd><p>A ConvReLU3d module is a fused module of Conv3d and ReLU</p>
<p>We adopt the same interface as <a class="reference internal" href="#torch.nn.quantized.Conv3d" title="torch.nn.quantized.Conv3d"><code class="xref py py-class docutils literal notranslate"><span class="pre">torch.nn.quantized.Conv3d</span></code></a>.</p>
<p>Attributes: Same as torch.nn.quantized.Conv3d</p>
</dd></dl>

</div>
<div class="section" id="id9">
<h3>LinearReLU<a class="headerlink" href="#id9" title="Permalink to this headline">¶</a></h3>
<dl class="class">
<dt id="torch.nn.intrinsic.quantized.LinearReLU">
<em class="property">class </em><code class="sig-prename descclassname">torch.nn.intrinsic.quantized.</code><code class="sig-name descname">LinearReLU</code><span class="sig-paren">(</span><em class="sig-param">in_features</em>, <em class="sig-param">out_features</em>, <em class="sig-param">bias=True</em>, <em class="sig-param">dtype=torch.qint8</em><span class="sig-paren">)</span><a class="reference internal" href="_modules/torch/nn/intrinsic/quantized/modules/linear_relu.html#LinearReLU"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#torch.nn.intrinsic.quantized.LinearReLU" title="Permalink to this definition">¶</a></dt>
<dd><p>A LinearReLU module fused from Linear and ReLU modules</p>
<p>We adopt the same interface as <a class="reference internal" href="#torch.nn.quantized.Linear" title="torch.nn.quantized.Linear"><code class="xref py py-class docutils literal notranslate"><span class="pre">torch.nn.quantized.Linear</span></code></a>.</p>
<dl class="field-list simple">
<dt class="field-odd">Variables</dt>
<dd class="field-odd"><p>Same as <a class="reference internal" href="#torch.nn.quantized.Linear" title="torch.nn.quantized.Linear"><code class="xref py py-class docutils literal notranslate"><span class="pre">torch.nn.quantized.Linear</span></code></a></p>
</dd>
</dl>
<p>Examples:</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="gp">&gt;&gt;&gt; </span><span class="n">m</span> <span class="o">=</span> <span class="n">nn</span><span class="o">.</span><span class="n">intrinsic</span><span class="o">.</span><span class="n">LinearReLU</span><span class="p">(</span><span class="mi">20</span><span class="p">,</span> <span class="mi">30</span><span class="p">)</span>
<span class="gp">&gt;&gt;&gt; </span><span class="nb">input</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">randn</span><span class="p">(</span><span class="mi">128</span><span class="p">,</span> <span class="mi">20</span><span class="p">)</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">output</span> <span class="o">=</span> <span class="n">m</span><span class="p">(</span><span class="nb">input</span><span class="p">)</span>
<span class="gp">&gt;&gt;&gt; </span><span class="nb">print</span><span class="p">(</span><span class="n">output</span><span class="o">.</span><span class="n">size</span><span class="p">())</span>
<span class="go">torch.Size([128, 30])</span>
</pre></div>
</div>
</dd></dl>

</div>
</div>
<div class="section" id="id10">
<h2>torch.nn.qat<a class="headerlink" href="#id10" title="Permalink to this headline">¶</a></h2>
<p>This module implements versions of the key nn modules <strong>Conv2d()</strong> and <strong>Linear()</strong> which
run in FP32 but with rounding applied to simulate the effect of INT8 quantization.</p>
<span class="target" id="module-torch.nn.qat"></span><div class="section" id="conv2d">
<h3>Conv2d<a class="headerlink" href="#conv2d" title="Permalink to this headline">¶</a></h3>
<dl class="class">
<dt id="torch.nn.qat.Conv2d">
<em class="property">class </em><code class="sig-prename descclassname">torch.nn.qat.</code><code class="sig-name descname">Conv2d</code><span class="sig-paren">(</span><em class="sig-param">in_channels</em>, <em class="sig-param">out_channels</em>, <em class="sig-param">kernel_size</em>, <em class="sig-param">stride=1</em>, <em class="sig-param">padding=0</em>, <em class="sig-param">dilation=1</em>, <em class="sig-param">groups=1</em>, <em class="sig-param">bias=True</em>, <em class="sig-param">padding_mode='zeros'</em>, <em class="sig-param">qconfig=None</em><span class="sig-paren">)</span><a class="reference internal" href="_modules/torch/nn/qat/modules/conv.html#Conv2d"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#torch.nn.qat.Conv2d" title="Permalink to this definition">¶</a></dt>
<dd><p>A Conv2d module attached with FakeQuantize modules for both output
activation and weight, used for quantization aware training.</p>
<p>We adopt the same interface as <cite>torch.nn.Conv2d</cite>, please see
<a class="reference external" href="https://pytorch.org/docs/stable/nn.html?highlight=conv2d#torch.nn.Conv2d">https://pytorch.org/docs/stable/nn.html?highlight=conv2d#torch.nn.Conv2d</a>
for documentation.</p>
<p>Similar to <cite>torch.nn.Conv2d</cite>, with FakeQuantize modules initialized to
default.</p>
<dl class="field-list simple">
<dt class="field-odd">Variables</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>~Conv2d.activation_post_process</strong> – fake quant module for output activation</p></li>
<li><p><strong>~Conv2d.weight_fake_quant</strong> – fake quant module for weight</p></li>
</ul>
</dd>
</dl>
<dl class="method">
<dt id="torch.nn.qat.Conv2d.from_float">
<em class="property">classmethod </em><code class="sig-name descname">from_float</code><span class="sig-paren">(</span><em class="sig-param">mod</em>, <em class="sig-param">qconfig=None</em><span class="sig-paren">)</span><a class="reference internal" href="_modules/torch/nn/qat/modules/conv.html#Conv2d.from_float"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#torch.nn.qat.Conv2d.from_float" title="Permalink to this definition">¶</a></dt>
<dd><p>Create a qat module from a float module or qparams_dict</p>
<p>Args: <cite>mod</cite> a float module, either produced by torch.quantization utilities
or directly from user</p>
</dd></dl>

</dd></dl>

</div>
<div class="section" id="linear">
<h3>Linear<a class="headerlink" href="#linear" title="Permalink to this headline">¶</a></h3>
<dl class="class">
<dt id="torch.nn.qat.Linear">
<em class="property">class </em><code class="sig-prename descclassname">torch.nn.qat.</code><code class="sig-name descname">Linear</code><span class="sig-paren">(</span><em class="sig-param">in_features</em>, <em class="sig-param">out_features</em>, <em class="sig-param">bias=True</em>, <em class="sig-param">qconfig=None</em><span class="sig-paren">)</span><a class="reference internal" href="_modules/torch/nn/qat/modules/linear.html#Linear"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#torch.nn.qat.Linear" title="Permalink to this definition">¶</a></dt>
<dd><p>A linear module attached with FakeQuantize modules for both output
activation and weight, used for quantization aware training.</p>
<p>We adopt the same interface as <cite>torch.nn.Linear</cite>, please see
<a class="reference external" href="https://pytorch.org/docs/stable/nn.html#torch.nn.Linear">https://pytorch.org/docs/stable/nn.html#torch.nn.Linear</a>
for documentation.</p>
<p>Similar to <cite>torch.nn.Linear</cite>, with FakeQuantize modules initialized to
default.</p>
<dl class="field-list simple">
<dt class="field-odd">Variables</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>~Linear.activation_post_process</strong> – fake quant module for output activation</p></li>
<li><p><strong>~Linear.weight_fake_quant</strong> – fake quant module for weight</p></li>
</ul>
</dd>
</dl>
<dl class="method">
<dt id="torch.nn.qat.Linear.from_float">
<em class="property">classmethod </em><code class="sig-name descname">from_float</code><span class="sig-paren">(</span><em class="sig-param">mod</em>, <em class="sig-param">qconfig=None</em><span class="sig-paren">)</span><a class="reference internal" href="_modules/torch/nn/qat/modules/linear.html#Linear.from_float"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#torch.nn.qat.Linear.from_float" title="Permalink to this definition">¶</a></dt>
<dd><p>Create a qat module from a float module or qparams_dict</p>
<p>Args: <cite>mod</cite> a float module, either produced by torch.quantization utilities
or directly from user</p>
</dd></dl>

</dd></dl>

</div>
</div>
<div class="section" id="id11">
<h2>torch.nn.quantized<a class="headerlink" href="#id11" title="Permalink to this headline">¶</a></h2>
<p>This module implements the quantized versions of the nn layers such as <strong>Conv2d</strong> and <strong>ReLU</strong>.</p>
<div class="section" id="module-torch.nn.quantized.functional">
<span id="functional-interface"></span><h3>Functional interface<a class="headerlink" href="#module-torch.nn.quantized.functional" title="Permalink to this headline">¶</a></h3>
<p>Functional interface (quantized).</p>
<dl class="function">
<dt id="torch.nn.quantized.functional.relu">
<code class="sig-prename descclassname">torch.nn.quantized.functional.</code><code class="sig-name descname">relu</code><span class="sig-paren">(</span><em class="sig-param">input</em>, <em class="sig-param">inplace=False</em><span class="sig-paren">)</span> &#x2192; Tensor<a class="reference internal" href="_modules/torch/nn/quantized/functional.html#relu"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#torch.nn.quantized.functional.relu" title="Permalink to this definition">¶</a></dt>
<dd><p>Applies the rectified linear unit function element-wise.
See <a class="reference internal" href="#torch.nn.quantized.ReLU" title="torch.nn.quantized.ReLU"><code class="xref py py-class docutils literal notranslate"><span class="pre">ReLU</span></code></a> for more details.</p>
<dl class="field-list simple">
<dt class="field-odd">Parameters</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>input</strong> – quantized input</p></li>
<li><p><strong>inplace</strong> – perform the computation inplace</p></li>
</ul>
</dd>
</dl>
</dd></dl>

<dl class="function">
<dt id="torch.nn.quantized.functional.linear">
<code class="sig-prename descclassname">torch.nn.quantized.functional.</code><code class="sig-name descname">linear</code><span class="sig-paren">(</span><em class="sig-param">input: Tensor</em>, <em class="sig-param">weight: Tensor</em>, <em class="sig-param">bias: Optional[Tensor] = None</em>, <em class="sig-param">scale: Optional[float] = None</em>, <em class="sig-param">zero_point: Optional[int] = None</em><span class="sig-paren">)</span> &#x2192; Tensor<a class="reference internal" href="_modules/torch/nn/quantized/functional.html#linear"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#torch.nn.quantized.functional.linear" title="Permalink to this definition">¶</a></dt>
<dd><p>Applies a linear transformation to the incoming quantized data:
<span class="math"><span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><mi>y</mi><mo>=</mo><mi>x</mi><msup><mi>A</mi><mi>T</mi></msup><mo>+</mo><mi>b</mi></mrow><annotation encoding="application/x-tex">y = xA^T + b</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:0.625em;vertical-align:-0.19444em;"></span><span class="mord mathdefault" style="margin-right:0.03588em;">y</span><span class="mspace" style="margin-right:0.2777777777777778em;"></span><span class="mrel">=</span><span class="mspace" style="margin-right:0.2777777777777778em;"></span></span><span class="base"><span class="strut" style="height:0.924661em;vertical-align:-0.08333em;"></span><span class="mord mathdefault">x</span><span class="mord"><span class="mord mathdefault">A</span><span class="msupsub"><span class="vlist-t"><span class="vlist-r"><span class="vlist" style="height:0.8413309999999999em;"><span style="top:-3.063em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mathdefault mtight" style="margin-right:0.13889em;">T</span></span></span></span></span></span></span></span><span class="mspace" style="margin-right:0.2222222222222222em;"></span><span class="mbin">+</span><span class="mspace" style="margin-right:0.2222222222222222em;"></span></span><span class="base"><span class="strut" style="height:0.69444em;vertical-align:0em;"></span><span class="mord mathdefault">b</span></span></span></span>

</span>.
See <a class="reference internal" href="#torch.nn.quantized.Linear" title="torch.nn.quantized.Linear"><code class="xref py py-class docutils literal notranslate"><span class="pre">Linear</span></code></a></p>
<div class="admonition note">
<p class="admonition-title">Note</p>
<p>Current implementation packs weights on every call, which has penalty on performance.
If you want to avoid the overhead, use <a class="reference internal" href="#torch.nn.quantized.Linear" title="torch.nn.quantized.Linear"><code class="xref py py-class docutils literal notranslate"><span class="pre">Linear</span></code></a>.</p>
</div>
<dl class="field-list simple">
<dt class="field-odd">Parameters</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>input</strong> (<a class="reference internal" href="tensors.html#torch.Tensor" title="torch.Tensor"><em>Tensor</em></a>) – Quantized input of type <cite>torch.quint8</cite></p></li>
<li><p><strong>weight</strong> (<a class="reference internal" href="tensors.html#torch.Tensor" title="torch.Tensor"><em>Tensor</em></a>) – Quantized weight of type <cite>torch.qint8</cite></p></li>
<li><p><strong>bias</strong> (<a class="reference internal" href="tensors.html#torch.Tensor" title="torch.Tensor"><em>Tensor</em></a>) – None or fp32 bias of type <cite>torch.float</cite></p></li>
<li><p><strong>scale</strong> (<em>double</em>) – output scale. If None, derived from the input scale</p></li>
<li><p><strong>zero_point</strong> (<em>long</em>) – output zero point. If None, derived from the input zero_point</p></li>
</ul>
</dd>
</dl>
<dl class="simple">
<dt>Shape:</dt><dd><ul class="simple">
<li><p>Input: <span class="math"><span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><mo stretchy="false">(</mo><mi>N</mi><mo separator="true">,</mo><mo>∗</mo><mo separator="true">,</mo><mi>i</mi><mi>n</mi><mi mathvariant="normal">_</mi><mi>f</mi><mi>e</mi><mi>a</mi><mi>t</mi><mi>u</mi><mi>r</mi><mi>e</mi><mi>s</mi><mo stretchy="false">)</mo></mrow><annotation encoding="application/x-tex">(N, *, in\_features)</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:1.06em;vertical-align:-0.31em;"></span><span class="mopen">(</span><span class="mord mathdefault" style="margin-right:0.10903em;">N</span><span class="mpunct">,</span><span class="mspace" style="margin-right:0.16666666666666666em;"></span><span class="mord">∗</span><span class="mpunct">,</span><span class="mspace" style="margin-right:0.16666666666666666em;"></span><span class="mord mathdefault">i</span><span class="mord mathdefault">n</span><span class="mord" style="margin-right:0.02778em;">_</span><span class="mord mathdefault" style="margin-right:0.10764em;">f</span><span class="mord mathdefault">e</span><span class="mord mathdefault">a</span><span class="mord mathdefault">t</span><span class="mord mathdefault">u</span><span class="mord mathdefault" style="margin-right:0.02778em;">r</span><span class="mord mathdefault">e</span><span class="mord mathdefault">s</span><span class="mclose">)</span></span></span></span>

</span> where <cite>*</cite> means any number of
additional dimensions</p></li>
<li><p>Weight: <span class="math"><span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><mo stretchy="false">(</mo><mi>o</mi><mi>u</mi><mi>t</mi><mi mathvariant="normal">_</mi><mi>f</mi><mi>e</mi><mi>a</mi><mi>t</mi><mi>u</mi><mi>r</mi><mi>e</mi><mi>s</mi><mo separator="true">,</mo><mi>i</mi><mi>n</mi><mi mathvariant="normal">_</mi><mi>f</mi><mi>e</mi><mi>a</mi><mi>t</mi><mi>u</mi><mi>r</mi><mi>e</mi><mi>s</mi><mo stretchy="false">)</mo></mrow><annotation encoding="application/x-tex">(out\_features, in\_features)</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:1.06em;vertical-align:-0.31em;"></span><span class="mopen">(</span><span class="mord mathdefault">o</span><span class="mord mathdefault">u</span><span class="mord mathdefault">t</span><span class="mord" style="margin-right:0.02778em;">_</span><span class="mord mathdefault" style="margin-right:0.10764em;">f</span><span class="mord mathdefault">e</span><span class="mord mathdefault">a</span><span class="mord mathdefault">t</span><span class="mord mathdefault">u</span><span class="mord mathdefault" style="margin-right:0.02778em;">r</span><span class="mord mathdefault">e</span><span class="mord mathdefault">s</span><span class="mpunct">,</span><span class="mspace" style="margin-right:0.16666666666666666em;"></span><span class="mord mathdefault">i</span><span class="mord mathdefault">n</span><span class="mord" style="margin-right:0.02778em;">_</span><span class="mord mathdefault" style="margin-right:0.10764em;">f</span><span class="mord mathdefault">e</span><span class="mord mathdefault">a</span><span class="mord mathdefault">t</span><span class="mord mathdefault">u</span><span class="mord mathdefault" style="margin-right:0.02778em;">r</span><span class="mord mathdefault">e</span><span class="mord mathdefault">s</span><span class="mclose">)</span></span></span></span>

</span></p></li>
<li><p>Bias: <span class="math"><span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><mo stretchy="false">(</mo><mi>o</mi><mi>u</mi><mi>t</mi><mi mathvariant="normal">_</mi><mi>f</mi><mi>e</mi><mi>a</mi><mi>t</mi><mi>u</mi><mi>r</mi><mi>e</mi><mi>s</mi><mo stretchy="false">)</mo></mrow><annotation encoding="application/x-tex">(out\_features)</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:1.06em;vertical-align:-0.31em;"></span><span class="mopen">(</span><span class="mord mathdefault">o</span><span class="mord mathdefault">u</span><span class="mord mathdefault">t</span><span class="mord" style="margin-right:0.02778em;">_</span><span class="mord mathdefault" style="margin-right:0.10764em;">f</span><span class="mord mathdefault">e</span><span class="mord mathdefault">a</span><span class="mord mathdefault">t</span><span class="mord mathdefault">u</span><span class="mord mathdefault" style="margin-right:0.02778em;">r</span><span class="mord mathdefault">e</span><span class="mord mathdefault">s</span><span class="mclose">)</span></span></span></span>

</span></p></li>
<li><p>Output: <span class="math"><span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><mo stretchy="false">(</mo><mi>N</mi><mo separator="true">,</mo><mo>∗</mo><mo separator="true">,</mo><mi>o</mi><mi>u</mi><mi>t</mi><mi mathvariant="normal">_</mi><mi>f</mi><mi>e</mi><mi>a</mi><mi>t</mi><mi>u</mi><mi>r</mi><mi>e</mi><mi>s</mi><mo stretchy="false">)</mo></mrow><annotation encoding="application/x-tex">(N, *, out\_features)</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:1.06em;vertical-align:-0.31em;"></span><span class="mopen">(</span><span class="mord mathdefault" style="margin-right:0.10903em;">N</span><span class="mpunct">,</span><span class="mspace" style="margin-right:0.16666666666666666em;"></span><span class="mord">∗</span><span class="mpunct">,</span><span class="mspace" style="margin-right:0.16666666666666666em;"></span><span class="mord mathdefault">o</span><span class="mord mathdefault">u</span><span class="mord mathdefault">t</span><span class="mord" style="margin-right:0.02778em;">_</span><span class="mord mathdefault" style="margin-right:0.10764em;">f</span><span class="mord mathdefault">e</span><span class="mord mathdefault">a</span><span class="mord mathdefault">t</span><span class="mord mathdefault">u</span><span class="mord mathdefault" style="margin-right:0.02778em;">r</span><span class="mord mathdefault">e</span><span class="mord mathdefault">s</span><span class="mclose">)</span></span></span></span>

</span></p></li>
</ul>
</dd>
</dl>
</dd></dl>

<dl class="function">
<dt id="torch.nn.quantized.functional.conv2d">
<code class="sig-prename descclassname">torch.nn.quantized.functional.</code><code class="sig-name descname">conv2d</code><span class="sig-paren">(</span><em class="sig-param">input</em>, <em class="sig-param">weight</em>, <em class="sig-param">bias</em>, <em class="sig-param">stride=1</em>, <em class="sig-param">padding=0</em>, <em class="sig-param">dilation=1</em>, <em class="sig-param">groups=1</em>, <em class="sig-param">padding_mode='zeros'</em>, <em class="sig-param">scale=1.0</em>, <em class="sig-param">zero_point=0</em>, <em class="sig-param">dtype=torch.quint8</em><span class="sig-paren">)</span><a class="reference internal" href="_modules/torch/nn/quantized/functional.html#conv2d"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#torch.nn.quantized.functional.conv2d" title="Permalink to this definition">¶</a></dt>
<dd><p>Applies a 2D convolution over a quantized 2D input composed of several input
planes.</p>
<p>See <a class="reference internal" href="#torch.nn.quantized.Conv2d" title="torch.nn.quantized.Conv2d"><code class="xref py py-class docutils literal notranslate"><span class="pre">Conv2d</span></code></a> for details and output shape.</p>
<dl class="field-list simple">
<dt class="field-odd">Parameters</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>input</strong> – quantized input tensor of shape <span class="math"><span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><mo stretchy="false">(</mo><mtext>minibatch</mtext><mo separator="true">,</mo><mtext>in_channels</mtext><mo separator="true">,</mo><mi>i</mi><mi>H</mi><mo separator="true">,</mo><mi>i</mi><mi>W</mi><mo stretchy="false">)</mo></mrow><annotation encoding="application/x-tex">(\text{minibatch} , \text{in\_channels} , iH , iW)</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:1.06em;vertical-align:-0.31em;"></span><span class="mopen">(</span><span class="mord text"><span class="mord">minibatch</span></span><span class="mpunct">,</span><span class="mspace" style="margin-right:0.16666666666666666em;"></span><span class="mord text"><span class="mord">in_channels</span></span><span class="mpunct">,</span><span class="mspace" style="margin-right:0.16666666666666666em;"></span><span class="mord mathdefault">i</span><span class="mord mathdefault" style="margin-right:0.08125em;">H</span><span class="mpunct">,</span><span class="mspace" style="margin-right:0.16666666666666666em;"></span><span class="mord mathdefault">i</span><span class="mord mathdefault" style="margin-right:0.13889em;">W</span><span class="mclose">)</span></span></span></span>

</span></p></li>
<li><p><strong>weight</strong> – quantized filters of shape <span class="math"><span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><mo stretchy="false">(</mo><mtext>out_channels</mtext><mo separator="true">,</mo><mfrac><mtext>in_channels</mtext><mtext>groups</mtext></mfrac><mo separator="true">,</mo><mi>k</mi><mi>H</mi><mo separator="true">,</mo><mi>k</mi><mi>W</mi><mo stretchy="false">)</mo></mrow><annotation encoding="application/x-tex">(\text{out\_channels} , \frac{\text{in\_channels}}{\text{groups}} , kH , kW)</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:1.4942159999999998em;vertical-align:-0.481108em;"></span><span class="mopen">(</span><span class="mord text"><span class="mord">out_channels</span></span><span class="mpunct">,</span><span class="mspace" style="margin-right:0.16666666666666666em;"></span><span class="mord"><span class="mopen nulldelimiter"></span><span class="mfrac"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:1.013108em;"><span style="top:-2.6550000000000002em;"><span class="pstrut" style="height:3em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mtight"><span class="mord text mtight"><span class="mord mtight">groups</span></span></span></span></span><span style="top:-3.23em;"><span class="pstrut" style="height:3em;"></span><span class="frac-line" style="border-bottom-width:0.04em;"></span></span><span style="top:-3.527em;"><span class="pstrut" style="height:3em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mtight"><span class="mord text mtight"><span class="mord mtight">in_channels</span></span></span></span></span></span><span class="vlist-s">​</span></span><span class="vlist-r"><span class="vlist" style="height:0.481108em;"><span></span></span></span></span></span><span class="mclose 
nulldelimiter"></span></span><span class="mpunct">,</span><span class="mspace" style="margin-right:0.16666666666666666em;"></span><span class="mord mathdefault" style="margin-right:0.03148em;">k</span><span class="mord mathdefault" style="margin-right:0.08125em;">H</span><span class="mpunct">,</span><span class="mspace" style="margin-right:0.16666666666666666em;"></span><span class="mord mathdefault" style="margin-right:0.03148em;">k</span><span class="mord mathdefault" style="margin-right:0.13889em;">W</span><span class="mclose">)</span></span></span></span>

</span></p></li>
<li><p><strong>bias</strong> – <strong>non-quantized</strong> bias tensor of shape <span class="math"><span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><mo stretchy="false">(</mo><mtext>out_channels</mtext><mo stretchy="false">)</mo></mrow><annotation encoding="application/x-tex">(\text{out\_channels})</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:1.06em;vertical-align:-0.31em;"></span><span class="mopen">(</span><span class="mord text"><span class="mord">out_channels</span></span><span class="mclose">)</span></span></span></span>

</span>. The tensor type must be <cite>torch.float</cite>.</p></li>
<li><p><strong>stride</strong> – the stride of the convolving kernel. Can be a single number or a
tuple <cite>(sH, sW)</cite>. Default: 1</p></li>
<li><p><strong>padding</strong> – implicit paddings on both sides of the input. Can be a
single number or a tuple <cite>(padH, padW)</cite>. Default: 0</p></li>
<li><p><strong>dilation</strong> – the spacing between kernel elements. Can be a single number or
a tuple <cite>(dH, dW)</cite>. Default: 1</p></li>
<li><p><strong>groups</strong> – split input into groups, <span class="math"><span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><mtext>in_channels</mtext></mrow><annotation encoding="application/x-tex">\text{in\_channels}</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:1.00444em;vertical-align:-0.31em;"></span><span class="mord text"><span class="mord">in_channels</span></span></span></span></span>

</span> should be divisible by the
number of groups. Default: 1</p></li>
<li><p><strong>padding_mode</strong> – the padding mode to use. Only “zeros” is supported for quantized convolution at the moment. Default: “zeros”</p></li>
<li><p><strong>scale</strong> – quantization scale for the output. Default: 1.0</p></li>
<li><p><strong>zero_point</strong> – quantization zero_point for the output. Default: 0</p></li>
<li><p><strong>dtype</strong> – quantization data type to use. Default: <code class="docutils literal notranslate"><span class="pre">torch.quint8</span></code></p></li>
</ul>
</dd>
</dl>
<p>Examples:</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="gp">&gt;&gt;&gt; </span><span class="kn">from</span> <span class="nn">torch.nn.quantized</span> <span class="kn">import</span> <span class="n">functional</span> <span class="k">as</span> <span class="n">qF</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">filters</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">randn</span><span class="p">(</span><span class="mi">8</span><span class="p">,</span> <span class="mi">4</span><span class="p">,</span> <span class="mi">3</span><span class="p">,</span> <span class="mi">3</span><span class="p">,</span> <span class="n">dtype</span><span class="o">=</span><span class="n">torch</span><span class="o">.</span><span class="n">float</span><span class="p">)</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">inputs</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">randn</span><span class="p">(</span><span class="mi">1</span><span class="p">,</span> <span class="mi">4</span><span class="p">,</span> <span class="mi">5</span><span class="p">,</span> <span class="mi">5</span><span class="p">,</span> <span class="n">dtype</span><span class="o">=</span><span class="n">torch</span><span class="o">.</span><span class="n">float</span><span class="p">)</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">bias</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">randn</span><span class="p">(</span><span class="mi">8</span><span class="p">,</span> <span class="n">dtype</span><span class="o">=</span><span class="n">torch</span><span class="o">.</span><span class="n">float</span><span class="p">)</span>
<span class="go">&gt;&gt;&gt;</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">scale</span><span class="p">,</span> <span class="n">zero_point</span> <span class="o">=</span> <span class="mf">1.0</span><span class="p">,</span> <span class="mi">0</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">dtype_inputs</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">quint8</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">dtype_filters</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">qint8</span>
<span class="go">&gt;&gt;&gt;</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">q_filters</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">quantize_per_tensor</span><span class="p">(</span><span class="n">filters</span><span class="p">,</span> <span class="n">scale</span><span class="p">,</span> <span class="n">zero_point</span><span class="p">,</span> <span class="n">dtype_filters</span><span class="p">)</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">q_inputs</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">quantize_per_tensor</span><span class="p">(</span><span class="n">inputs</span><span class="p">,</span> <span class="n">scale</span><span class="p">,</span> <span class="n">zero_point</span><span class="p">,</span> <span class="n">dtype_inputs</span><span class="p">)</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">qF</span><span class="o">.</span><span class="n">conv2d</span><span class="p">(</span><span class="n">q_inputs</span><span class="p">,</span> <span class="n">q_filters</span><span class="p">,</span> <span class="n">bias</span><span class="p">,</span> <span class="n">padding</span><span class="o">=</span><span class="mi">1</span><span class="p">,</span> <span class="n">scale</span><span class="o">=</span><span class="n">scale</span><span class="p">,</span> <span class="n">zero_point</span><span class="o">=</span><span class="n">zero_point</span><span class="p">)</span>
</pre></div>
</div>
</dd></dl>

<dl class="function">
<dt id="torch.nn.quantized.functional.conv3d">
<code class="sig-prename descclassname">torch.nn.quantized.functional.</code><code class="sig-name descname">conv3d</code><span class="sig-paren">(</span><em class="sig-param">input</em>, <em class="sig-param">weight</em>, <em class="sig-param">bias</em>, <em class="sig-param">stride=1</em>, <em class="sig-param">padding=0</em>, <em class="sig-param">dilation=1</em>, <em class="sig-param">groups=1</em>, <em class="sig-param">padding_mode='zeros'</em>, <em class="sig-param">scale=1.0</em>, <em class="sig-param">zero_point=0</em>, <em class="sig-param">dtype=torch.quint8</em><span class="sig-paren">)</span><a class="reference internal" href="_modules/torch/nn/quantized/functional.html#conv3d"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#torch.nn.quantized.functional.conv3d" title="Permalink to this definition">¶</a></dt>
<dd><p>Applies a 3D convolution over a quantized 3D input composed of several input
planes.</p>
<p>See <a class="reference internal" href="#torch.nn.quantized.Conv3d" title="torch.nn.quantized.Conv3d"><code class="xref py py-class docutils literal notranslate"><span class="pre">Conv3d</span></code></a> for details and output shape.</p>
<dl class="field-list simple">
<dt class="field-odd">Parameters</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>input</strong> – quantized input tensor of shape
<span class="math"><span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><mo stretchy="false">(</mo><mtext>minibatch</mtext><mo separator="true">,</mo><mtext>in_channels</mtext><mo separator="true">,</mo><mi>i</mi><mi>D</mi><mo separator="true">,</mo><mi>i</mi><mi>H</mi><mo separator="true">,</mo><mi>i</mi><mi>W</mi><mo stretchy="false">)</mo></mrow><annotation encoding="application/x-tex">(\text{minibatch} , \text{in\_channels} , iD , iH , iW)</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:1.06em;vertical-align:-0.31em;"></span><span class="mopen">(</span><span class="mord text"><span class="mord">minibatch</span></span><span class="mpunct">,</span><span class="mspace" style="margin-right:0.16666666666666666em;"></span><span class="mord text"><span class="mord">in_channels</span></span><span class="mpunct">,</span><span class="mspace" style="margin-right:0.16666666666666666em;"></span><span class="mord mathdefault">i</span><span class="mord mathdefault" style="margin-right:0.02778em;">D</span><span class="mpunct">,</span><span class="mspace" style="margin-right:0.16666666666666666em;"></span><span class="mord mathdefault">i</span><span class="mord mathdefault" style="margin-right:0.08125em;">H</span><span class="mpunct">,</span><span class="mspace" style="margin-right:0.16666666666666666em;"></span><span class="mord mathdefault">i</span><span class="mord mathdefault" style="margin-right:0.13889em;">W</span><span class="mclose">)</span></span></span></span>

</span></p></li>
<li><p><strong>weight</strong> – quantized filters of shape
<span class="math"><span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><mo stretchy="false">(</mo><mtext>out_channels</mtext><mo separator="true">,</mo><mfrac><mtext>in_channels</mtext><mtext>groups</mtext></mfrac><mo separator="true">,</mo><mi>k</mi><mi>D</mi><mo separator="true">,</mo><mi>k</mi><mi>H</mi><mo separator="true">,</mo><mi>k</mi><mi>W</mi><mo stretchy="false">)</mo></mrow><annotation encoding="application/x-tex">(\text{out\_channels} , \frac{\text{in\_channels}}{\text{groups}} , kD , kH , kW)</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:1.4942159999999998em;vertical-align:-0.481108em;"></span><span class="mopen">(</span><span class="mord text"><span class="mord">out_channels</span></span><span class="mpunct">,</span><span class="mspace" style="margin-right:0.16666666666666666em;"></span><span class="mord"><span class="mopen nulldelimiter"></span><span class="mfrac"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:1.013108em;"><span style="top:-2.6550000000000002em;"><span class="pstrut" style="height:3em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mtight"><span class="mord text mtight"><span class="mord mtight">groups</span></span></span></span></span><span style="top:-3.23em;"><span class="pstrut" style="height:3em;"></span><span class="frac-line" style="border-bottom-width:0.04em;"></span></span><span style="top:-3.527em;"><span class="pstrut" style="height:3em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mtight"><span class="mord text mtight"><span class="mord mtight">in_channels</span></span></span></span></span></span><span class="vlist-s">​</span></span><span class="vlist-r"><span class="vlist" style="height:0.481108em;"><span></span></span></span></span></span><span class="mclose 
nulldelimiter"></span></span><span class="mpunct">,</span><span class="mspace" style="margin-right:0.16666666666666666em;"></span><span class="mord mathdefault" style="margin-right:0.03148em;">k</span><span class="mord mathdefault" style="margin-right:0.02778em;">D</span><span class="mpunct">,</span><span class="mspace" style="margin-right:0.16666666666666666em;"></span><span class="mord mathdefault" style="margin-right:0.03148em;">k</span><span class="mord mathdefault" style="margin-right:0.08125em;">H</span><span class="mpunct">,</span><span class="mspace" style="margin-right:0.16666666666666666em;"></span><span class="mord mathdefault" style="margin-right:0.03148em;">k</span><span class="mord mathdefault" style="margin-right:0.13889em;">W</span><span class="mclose">)</span></span></span></span>

</span></p></li>
<li><p><strong>bias</strong> – <strong>non-quantized</strong> bias tensor of shape
<span class="math"><span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><mo stretchy="false">(</mo><mtext>out_channels</mtext><mo stretchy="false">)</mo></mrow><annotation encoding="application/x-tex">(\text{out\_channels})</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:1.06em;vertical-align:-0.31em;"></span><span class="mopen">(</span><span class="mord text"><span class="mord">out_channels</span></span><span class="mclose">)</span></span></span></span>

</span>. The tensor type must be <cite>torch.float</cite>.</p></li>
<li><p><strong>stride</strong> – the stride of the convolving kernel. Can be a single number or a
tuple <cite>(sD, sH, sW)</cite>. Default: 1</p></li>
<li><p><strong>padding</strong> – implicit paddings on both sides of the input. Can be a
single number or a tuple <cite>(padD, padH, padW)</cite>. Default: 0</p></li>
<li><p><strong>dilation</strong> – the spacing between kernel elements. Can be a single number or
a tuple <cite>(dD, dH, dW)</cite>. Default: 1</p></li>
<li><p><strong>groups</strong> – split input into groups, <span class="math"><span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><mtext>in_channels</mtext></mrow><annotation encoding="application/x-tex">\text{in\_channels}</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:1.00444em;vertical-align:-0.31em;"></span><span class="mord text"><span class="mord">in_channels</span></span></span></span></span>

</span> should be
divisible by the number of groups. Default: 1</p></li>
<li><p><strong>padding_mode</strong> – the padding mode to use. Only “zeros” is supported for
quantized convolution at the moment. Default: “zeros”</p></li>
<li><p><strong>scale</strong> – quantization scale for the output. Default: 1.0</p></li>
<li><p><strong>zero_point</strong> – quantization zero_point for the output. Default: 0</p></li>
<li><p><strong>dtype</strong> – quantization data type to use. Default: <code class="docutils literal notranslate"><span class="pre">torch.quint8</span></code></p></li>
</ul>
</dd>
</dl>
<p>Examples:</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="gp">&gt;&gt;&gt; </span><span class="kn">from</span> <span class="nn">torch.nn.quantized</span> <span class="kn">import</span> <span class="n">functional</span> <span class="k">as</span> <span class="n">qF</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">filters</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">randn</span><span class="p">(</span><span class="mi">8</span><span class="p">,</span> <span class="mi">4</span><span class="p">,</span> <span class="mi">3</span><span class="p">,</span> <span class="mi">3</span><span class="p">,</span> <span class="mi">3</span><span class="p">,</span> <span class="n">dtype</span><span class="o">=</span><span class="n">torch</span><span class="o">.</span><span class="n">float</span><span class="p">)</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">inputs</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">randn</span><span class="p">(</span><span class="mi">1</span><span class="p">,</span> <span class="mi">4</span><span class="p">,</span> <span class="mi">5</span><span class="p">,</span> <span class="mi">5</span><span class="p">,</span> <span class="mi">5</span><span class="p">,</span> <span class="n">dtype</span><span class="o">=</span><span class="n">torch</span><span class="o">.</span><span class="n">float</span><span class="p">)</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">bias</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">randn</span><span class="p">(</span><span class="mi">8</span><span class="p">,</span> <span class="n">dtype</span><span class="o">=</span><span class="n">torch</span><span class="o">.</span><span class="n">float</span><span class="p">)</span>
<span class="go">&gt;&gt;&gt;</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">scale</span><span class="p">,</span> <span class="n">zero_point</span> <span class="o">=</span> <span class="mf">1.0</span><span class="p">,</span> <span class="mi">0</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">dtype_inputs</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">quint8</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">dtype_filters</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">qint8</span>
<span class="go">&gt;&gt;&gt;</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">q_filters</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">quantize_per_tensor</span><span class="p">(</span><span class="n">filters</span><span class="p">,</span> <span class="n">scale</span><span class="p">,</span> <span class="n">zero_point</span><span class="p">,</span> <span class="n">dtype_filters</span><span class="p">)</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">q_inputs</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">quantize_per_tensor</span><span class="p">(</span><span class="n">inputs</span><span class="p">,</span> <span class="n">scale</span><span class="p">,</span> <span class="n">zero_point</span><span class="p">,</span> <span class="n">dtype_inputs</span><span class="p">)</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">qF</span><span class="o">.</span><span class="n">conv3d</span><span class="p">(</span><span class="n">q_inputs</span><span class="p">,</span> <span class="n">q_filters</span><span class="p">,</span> <span class="n">bias</span><span class="p">,</span> <span class="n">padding</span><span class="o">=</span><span class="mi">1</span><span class="p">,</span> <span class="n">scale</span><span class="o">=</span><span class="n">scale</span><span class="p">,</span> <span class="n">zero_point</span><span class="o">=</span><span class="n">zero_point</span><span class="p">)</span>
</pre></div>
</div>
</dd></dl>

<dl class="function">
<dt id="torch.nn.quantized.functional.max_pool2d">
<code class="sig-prename descclassname">torch.nn.quantized.functional.</code><code class="sig-name descname">max_pool2d</code><span class="sig-paren">(</span><em class="sig-param">input</em>, <em class="sig-param">kernel_size</em>, <em class="sig-param">stride=None</em>, <em class="sig-param">padding=0</em>, <em class="sig-param">dilation=1</em>, <em class="sig-param">ceil_mode=False</em>, <em class="sig-param">return_indices=False</em><span class="sig-paren">)</span><a class="reference internal" href="_modules/torch/nn/quantized/functional.html#max_pool2d"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#torch.nn.quantized.functional.max_pool2d" title="Permalink to this definition">¶</a></dt>
<dd><p>Applies a 2D max pooling over a quantized input signal composed of
several quantized input planes.</p>
<div class="admonition note">
<p class="admonition-title">Note</p>
<p>The input quantization parameters are propagated to the output.</p>
</div>
<p>See <code class="xref py py-class docutils literal notranslate"><span class="pre">MaxPool2d</span></code> for details.</p>
</dd></dl>

<dl class="function">
<dt id="torch.nn.quantized.functional.adaptive_avg_pool2d">
<code class="sig-prename descclassname">torch.nn.quantized.functional.</code><code class="sig-name descname">adaptive_avg_pool2d</code><span class="sig-paren">(</span><em class="sig-param">input: Tensor, output_size: BroadcastingList2[int]</em><span class="sig-paren">)</span> &#x2192; Tensor<a class="reference internal" href="_modules/torch/nn/quantized/functional.html#adaptive_avg_pool2d"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#torch.nn.quantized.functional.adaptive_avg_pool2d" title="Permalink to this definition">¶</a></dt>
<dd><p>Applies a 2D adaptive average pooling over a quantized input signal composed
of several quantized input planes.</p>
<div class="admonition note">
<p class="admonition-title">Note</p>
<p>The input quantization parameters propagate to the output.</p>
</div>
<p>See <code class="xref py py-class docutils literal notranslate"><span class="pre">AdaptiveAvgPool2d</span></code> for details and output shape.</p>
<dl class="field-list simple">
<dt class="field-odd">Parameters</dt>
<dd class="field-odd"><p><strong>output_size</strong> – the target output size (single integer or
double-integer tuple)</p>
</dd>
</dl>
</dd></dl>

<dl class="function">
<dt id="torch.nn.quantized.functional.avg_pool2d">
<code class="sig-prename descclassname">torch.nn.quantized.functional.</code><code class="sig-name descname">avg_pool2d</code><span class="sig-paren">(</span><em class="sig-param">input</em>, <em class="sig-param">kernel_size</em>, <em class="sig-param">stride=None</em>, <em class="sig-param">padding=0</em>, <em class="sig-param">ceil_mode=False</em>, <em class="sig-param">count_include_pad=True</em>, <em class="sig-param">divisor_override=None</em><span class="sig-paren">)</span><a class="reference internal" href="_modules/torch/nn/quantized/functional.html#avg_pool2d"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#torch.nn.quantized.functional.avg_pool2d" title="Permalink to this definition">¶</a></dt>
<dd><p>Applies 2D average-pooling operation in <span class="math"><span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><mi>k</mi><mi>H</mi><mo>×</mo><mi>k</mi><mi>W</mi></mrow><annotation encoding="application/x-tex">kH \times kW</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:0.77777em;vertical-align:-0.08333em;"></span><span class="mord mathdefault" style="margin-right:0.03148em;">k</span><span class="mord mathdefault" style="margin-right:0.08125em;">H</span><span class="mspace" style="margin-right:0.2222222222222222em;"></span><span class="mbin">×</span><span class="mspace" style="margin-right:0.2222222222222222em;"></span></span><span class="base"><span class="strut" style="height:0.69444em;vertical-align:0em;"></span><span class="mord mathdefault" style="margin-right:0.03148em;">k</span><span class="mord mathdefault" style="margin-right:0.13889em;">W</span></span></span></span>

</span> regions by step size
<span class="math"><span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><mi>s</mi><mi>H</mi><mo>×</mo><mi>s</mi><mi>W</mi></mrow><annotation encoding="application/x-tex">sH \times sW</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:0.76666em;vertical-align:-0.08333em;"></span><span class="mord mathdefault">s</span><span class="mord mathdefault" style="margin-right:0.08125em;">H</span><span class="mspace" style="margin-right:0.2222222222222222em;"></span><span class="mbin">×</span><span class="mspace" style="margin-right:0.2222222222222222em;"></span></span><span class="base"><span class="strut" style="height:0.68333em;vertical-align:0em;"></span><span class="mord mathdefault">s</span><span class="mord mathdefault" style="margin-right:0.13889em;">W</span></span></span></span>

</span> steps. The number of output features is equal to the number of
input planes.</p>
<div class="admonition note">
<p class="admonition-title">Note</p>
<p>The input quantization parameters propagate to the output.</p>
</div>
<p>See <code class="xref py py-class docutils literal notranslate"><span class="pre">AvgPool2d</span></code> for details and output shape.</p>
<dl class="field-list simple">
<dt class="field-odd">Parameters</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>input</strong> – quantized input tensor <span class="math"><span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><mo stretchy="false">(</mo><mtext>minibatch</mtext><mo separator="true">,</mo><mtext>in_channels</mtext><mo separator="true">,</mo><mi>i</mi><mi>H</mi><mo separator="true">,</mo><mi>i</mi><mi>W</mi><mo stretchy="false">)</mo></mrow><annotation encoding="application/x-tex">(\text{minibatch} , \text{in\_channels} , iH , iW)</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:1.06em;vertical-align:-0.31em;"></span><span class="mopen">(</span><span class="mord text"><span class="mord">minibatch</span></span><span class="mpunct">,</span><span class="mspace" style="margin-right:0.16666666666666666em;"></span><span class="mord text"><span class="mord">in_channels</span></span><span class="mpunct">,</span><span class="mspace" style="margin-right:0.16666666666666666em;"></span><span class="mord mathdefault">i</span><span class="mord mathdefault" style="margin-right:0.08125em;">H</span><span class="mpunct">,</span><span class="mspace" style="margin-right:0.16666666666666666em;"></span><span class="mord mathdefault">i</span><span class="mord mathdefault" style="margin-right:0.13889em;">W</span><span class="mclose">)</span></span></span></span>

</span></p></li>
<li><p><strong>kernel_size</strong> – size of the pooling region. Can be a single number or a
tuple <cite>(kH, kW)</cite></p></li>
<li><p><strong>stride</strong> – stride of the pooling operation. Can be a single number or a
tuple <cite>(sH, sW)</cite>. Default: <code class="xref py py-attr docutils literal notranslate"><span class="pre">kernel_size</span></code></p></li>
<li><p><strong>padding</strong> – implicit zero paddings on both sides of the input. Can be a
single number or a tuple <cite>(padH, padW)</cite>. Default: 0</p></li>
<li><p><strong>ceil_mode</strong> – when True, will use <cite>ceil</cite> instead of <cite>floor</cite> in the formula
to compute the output shape. Default: <code class="docutils literal notranslate"><span class="pre">False</span></code></p></li>
<li><p><strong>count_include_pad</strong> – when True, will include the zero-padding in the
averaging calculation. Default: <code class="docutils literal notranslate"><span class="pre">True</span></code></p></li>
<li><p><strong>divisor_override</strong> – if specified, it will be used as divisor, otherwise
size of the pooling region will be used. Default: None</p></li>
</ul>
</dd>
</dl>
</dd></dl>

<dl class="function">
<dt id="torch.nn.quantized.functional.interpolate">
<code class="sig-prename descclassname">torch.nn.quantized.functional.</code><code class="sig-name descname">interpolate</code><span class="sig-paren">(</span><em class="sig-param">input</em>, <em class="sig-param">size=None</em>, <em class="sig-param">scale_factor=None</em>, <em class="sig-param">mode='nearest'</em>, <em class="sig-param">align_corners=None</em><span class="sig-paren">)</span><a class="reference internal" href="_modules/torch/nn/quantized/functional.html#interpolate"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#torch.nn.quantized.functional.interpolate" title="Permalink to this definition">¶</a></dt>
<dd><p>Down/up samples the input to either the given <code class="xref py py-attr docutils literal notranslate"><span class="pre">size</span></code> or the given
<code class="xref py py-attr docutils literal notranslate"><span class="pre">scale_factor</span></code></p>
<p>See <a class="reference internal" href="nn.functional.html#torch.nn.functional.interpolate" title="torch.nn.functional.interpolate"><code class="xref py py-func docutils literal notranslate"><span class="pre">torch.nn.functional.interpolate()</span></code></a> for implementation details.</p>
<p>The input dimensions are interpreted in the form:
<cite>mini-batch x channels x [optional depth] x [optional height] x width</cite>.</p>
<div class="admonition note">
<p class="admonition-title">Note</p>
<p>The input quantization parameters propagate to the output.</p>
</div>
<div class="admonition note">
<p class="admonition-title">Note</p>
<p>Only 2D/3D input is supported for quantized inputs</p>
</div>
<div class="admonition note">
<p class="admonition-title">Note</p>
<p>Only the following modes are supported for the quantized inputs:</p>
<ul class="simple">
<li><p><cite>bilinear</cite></p></li>
<li><p><cite>nearest</cite></p></li>
</ul>
</div>
<dl class="field-list simple">
<dt class="field-odd">Parameters</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>input</strong> (<a class="reference internal" href="tensors.html#torch.Tensor" title="torch.Tensor"><em>Tensor</em></a>) – the input tensor</p></li>
<li><p><strong>size</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.8)"><em>int</em></a><em> or </em><em>Tuple</em><em>[</em><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.8)"><em>int</em></a><em>] or </em><em>Tuple</em><em>[</em><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.8)"><em>int</em></a><em>, </em><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.8)"><em>int</em></a><em>] or </em><em>Tuple</em><em>[</em><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.8)"><em>int</em></a><em>, </em><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.8)"><em>int</em></a><em>, </em><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.8)"><em>int</em></a><em>]</em>) – output spatial size.</p></li>
<li><p><strong>scale_factor</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#float" title="(in Python v3.8)"><em>float</em></a><em> or </em><em>Tuple</em><em>[</em><a class="reference external" href="https://docs.python.org/3/library/functions.html#float" title="(in Python v3.8)"><em>float</em></a><em>]</em>) – multiplier for spatial size. Has to match input size if it is a tuple.</p></li>
<li><p><strong>mode</strong> (<a class="reference external" href="https://docs.python.org/3/library/stdtypes.html#str" title="(in Python v3.8)"><em>str</em></a>) – algorithm used for upsampling:
<code class="docutils literal notranslate"><span class="pre">'nearest'</span></code> | <code class="docutils literal notranslate"><span class="pre">'bilinear'</span></code></p></li>
<li><p><strong>align_corners</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#bool" title="(in Python v3.8)"><em>bool</em></a><em>, </em><em>optional</em>) – Geometrically, we consider the pixels of the
input and output as squares rather than points.
If set to <code class="docutils literal notranslate"><span class="pre">True</span></code>, the input and output tensors are aligned by the
center points of their corner pixels, preserving the values at the corner pixels.
If set to <code class="docutils literal notranslate"><span class="pre">False</span></code>, the input and output tensors are aligned by the corner
points of their corner pixels, and the interpolation uses edge value padding
for out-of-boundary values, making this operation <em>independent</em> of input size
when <code class="xref py py-attr docutils literal notranslate"><span class="pre">scale_factor</span></code> is kept the same. This only has an effect when <code class="xref py py-attr docutils literal notranslate"><span class="pre">mode</span></code>
is <code class="docutils literal notranslate"><span class="pre">'bilinear'</span></code>.
Default: <code class="docutils literal notranslate"><span class="pre">False</span></code></p></li>
</ul>
</dd>
</dl>
</dd></dl>

<dl class="function">
<dt id="torch.nn.quantized.functional.upsample">
<code class="sig-prename descclassname">torch.nn.quantized.functional.</code><code class="sig-name descname">upsample</code><span class="sig-paren">(</span><em class="sig-param">input</em>, <em class="sig-param">size=None</em>, <em class="sig-param">scale_factor=None</em>, <em class="sig-param">mode='nearest'</em>, <em class="sig-param">align_corners=None</em><span class="sig-paren">)</span><a class="reference internal" href="_modules/torch/nn/quantized/functional.html#upsample"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#torch.nn.quantized.functional.upsample" title="Permalink to this definition">¶</a></dt>
<dd><p>Upsamples the input to either the given <code class="xref py py-attr docutils literal notranslate"><span class="pre">size</span></code> or the given
<code class="xref py py-attr docutils literal notranslate"><span class="pre">scale_factor</span></code></p>
<div class="admonition warning">
<p class="admonition-title">Warning</p>
<p>This function is deprecated in favor of
<a class="reference internal" href="#torch.nn.quantized.functional.interpolate" title="torch.nn.quantized.functional.interpolate"><code class="xref py py-func docutils literal notranslate"><span class="pre">torch.nn.quantized.functional.interpolate()</span></code></a>.
This is equivalent with <code class="docutils literal notranslate"><span class="pre">nn.quantized.functional.interpolate(...)</span></code>.</p>
</div>
<p>See <a class="reference internal" href="nn.functional.html#torch.nn.functional.interpolate" title="torch.nn.functional.interpolate"><code class="xref py py-func docutils literal notranslate"><span class="pre">torch.nn.functional.interpolate()</span></code></a> for implementation details.</p>
<p>The input dimensions are interpreted in the form:
<cite>mini-batch x channels x [optional depth] x [optional height] x width</cite>.</p>
<div class="admonition note">
<p class="admonition-title">Note</p>
<p>The input quantization parameters propagate to the output.</p>
</div>
<div class="admonition note">
<p class="admonition-title">Note</p>
<p>Only 2D input is supported for quantized inputs</p>
</div>
<div class="admonition note">
<p class="admonition-title">Note</p>
<p>Only the following modes are supported for the quantized inputs:</p>
<ul class="simple">
<li><p><cite>bilinear</cite></p></li>
<li><p><cite>nearest</cite></p></li>
</ul>
</div>
<dl class="field-list simple">
<dt class="field-odd">Parameters</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>input</strong> (<a class="reference internal" href="tensors.html#torch.Tensor" title="torch.Tensor"><em>Tensor</em></a>) – quantized input tensor</p></li>
<li><p><strong>size</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.8)"><em>int</em></a><em> or </em><em>Tuple</em><em>[</em><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.8)"><em>int</em></a><em>] or </em><em>Tuple</em><em>[</em><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.8)"><em>int</em></a><em>, </em><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.8)"><em>int</em></a><em>] or </em><em>Tuple</em><em>[</em><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.8)"><em>int</em></a><em>, </em><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.8)"><em>int</em></a><em>, </em><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.8)"><em>int</em></a><em>]</em>) – output spatial size.</p></li>
<li><p><strong>scale_factor</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#float" title="(in Python v3.8)"><em>float</em></a><em> or </em><em>Tuple</em><em>[</em><a class="reference external" href="https://docs.python.org/3/library/functions.html#float" title="(in Python v3.8)"><em>float</em></a><em>]</em>) – multiplier for spatial size. Has to be an integer.</p></li>
<li><p><strong>mode</strong> (<em>string</em>) – algorithm used for upsampling:
<code class="docutils literal notranslate"><span class="pre">'nearest'</span></code> | <code class="docutils literal notranslate"><span class="pre">'bilinear'</span></code></p></li>
<li><p><strong>align_corners</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#bool" title="(in Python v3.8)"><em>bool</em></a><em>, </em><em>optional</em>) – Geometrically, we consider the pixels of the
input and output as squares rather than points.
If set to <code class="docutils literal notranslate"><span class="pre">True</span></code>, the input and output tensors are aligned by the
center points of their corner pixels, preserving the values at the corner pixels.
If set to <code class="docutils literal notranslate"><span class="pre">False</span></code>, the input and output tensors are aligned by the corner
points of their corner pixels, and the interpolation uses edge value padding
for out-of-boundary values, making this operation <em>independent</em> of input size
when <code class="xref py py-attr docutils literal notranslate"><span class="pre">scale_factor</span></code> is kept the same. This only has an effect when <code class="xref py py-attr docutils literal notranslate"><span class="pre">mode</span></code>
is <code class="docutils literal notranslate"><span class="pre">'bilinear'</span></code>.
Default: <code class="docutils literal notranslate"><span class="pre">False</span></code></p></li>
</ul>
</dd>
</dl>
<div class="admonition warning">
<p class="admonition-title">Warning</p>
<p>With <code class="docutils literal notranslate"><span class="pre">align_corners</span> <span class="pre">=</span> <span class="pre">True</span></code>, the linearly interpolating modes
(<cite>bilinear</cite>) don’t proportionally align the
output and input pixels, and thus the output values can depend on the
input size. This was the default behavior for these modes up to version
0.3.1. Since then, the default behavior is <code class="docutils literal notranslate"><span class="pre">align_corners</span> <span class="pre">=</span> <span class="pre">False</span></code>.
See <a class="reference internal" href="nn.html#torch.nn.Upsample" title="torch.nn.Upsample"><code class="xref py py-class docutils literal notranslate"><span class="pre">Upsample</span></code></a> for concrete examples on how this
affects the outputs.</p>
</div>
</dd></dl>

<dl class="function">
<dt id="torch.nn.quantized.functional.upsample_bilinear">
<code class="sig-prename descclassname">torch.nn.quantized.functional.</code><code class="sig-name descname">upsample_bilinear</code><span class="sig-paren">(</span><em class="sig-param">input</em>, <em class="sig-param">size=None</em>, <em class="sig-param">scale_factor=None</em><span class="sig-paren">)</span><a class="reference internal" href="_modules/torch/nn/quantized/functional.html#upsample_bilinear"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#torch.nn.quantized.functional.upsample_bilinear" title="Permalink to this definition">¶</a></dt>
<dd><p>Upsamples the input, using bilinear upsampling.</p>
<div class="admonition warning">
<p class="admonition-title">Warning</p>
<p>This function is deprecated in favor of
<a class="reference internal" href="#torch.nn.quantized.functional.interpolate" title="torch.nn.quantized.functional.interpolate"><code class="xref py py-func docutils literal notranslate"><span class="pre">torch.nn.quantized.functional.interpolate()</span></code></a>.
This is equivalent to
<code class="docutils literal notranslate"><span class="pre">nn.quantized.functional.interpolate(...,</span> <span class="pre">mode='bilinear',</span> <span class="pre">align_corners=True)</span></code>.</p>
</div>
<div class="admonition note">
<p class="admonition-title">Note</p>
<p>The input quantization parameters propagate to the output.</p>
</div>
<div class="admonition note">
<p class="admonition-title">Note</p>
<p>Only 2D inputs are supported</p>
</div>
<dl class="field-list simple">
<dt class="field-odd">Parameters</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>input</strong> (<a class="reference internal" href="tensors.html#torch.Tensor" title="torch.Tensor"><em>Tensor</em></a>) – quantized input</p></li>
<li><p><strong>size</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.8)"><em>int</em></a><em> or </em><em>Tuple</em><em>[</em><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.8)"><em>int</em></a><em>, </em><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.8)"><em>int</em></a><em>]</em>) – output spatial size.</p></li>
<li><p><strong>scale_factor</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.8)"><em>int</em></a><em> or </em><em>Tuple</em><em>[</em><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.8)"><em>int</em></a><em>, </em><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.8)"><em>int</em></a><em>]</em>) – multiplier for spatial size</p></li>
</ul>
</dd>
</dl>
</dd></dl>

<dl class="function">
<dt id="torch.nn.quantized.functional.upsample_nearest">
<code class="sig-prename descclassname">torch.nn.quantized.functional.</code><code class="sig-name descname">upsample_nearest</code><span class="sig-paren">(</span><em class="sig-param">input</em>, <em class="sig-param">size=None</em>, <em class="sig-param">scale_factor=None</em><span class="sig-paren">)</span><a class="reference internal" href="_modules/torch/nn/quantized/functional.html#upsample_nearest"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#torch.nn.quantized.functional.upsample_nearest" title="Permalink to this definition">¶</a></dt>
<dd><p>Upsamples the input, using nearest neighbours’ pixel values.</p>
<div class="admonition warning">
<p class="admonition-title">Warning</p>
<p>This function is deprecated in favor of
<a class="reference internal" href="#torch.nn.quantized.functional.interpolate" title="torch.nn.quantized.functional.interpolate"><code class="xref py py-func docutils literal notranslate"><span class="pre">torch.nn.quantized.functional.interpolate()</span></code></a>.
This is equivalent to <code class="docutils literal notranslate"><span class="pre">nn.quantized.functional.interpolate(...,</span> <span class="pre">mode='nearest')</span></code>.</p>
</div>
<div class="admonition note">
<p class="admonition-title">Note</p>
<p>The input quantization parameters propagate to the output.</p>
</div>
<div class="admonition note">
<p class="admonition-title">Note</p>
<p>Only 2D inputs are supported</p>
</div>
<dl class="field-list simple">
<dt class="field-odd">Parameters</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>input</strong> (<a class="reference internal" href="tensors.html#torch.Tensor" title="torch.Tensor"><em>Tensor</em></a>) – quantized input</p></li>
<li><p><strong>size</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.8)"><em>int</em></a><em> or </em><em>Tuple</em><em>[</em><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.8)"><em>int</em></a><em>, </em><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.8)"><em>int</em></a><em>] or </em><em>Tuple</em><em>[</em><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.8)"><em>int</em></a><em>, </em><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.8)"><em>int</em></a><em>, </em><a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.8)"><em>int</em></a><em>]</em>) – output spatial
size.</p></li>
<li><p><strong>scale_factor</strong> (<a class="reference external" href="https://docs.python.org/3/library/functions.html#int" title="(in Python v3.8)"><em>int</em></a>) – multiplier for spatial size. Has to be an integer.</p></li>
</ul>
</dd>
</dl>
</dd></dl>

<span class="target" id="module-torch.nn.quantized"></span></div>
<div class="section" id="relu">
<h3>ReLU<a class="headerlink" href="#relu" title="Permalink to this headline">¶</a></h3>
<dl class="class">
<dt id="torch.nn.quantized.ReLU">
<em class="property">class </em><code class="sig-prename descclassname">torch.nn.quantized.</code><code class="sig-name descname">ReLU</code><span class="sig-paren">(</span><em class="sig-param">inplace=False</em><span class="sig-paren">)</span><a class="reference internal" href="_modules/torch/nn/quantized/modules/activation.html#ReLU"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#torch.nn.quantized.ReLU" title="Permalink to this definition">¶</a></dt>
<dd><p>Applies quantized rectified linear unit function element-wise:</p>
<p><span class="math"><span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><mtext>ReLU</mtext><mo stretchy="false">(</mo><mi>x</mi><mo stretchy="false">)</mo><mo>=</mo><mi>max</mi><mo>⁡</mo><mo stretchy="false">(</mo><msub><mi>x</mi><mn>0</mn></msub><mo separator="true">,</mo><mi>x</mi><mo stretchy="false">)</mo></mrow><annotation encoding="application/x-tex">\text{ReLU}(x)= \max(x_0, x)</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:1em;vertical-align:-0.25em;"></span><span class="mord text"><span class="mord">ReLU</span></span><span class="mopen">(</span><span class="mord mathdefault">x</span><span class="mclose">)</span><span class="mspace" style="margin-right:0.2777777777777778em;"></span><span class="mrel">=</span><span class="mspace" style="margin-right:0.2777777777777778em;"></span></span><span class="base"><span class="strut" style="height:1em;vertical-align:-0.25em;"></span><span class="mop">max</span><span class="mopen">(</span><span class="mord"><span class="mord mathdefault">x</span><span class="msupsub"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:0.30110799999999993em;"><span style="top:-2.5500000000000003em;margin-left:0em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mtight">0</span></span></span></span><span class="vlist-s">​</span></span><span class="vlist-r"><span class="vlist" style="height:0.15em;"><span></span></span></span></span></span></span><span class="mpunct">,</span><span class="mspace" style="margin-right:0.16666666666666666em;"></span><span class="mord mathdefault">x</span><span class="mclose">)</span></span></span></span>

</span>, where <span class="math"><span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><msub><mi>x</mi><mn>0</mn></msub></mrow><annotation encoding="application/x-tex">x_0</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:0.58056em;vertical-align:-0.15em;"></span><span class="mord"><span class="mord mathdefault">x</span><span class="msupsub"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:0.30110799999999993em;"><span style="top:-2.5500000000000003em;margin-left:0em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mtight">0</span></span></span></span><span class="vlist-s">​</span></span><span class="vlist-r"><span class="vlist" style="height:0.15em;"><span></span></span></span></span></span></span></span></span></span>

</span> is the zero point.</p>
<p>Please see <a class="reference external" href="https://pytorch.org/docs/stable/nn.html#torch.nn.ReLU">https://pytorch.org/docs/stable/nn.html#torch.nn.ReLU</a>
for more documentation on ReLU.</p>
<dl class="field-list simple">
<dt class="field-odd">Parameters</dt>
<dd class="field-odd"><p><strong>inplace</strong> – (Currently not supported) can optionally do the operation in-place.</p>
</dd>
</dl>
<dl class="simple">
<dt>Shape:</dt><dd><ul class="simple">
<li><p>Input: <span class="math"><span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><mo stretchy="false">(</mo><mi>N</mi><mo separator="true">,</mo><mo>∗</mo><mo stretchy="false">)</mo></mrow><annotation encoding="application/x-tex">(N, *)</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:1em;vertical-align:-0.25em;"></span><span class="mopen">(</span><span class="mord mathdefault" style="margin-right:0.10903em;">N</span><span class="mpunct">,</span><span class="mspace" style="margin-right:0.16666666666666666em;"></span><span class="mord">∗</span><span class="mclose">)</span></span></span></span>

</span> where <cite>*</cite> means any number of additional
dimensions</p></li>
<li><p>Output: <span class="math"><span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><mo stretchy="false">(</mo><mi>N</mi><mo separator="true">,</mo><mo>∗</mo><mo stretchy="false">)</mo></mrow><annotation encoding="application/x-tex">(N, *)</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:1em;vertical-align:-0.25em;"></span><span class="mopen">(</span><span class="mord mathdefault" style="margin-right:0.10903em;">N</span><span class="mpunct">,</span><span class="mspace" style="margin-right:0.16666666666666666em;"></span><span class="mord">∗</span><span class="mclose">)</span></span></span></span>

</span>, same shape as the input</p></li>
</ul>
</dd>
</dl>
<p>Examples:</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="gp">&gt;&gt;&gt; </span><span class="n">m</span> <span class="o">=</span> <span class="n">nn</span><span class="o">.</span><span class="n">quantized</span><span class="o">.</span><span class="n">ReLU</span><span class="p">()</span>
<span class="gp">&gt;&gt;&gt; </span><span class="nb">input</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">randn</span><span class="p">(</span><span class="mi">2</span><span class="p">)</span>
<span class="gp">&gt;&gt;&gt; </span><span class="nb">input</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">quantize_per_tensor</span><span class="p">(</span><span class="nb">input</span><span class="p">,</span> <span class="mf">1.0</span><span class="p">,</span> <span class="mi">0</span><span class="p">,</span> <span class="n">dtype</span><span class="o">=</span><span class="n">torch</span><span class="o">.</span><span class="n">qint32</span><span class="p">)</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">output</span> <span class="o">=</span> <span class="n">m</span><span class="p">(</span><span class="nb">input</span><span class="p">)</span>
</pre></div>
</div>
</dd></dl>

</div>
<div class="section" id="relu6">
<h3>ReLU6<a class="headerlink" href="#relu6" title="Permalink to this headline">¶</a></h3>
<dl class="class">
<dt id="torch.nn.quantized.ReLU6">
<em class="property">class </em><code class="sig-prename descclassname">torch.nn.quantized.</code><code class="sig-name descname">ReLU6</code><span class="sig-paren">(</span><em class="sig-param">inplace=False</em><span class="sig-paren">)</span><a class="reference internal" href="_modules/torch/nn/quantized/modules/activation.html#ReLU6"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#torch.nn.quantized.ReLU6" title="Permalink to this definition">¶</a></dt>
<dd><p>Applies the element-wise function:</p>
<p><span class="math"><span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><mtext>ReLU6</mtext><mo stretchy="false">(</mo><mi>x</mi><mo stretchy="false">)</mo><mo>=</mo><mi>min</mi><mo>⁡</mo><mo stretchy="false">(</mo><mi>max</mi><mo>⁡</mo><mo stretchy="false">(</mo><msub><mi>x</mi><mn>0</mn></msub><mo separator="true">,</mo><mi>x</mi><mo stretchy="false">)</mo><mo separator="true">,</mo><mi>q</mi><mo stretchy="false">(</mo><mn>6</mn><mo stretchy="false">)</mo><mo stretchy="false">)</mo></mrow><annotation encoding="application/x-tex">\text{ReLU6}(x) = \min(\max(x_0, x), q(6))</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:1em;vertical-align:-0.25em;"></span><span class="mord text"><span class="mord">ReLU6</span></span><span class="mopen">(</span><span class="mord mathdefault">x</span><span class="mclose">)</span><span class="mspace" style="margin-right:0.2777777777777778em;"></span><span class="mrel">=</span><span class="mspace" style="margin-right:0.2777777777777778em;"></span></span><span class="base"><span class="strut" style="height:1em;vertical-align:-0.25em;"></span><span class="mop">min</span><span class="mopen">(</span><span class="mop">max</span><span class="mopen">(</span><span class="mord"><span class="mord mathdefault">x</span><span class="msupsub"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:0.30110799999999993em;"><span style="top:-2.5500000000000003em;margin-left:0em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mtight">0</span></span></span></span><span class="vlist-s">​</span></span><span class="vlist-r"><span class="vlist" style="height:0.15em;"><span></span></span></span></span></span></span><span class="mpunct">,</span><span class="mspace" 
style="margin-right:0.16666666666666666em;"></span><span class="mord mathdefault">x</span><span class="mclose">)</span><span class="mpunct">,</span><span class="mspace" style="margin-right:0.16666666666666666em;"></span><span class="mord mathdefault" style="margin-right:0.03588em;">q</span><span class="mopen">(</span><span class="mord">6</span><span class="mclose">)</span><span class="mclose">)</span></span></span></span>

</span>, where <span class="math"><span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><msub><mi>x</mi><mn>0</mn></msub></mrow><annotation encoding="application/x-tex">x_0</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:0.58056em;vertical-align:-0.15em;"></span><span class="mord"><span class="mord mathdefault">x</span><span class="msupsub"><span class="vlist-t vlist-t2"><span class="vlist-r"><span class="vlist" style="height:0.30110799999999993em;"><span style="top:-2.5500000000000003em;margin-left:0em;margin-right:0.05em;"><span class="pstrut" style="height:2.7em;"></span><span class="sizing reset-size6 size3 mtight"><span class="mord mtight">0</span></span></span></span><span class="vlist-s">​</span></span><span class="vlist-r"><span class="vlist" style="height:0.15em;"><span></span></span></span></span></span></span></span></span></span>

</span> is the
zero_point, and <span class="math"><span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><mi>q</mi><mo stretchy="false">(</mo><mn>6</mn><mo stretchy="false">)</mo></mrow><annotation encoding="application/x-tex">q(6)</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:1em;vertical-align:-0.25em;"></span><span class="mord mathdefault" style="margin-right:0.03588em;">q</span><span class="mopen">(</span><span class="mord">6</span><span class="mclose">)</span></span></span></span>

</span> is the quantized representation of number 6.</p>
<dl class="field-list simple">
<dt class="field-odd">Parameters</dt>
<dd class="field-odd"><p><strong>inplace</strong> – can optionally do the operation in-place. Default: <code class="docutils literal notranslate"><span class="pre">False</span></code></p>
</dd>
</dl>
<dl class="simple">
<dt>Shape:</dt><dd><ul class="simple">
<li><p>Input: <span class="math"><span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><mo stretchy="false">(</mo><mi>N</mi><mo separator="true">,</mo><mo>∗</mo><mo stretchy="false">)</mo></mrow><annotation encoding="application/x-tex">(N, *)</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:1em;vertical-align:-0.25em;"></span><span class="mopen">(</span><span class="mord mathdefault" style="margin-right:0.10903em;">N</span><span class="mpunct">,</span><span class="mspace" style="margin-right:0.16666666666666666em;"></span><span class="mord">∗</span><span class="mclose">)</span></span></span></span>

</span> where <cite>*</cite> means any number of additional
dimensions</p></li>
<li><p>Output: <span class="math"><span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><mo stretchy="false">(</mo><mi>N</mi><mo separator="true">,</mo><mo>∗</mo><mo stretchy="false">)</mo></mrow><annotation encoding="application/x-tex">(N, *)</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:1em;vertical-align:-0.25em;"></span><span class="mopen">(</span><span class="mord mathdefault" style="margin-right:0.10903em;">N</span><span class="mpunct">,</span><span class="mspace" style="margin-right:0.16666666666666666em;"></span><span class="mord">∗</span><span class="mclose">)</span></span></span></span>

</span>, same shape as the input</p></li>
</ul>
</dd>
</dl>
<img alt="_images/ReLU6.png" src="_images/ReLU6.png" />
<p>Examples:</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="gp">&gt;&gt;&gt; </span><span class="n">m</span> <span class="o">=</span> <span class="n">nn</span><span class="o">.</span><span class="n">quantized</span><span class="o">.</span><span class="n">ReLU6</span><span class="p">()</span>
<span class="gp">&gt;&gt;&gt; </span><span class="nb">input</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">randn</span><span class="p">(</span><span class="mi">2</span><span class="p">)</span>
<span class="gp">&gt;&gt;&gt; </span><span class="nb">input</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">quantize_per_tensor</span><span class="p">(</span><span class="nb">input</span><span class="p">,</span> <span class="mf">1.0</span><span class="p">,</span> <span class="mi">0</span><span class="p">,</span> <span class="n">dtype</span><span class="o">=</span><span class="n">torch</span><span class="o">.</span><span class="n">qint32</span><span class="p">)</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">output</span> <span class="o">=</span> <span class="n">m</span><span class="p">(</span><span class="nb">input</span><span class="p">)</span>
</pre></div>
</div>
</dd></dl>

</div>
<div class="section" id="id12">
<h3>Conv2d<a class="headerlink" href="#id12" title="Permalink to this headline">¶</a></h3>
<dl class="class">
<dt id="torch.nn.quantized.Conv2d">
<em class="property">class </em><code class="sig-prename descclassname">torch.nn.quantized.</code><code class="sig-name descname">Conv2d</code><span class="sig-paren">(</span><em class="sig-param">in_channels</em>, <em class="sig-param">out_channels</em>, <em class="sig-param">kernel_size</em>, <em class="sig-param">stride=1</em>, <em class="sig-param">padding=0</em>, <em class="sig-param">dilation=1</em>, <em class="sig-param">groups=1</em>, <em class="sig-param">bias=True</em>, <em class="sig-param">padding_mode='zeros'</em><span class="sig-paren">)</span><a class="reference internal" href="_modules/torch/nn/quantized/modules/conv.html#Conv2d"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#torch.nn.quantized.Conv2d" title="Permalink to this definition">¶</a></dt>
<dd><p>Applies a 2D convolution over a quantized input signal composed of
several quantized input planes.</p>
<p>For details on input arguments, parameters, and implementation see
<a class="reference internal" href="nn.html#torch.nn.Conv2d" title="torch.nn.Conv2d"><code class="xref py py-class docutils literal notranslate"><span class="pre">Conv2d</span></code></a>.</p>
<div class="admonition note">
<p class="admonition-title">Note</p>
<p>Only <cite>zeros</cite> is supported for the <code class="xref py py-attr docutils literal notranslate"><span class="pre">padding_mode</span></code> argument.</p>
</div>
<div class="admonition note">
<p class="admonition-title">Note</p>
<p>Only <cite>torch.quint8</cite> is supported for the input data type.</p>
</div>
<dl class="field-list simple">
<dt class="field-odd">Variables</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>~Conv2d.weight</strong> (<a class="reference internal" href="tensors.html#torch.Tensor" title="torch.Tensor"><em>Tensor</em></a>) – packed tensor derived from the learnable weight
parameter.</p></li>
<li><p><strong>~Conv2d.scale</strong> (<a class="reference internal" href="tensors.html#torch.Tensor" title="torch.Tensor"><em>Tensor</em></a>) – scalar for the output scale</p></li>
<li><p><strong>~Conv2d.zero_point</strong> (<a class="reference internal" href="tensors.html#torch.Tensor" title="torch.Tensor"><em>Tensor</em></a>) – scalar for the output zero point</p></li>
</ul>
</dd>
</dl>
<p>See <a class="reference internal" href="nn.html#torch.nn.Conv2d" title="torch.nn.Conv2d"><code class="xref py py-class docutils literal notranslate"><span class="pre">Conv2d</span></code></a> for other attributes.</p>
<p>Examples:</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="gp">&gt;&gt;&gt; </span><span class="c1"># With square kernels and equal stride</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">m</span> <span class="o">=</span> <span class="n">nn</span><span class="o">.</span><span class="n">quantized</span><span class="o">.</span><span class="n">Conv2d</span><span class="p">(</span><span class="mi">16</span><span class="p">,</span> <span class="mi">33</span><span class="p">,</span> <span class="mi">3</span><span class="p">,</span> <span class="n">stride</span><span class="o">=</span><span class="mi">2</span><span class="p">)</span>
<span class="gp">&gt;&gt;&gt; </span><span class="c1"># non-square kernels and unequal stride and with padding</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">m</span> <span class="o">=</span> <span class="n">nn</span><span class="o">.</span><span class="n">quantized</span><span class="o">.</span><span class="n">Conv2d</span><span class="p">(</span><span class="mi">16</span><span class="p">,</span> <span class="mi">33</span><span class="p">,</span> <span class="p">(</span><span class="mi">3</span><span class="p">,</span> <span class="mi">5</span><span class="p">),</span> <span class="n">stride</span><span class="o">=</span><span class="p">(</span><span class="mi">2</span><span class="p">,</span> <span class="mi">1</span><span class="p">),</span> <span class="n">padding</span><span class="o">=</span><span class="p">(</span><span class="mi">4</span><span class="p">,</span> <span class="mi">2</span><span class="p">))</span>
<span class="gp">&gt;&gt;&gt; </span><span class="c1"># non-square kernels and unequal stride and with padding and dilation</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">m</span> <span class="o">=</span> <span class="n">nn</span><span class="o">.</span><span class="n">quantized</span><span class="o">.</span><span class="n">Conv2d</span><span class="p">(</span><span class="mi">16</span><span class="p">,</span> <span class="mi">33</span><span class="p">,</span> <span class="p">(</span><span class="mi">3</span><span class="p">,</span> <span class="mi">5</span><span class="p">),</span> <span class="n">stride</span><span class="o">=</span><span class="p">(</span><span class="mi">2</span><span class="p">,</span> <span class="mi">1</span><span class="p">),</span> <span class="n">padding</span><span class="o">=</span><span class="p">(</span><span class="mi">4</span><span class="p">,</span> <span class="mi">2</span><span class="p">),</span> <span class="n">dilation</span><span class="o">=</span><span class="p">(</span><span class="mi">3</span><span class="p">,</span> <span class="mi">1</span><span class="p">))</span>
<span class="gp">&gt;&gt;&gt; </span><span class="nb">input</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">randn</span><span class="p">(</span><span class="mi">20</span><span class="p">,</span> <span class="mi">16</span><span class="p">,</span> <span class="mi">50</span><span class="p">,</span> <span class="mi">100</span><span class="p">)</span>
<span class="gp">&gt;&gt;&gt; </span><span class="c1"># quantize input to qint8</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">q_input</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">quantize_per_tensor</span><span class="p">(</span><span class="nb">input</span><span class="p">,</span> <span class="n">scale</span><span class="o">=</span><span class="mf">1.0</span><span class="p">,</span> <span class="n">zero_point</span><span class="o">=</span><span class="mi">0</span><span class="p">,</span> <span class="n">dtype</span><span class="o">=</span><span class="n">torch</span><span class="o">.</span><span class="n">qint32</span><span class="p">)</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">output</span> <span class="o">=</span> <span class="n">m</span><span class="p">(</span><span class="nb">input</span><span class="p">)</span>
</pre></div>
</div>
<dl class="method">
<dt id="torch.nn.quantized.Conv2d.from_float">
<em class="property">classmethod </em><code class="sig-name descname">from_float</code><span class="sig-paren">(</span><em class="sig-param">mod</em><span class="sig-paren">)</span><a class="reference internal" href="_modules/torch/nn/quantized/modules/conv.html#Conv2d.from_float"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#torch.nn.quantized.Conv2d.from_float" title="Permalink to this definition">¶</a></dt>
<dd><p>Creates a quantized module from a float module or qparams_dict.</p>
<dl class="field-list simple">
<dt class="field-odd">Parameters</dt>
<dd class="field-odd"><p><strong>mod</strong> (<a class="reference internal" href="nn.html#torch.nn.Module" title="torch.nn.Module"><em>Module</em></a>) – a float module, either produced by torch.quantization
utilities or provided by the user</p>
</dd>
</dl>
</dd></dl>

</dd></dl>

</div>
<div class="section" id="conv3d">
<h3>Conv3d<a class="headerlink" href="#conv3d" title="Permalink to this headline">¶</a></h3>
<dl class="class">
<dt id="torch.nn.quantized.Conv3d">
<em class="property">class </em><code class="sig-prename descclassname">torch.nn.quantized.</code><code class="sig-name descname">Conv3d</code><span class="sig-paren">(</span><em class="sig-param">in_channels</em>, <em class="sig-param">out_channels</em>, <em class="sig-param">kernel_size</em>, <em class="sig-param">stride=1</em>, <em class="sig-param">padding=0</em>, <em class="sig-param">dilation=1</em>, <em class="sig-param">groups=1</em>, <em class="sig-param">bias=True</em>, <em class="sig-param">padding_mode='zeros'</em><span class="sig-paren">)</span><a class="reference internal" href="_modules/torch/nn/quantized/modules/conv.html#Conv3d"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#torch.nn.quantized.Conv3d" title="Permalink to this definition">¶</a></dt>
<dd><p>Applies a 3D convolution over a quantized input signal composed of
several quantized input planes.</p>
<p>For details on input arguments, parameters, and implementation see
<a class="reference internal" href="nn.html#torch.nn.Conv3d" title="torch.nn.Conv3d"><code class="xref py py-class docutils literal notranslate"><span class="pre">Conv3d</span></code></a>.</p>
<div class="admonition note">
<p class="admonition-title">Note</p>
<p>Only <cite>zeros</cite> is supported for the <code class="xref py py-attr docutils literal notranslate"><span class="pre">padding_mode</span></code> argument.</p>
</div>
<div class="admonition note">
<p class="admonition-title">Note</p>
<p>Only <cite>torch.quint8</cite> is supported for the input data type.</p>
</div>
<dl class="field-list simple">
<dt class="field-odd">Variables</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>~Conv3d.weight</strong> (<a class="reference internal" href="tensors.html#torch.Tensor" title="torch.Tensor"><em>Tensor</em></a>) – packed tensor derived from the learnable weight
parameter.</p></li>
<li><p><strong>~Conv3d.scale</strong> (<a class="reference internal" href="tensors.html#torch.Tensor" title="torch.Tensor"><em>Tensor</em></a>) – scalar for the output scale</p></li>
<li><p><strong>~Conv3d.zero_point</strong> (<a class="reference internal" href="tensors.html#torch.Tensor" title="torch.Tensor"><em>Tensor</em></a>) – scalar for the output zero point</p></li>
</ul>
</dd>
</dl>
<p>See <a class="reference internal" href="nn.html#torch.nn.Conv3d" title="torch.nn.Conv3d"><code class="xref py py-class docutils literal notranslate"><span class="pre">Conv3d</span></code></a> for other attributes.</p>
<p>Examples:</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="gp">&gt;&gt;&gt; </span><span class="c1"># With square kernels and equal stride</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">m</span> <span class="o">=</span> <span class="n">nn</span><span class="o">.</span><span class="n">quantized</span><span class="o">.</span><span class="n">Conv3d</span><span class="p">(</span><span class="mi">16</span><span class="p">,</span> <span class="mi">33</span><span class="p">,</span> <span class="mi">3</span><span class="p">,</span> <span class="n">stride</span><span class="o">=</span><span class="mi">2</span><span class="p">)</span>
<span class="gp">&gt;&gt;&gt; </span><span class="c1"># non-square kernels and unequal stride and with padding</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">m</span> <span class="o">=</span> <span class="n">nn</span><span class="o">.</span><span class="n">quantized</span><span class="o">.</span><span class="n">Conv3d</span><span class="p">(</span><span class="mi">16</span><span class="p">,</span> <span class="mi">33</span><span class="p">,</span> <span class="p">(</span><span class="mi">3</span><span class="p">,</span> <span class="mi">5</span><span class="p">,</span> <span class="mi">5</span><span class="p">),</span> <span class="n">stride</span><span class="o">=</span><span class="p">(</span><span class="mi">1</span><span class="p">,</span> <span class="mi">2</span><span class="p">,</span> <span class="mi">2</span><span class="p">),</span> <span class="n">padding</span><span class="o">=</span><span class="p">(</span><span class="mi">1</span><span class="p">,</span> <span class="mi">2</span><span class="p">,</span> <span class="mi">2</span><span class="p">))</span>
<span class="gp">&gt;&gt;&gt; </span><span class="c1"># non-square kernels and unequal stride and with padding and dilation</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">m</span> <span class="o">=</span> <span class="n">nn</span><span class="o">.</span><span class="n">quantized</span><span class="o">.</span><span class="n">Conv3d</span><span class="p">(</span><span class="mi">16</span><span class="p">,</span> <span class="mi">33</span><span class="p">,</span> <span class="p">(</span><span class="mi">3</span><span class="p">,</span> <span class="mi">5</span><span class="p">,</span> <span class="mi">5</span><span class="p">),</span> <span class="n">stride</span><span class="o">=</span><span class="p">(</span><span class="mi">1</span><span class="p">,</span> <span class="mi">2</span><span class="p">,</span> <span class="mi">2</span><span class="p">),</span> <span class="n">padding</span><span class="o">=</span><span class="p">(</span><span class="mi">1</span><span class="p">,</span> <span class="mi">2</span><span class="p">,</span> <span class="mi">2</span><span class="p">),</span> <span class="n">dilation</span><span class="o">=</span><span class="p">(</span><span class="mi">1</span><span class="p">,</span> <span class="mi">2</span><span class="p">,</span> <span class="mi">2</span><span class="p">))</span>
<span class="gp">&gt;&gt;&gt; </span><span class="nb">input</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">randn</span><span class="p">(</span><span class="mi">20</span><span class="p">,</span> <span class="mi">16</span><span class="p">,</span> <span class="mi">56</span><span class="p">,</span> <span class="mi">56</span><span class="p">,</span> <span class="mi">56</span><span class="p">)</span>
<span class="gp">&gt;&gt;&gt; </span><span class="c1"># quantize input to qint8</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">q_input</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">quantize_per_tensor</span><span class="p">(</span><span class="nb">input</span><span class="p">,</span> <span class="n">scale</span><span class="o">=</span><span class="mf">1.0</span><span class="p">,</span> <span class="n">zero_point</span><span class="o">=</span><span class="mi">0</span><span class="p">,</span> <span class="n">dtype</span><span class="o">=</span><span class="n">torch</span><span class="o">.</span><span class="n">qint32</span><span class="p">)</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">output</span> <span class="o">=</span> <span class="n">m</span><span class="p">(</span><span class="nb">input</span><span class="p">)</span>
</pre></div>
</div>
<dl class="method">
<dt id="torch.nn.quantized.Conv3d.from_float">
<em class="property">classmethod </em><code class="sig-name descname">from_float</code><span class="sig-paren">(</span><em class="sig-param">mod</em><span class="sig-paren">)</span><a class="reference internal" href="_modules/torch/nn/quantized/modules/conv.html#Conv3d.from_float"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#torch.nn.quantized.Conv3d.from_float" title="Permalink to this definition">¶</a></dt>
<dd><p>Creates a quantized module from a float module or qparams_dict.</p>
<dl class="field-list simple">
<dt class="field-odd">Parameters</dt>
<dd class="field-odd"><p><strong>mod</strong> (<a class="reference internal" href="nn.html#torch.nn.Module" title="torch.nn.Module"><em>Module</em></a>) – a float module, either produced by torch.quantization
utilities or provided by the user</p>
</dd>
</dl>
</dd></dl>

</dd></dl>

</div>
<div class="section" id="floatfunctional">
<h3>FloatFunctional<a class="headerlink" href="#floatfunctional" title="Permalink to this headline">¶</a></h3>
<dl class="class">
<dt id="torch.nn.quantized.FloatFunctional">
<em class="property">class </em><code class="sig-prename descclassname">torch.nn.quantized.</code><code class="sig-name descname">FloatFunctional</code><a class="reference internal" href="_modules/torch/nn/quantized/modules/functional_modules.html#FloatFunctional"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#torch.nn.quantized.FloatFunctional" title="Permalink to this definition">¶</a></dt>
<dd><p>State collector class for float operations.</p>
<p>The instance of this class can be used instead of the <code class="docutils literal notranslate"><span class="pre">torch.</span></code> prefix for
some operations. See example usage below.</p>
<div class="admonition note">
<p class="admonition-title">Note</p>
<p>This class does not provide a <code class="docutils literal notranslate"><span class="pre">forward</span></code> hook. Instead, you must use
one of the underlying functions (e.g. <code class="docutils literal notranslate"><span class="pre">add</span></code>).</p>
</div>
<p>Examples:</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="gp">&gt;&gt;&gt; </span><span class="n">f_add</span> <span class="o">=</span> <span class="n">FloatFunctional</span><span class="p">()</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">a</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">tensor</span><span class="p">(</span><span class="mf">3.0</span><span class="p">)</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">b</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">tensor</span><span class="p">(</span><span class="mf">4.0</span><span class="p">)</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">f_add</span><span class="o">.</span><span class="n">add</span><span class="p">(</span><span class="n">a</span><span class="p">,</span> <span class="n">b</span><span class="p">)</span>  <span class="c1"># Equivalent to ``torch.add(a, b)``</span>
</pre></div>
</div>
<dl class="simple">
<dt>Valid operation names:</dt><dd><ul class="simple">
<li><p>add</p></li>
<li><p>cat</p></li>
<li><p>mul</p></li>
<li><p>add_relu</p></li>
<li><p>add_scalar</p></li>
<li><p>mul_scalar</p></li>
</ul>
</dd>
</dl>
</dd></dl>

</div>
<div class="section" id="qfunctional">
<h3>QFunctional<a class="headerlink" href="#qfunctional" title="Permalink to this headline">¶</a></h3>
<dl class="class">
<dt id="torch.nn.quantized.QFunctional">
<em class="property">class </em><code class="sig-prename descclassname">torch.nn.quantized.</code><code class="sig-name descname">QFunctional</code><a class="reference internal" href="_modules/torch/nn/quantized/modules/functional_modules.html#QFunctional"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#torch.nn.quantized.QFunctional" title="Permalink to this definition">¶</a></dt>
<dd><p>Wrapper class for quantized operations.</p>
<p>The instance of this class can be used instead of the
<code class="docutils literal notranslate"><span class="pre">torch.ops.quantized</span></code> prefix. See example usage below.</p>
<div class="admonition note">
<p class="admonition-title">Note</p>
<p>This class does not provide a <code class="docutils literal notranslate"><span class="pre">forward</span></code> hook. Instead, you must use
one of the underlying functions (e.g. <code class="docutils literal notranslate"><span class="pre">add</span></code>).</p>
</div>
<p>Examples:</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="gp">&gt;&gt;&gt; </span><span class="n">q_add</span> <span class="o">=</span> <span class="n">QFunctional</span><span class="p">()</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">a</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">quantize_per_tensor</span><span class="p">(</span><span class="n">torch</span><span class="o">.</span><span class="n">tensor</span><span class="p">(</span><span class="mf">3.0</span><span class="p">),</span> <span class="mf">1.0</span><span class="p">,</span> <span class="mi">0</span><span class="p">,</span> <span class="n">torch</span><span class="o">.</span><span class="n">qint32</span><span class="p">)</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">b</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">quantize_per_tensor</span><span class="p">(</span><span class="n">torch</span><span class="o">.</span><span class="n">tensor</span><span class="p">(</span><span class="mf">4.0</span><span class="p">),</span> <span class="mf">1.0</span><span class="p">,</span> <span class="mi">0</span><span class="p">,</span> <span class="n">torch</span><span class="o">.</span><span class="n">qint32</span><span class="p">)</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">q_add</span><span class="o">.</span><span class="n">add</span><span class="p">(</span><span class="n">a</span><span class="p">,</span> <span class="n">b</span><span class="p">)</span>  <span class="c1"># Equivalent to ``torch.ops.quantized.add(a, b, 1.0, 0)``</span>
</pre></div>
</div>
<dl class="simple">
<dt>Valid operation names:</dt><dd><ul class="simple">
<li><p>add</p></li>
<li><p>cat</p></li>
<li><p>mul</p></li>
<li><p>add_relu</p></li>
<li><p>add_scalar</p></li>
<li><p>mul_scalar</p></li>
</ul>
</dd>
</dl>
</dd></dl>

</div>
<div class="section" id="quantize">
<h3>Quantize<a class="headerlink" href="#quantize" title="Permalink to this headline">¶</a></h3>
<dl class="class">
<dt id="torch.nn.quantized.Quantize">
<em class="property">class </em><code class="sig-prename descclassname">torch.nn.quantized.</code><code class="sig-name descname">Quantize</code><span class="sig-paren">(</span><em class="sig-param">scale</em>, <em class="sig-param">zero_point</em>, <em class="sig-param">dtype</em><span class="sig-paren">)</span><a class="reference internal" href="_modules/torch/nn/quantized/modules.html#Quantize"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#torch.nn.quantized.Quantize" title="Permalink to this definition">¶</a></dt>
<dd><p>Quantizes an incoming tensor</p>
<dl class="field-list simple">
<dt class="field-odd">Parameters</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>scale</strong> – scale of the output Quantized Tensor</p></li>
<li><p><strong>zero_point</strong> – zero_point of output Quantized Tensor</p></li>
<li><p><strong>dtype</strong> – data type of output Quantized Tensor</p></li>
</ul>
</dd>
<dt class="field-even">Variables</dt>
<dd class="field-even"><p><strong>zero_point, dtype</strong> (<em>`scale`</em><em>,</em>) – </p>
</dd>
</dl>
<dl>
<dt>Examples::</dt><dd><div class="doctest highlight-default notranslate"><div class="highlight"><pre><span></span><span class="gp">&gt;&gt;&gt; </span><span class="n">t</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">tensor</span><span class="p">([[</span><span class="mf">1.</span><span class="p">,</span> <span class="o">-</span><span class="mf">1.</span><span class="p">],</span> <span class="p">[</span><span class="mf">1.</span><span class="p">,</span> <span class="o">-</span><span class="mf">1.</span><span class="p">]])</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">scale</span><span class="p">,</span> <span class="n">zero_point</span><span class="p">,</span> <span class="n">dtype</span> <span class="o">=</span> <span class="mf">1.0</span><span class="p">,</span> <span class="mi">2</span><span class="p">,</span> <span class="n">torch</span><span class="o">.</span><span class="n">qint8</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">qm</span> <span class="o">=</span> <span class="n">Quantize</span><span class="p">(</span><span class="n">scale</span><span class="p">,</span> <span class="n">zero_point</span><span class="p">,</span> <span class="n">dtype</span><span class="p">)</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">qt</span> <span class="o">=</span> <span class="n">qm</span><span class="p">(</span><span class="n">t</span><span class="p">)</span>
<span class="gp">&gt;&gt;&gt; </span><span class="nb">print</span><span class="p">(</span><span class="n">qt</span><span class="p">)</span>
<span class="go">tensor([[ 1., -1.],</span>
<span class="go">        [ 1., -1.]], size=(2, 2), dtype=torch.qint8, scale=1.0, zero_point=2)</span>
</pre></div>
</div>
</dd>
</dl>
</dd></dl>

</div>
<div class="section" id="dequantize">
<h3>DeQuantize<a class="headerlink" href="#dequantize" title="Permalink to this headline">¶</a></h3>
<dl class="class">
<dt id="torch.nn.quantized.DeQuantize">
<em class="property">class </em><code class="sig-prename descclassname">torch.nn.quantized.</code><code class="sig-name descname">DeQuantize</code><a class="reference internal" href="_modules/torch/nn/quantized/modules.html#DeQuantize"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#torch.nn.quantized.DeQuantize" title="Permalink to this definition">¶</a></dt>
<dd><p>Dequantizes an incoming tensor</p>
<dl>
<dt>Examples::</dt><dd><div class="doctest highlight-default notranslate"><div class="highlight"><pre><span></span><span class="gp">&gt;&gt;&gt; </span><span class="nb">input</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">tensor</span><span class="p">([[</span><span class="mf">1.</span><span class="p">,</span> <span class="o">-</span><span class="mf">1.</span><span class="p">],</span> <span class="p">[</span><span class="mf">1.</span><span class="p">,</span> <span class="o">-</span><span class="mf">1.</span><span class="p">]])</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">scale</span><span class="p">,</span> <span class="n">zero_point</span><span class="p">,</span> <span class="n">dtype</span> <span class="o">=</span> <span class="mf">1.0</span><span class="p">,</span> <span class="mi">2</span><span class="p">,</span> <span class="n">torch</span><span class="o">.</span><span class="n">qint8</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">qm</span> <span class="o">=</span> <span class="n">Quantize</span><span class="p">(</span><span class="n">scale</span><span class="p">,</span> <span class="n">zero_point</span><span class="p">,</span> <span class="n">dtype</span><span class="p">)</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">quantized_input</span> <span class="o">=</span> <span class="n">qm</span><span class="p">(</span><span class="nb">input</span><span class="p">)</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">dqm</span> <span class="o">=</span> <span class="n">DeQuantize</span><span class="p">()</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">dequantized</span> <span class="o">=</span> <span class="n">dqm</span><span class="p">(</span><span class="n">quantized_input</span><span class="p">)</span>
<span class="gp">&gt;&gt;&gt; </span><span class="nb">print</span><span class="p">(</span><span class="n">dequantized</span><span class="p">)</span>
<span class="go">tensor([[ 1., -1.],</span>
<span class="go">        [ 1., -1.]], dtype=torch.float32)</span>
</pre></div>
</div>
</dd>
</dl>
</dd></dl>

</div>
<div class="section" id="id13">
<h3>Linear<a class="headerlink" href="#id13" title="Permalink to this headline">¶</a></h3>
<dl class="class">
<dt id="torch.nn.quantized.Linear">
<em class="property">class </em><code class="sig-prename descclassname">torch.nn.quantized.</code><code class="sig-name descname">Linear</code><span class="sig-paren">(</span><em class="sig-param">in_features</em>, <em class="sig-param">out_features</em>, <em class="sig-param">bias_=True</em>, <em class="sig-param">dtype=torch.qint8</em><span class="sig-paren">)</span><a class="reference internal" href="_modules/torch/nn/quantized/modules/linear.html#Linear"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#torch.nn.quantized.Linear" title="Permalink to this definition">¶</a></dt>
<dd><p>A quantized linear module with quantized tensor as inputs and outputs.
We adopt the same interface as <cite>torch.nn.Linear</cite>, please see
<a class="reference external" href="https://pytorch.org/docs/stable/nn.html#torch.nn.Linear">https://pytorch.org/docs/stable/nn.html#torch.nn.Linear</a> for documentation.</p>
<p>Similar to <a class="reference internal" href="nn.html#torch.nn.Linear" title="torch.nn.Linear"><code class="xref py py-class docutils literal notranslate"><span class="pre">Linear</span></code></a>, attributes will be randomly
initialized at module creation time and will be overwritten later</p>
<dl class="field-list simple">
<dt class="field-odd">Variables</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>~Linear.weight</strong> (<a class="reference internal" href="tensors.html#torch.Tensor" title="torch.Tensor"><em>Tensor</em></a>) – the non-learnable quantized weights of the module of
shape <span class="math"><span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><mo stretchy="false">(</mo><mtext>out_features</mtext><mo separator="true">,</mo><mtext>in_features</mtext><mo stretchy="false">)</mo></mrow><annotation encoding="application/x-tex">(\text{out\_features}, \text{in\_features})</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:1.06em;vertical-align:-0.31em;"></span><span class="mopen">(</span><span class="mord text"><span class="mord">out_features</span></span><span class="mpunct">,</span><span class="mspace" style="margin-right:0.16666666666666666em;"></span><span class="mord text"><span class="mord">in_features</span></span><span class="mclose">)</span></span></span></span>

</span>.</p></li>
<li><p><strong>~Linear.bias</strong> (<a class="reference internal" href="tensors.html#torch.Tensor" title="torch.Tensor"><em>Tensor</em></a>) – the non-learnable bias of the module of shape <span class="math"><span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><mo stretchy="false">(</mo><mtext>out_features</mtext><mo stretchy="false">)</mo></mrow><annotation encoding="application/x-tex">(\text{out\_features})</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:1.06em;vertical-align:-0.31em;"></span><span class="mopen">(</span><span class="mord text"><span class="mord">out_features</span></span><span class="mclose">)</span></span></span></span>

</span>.
If <code class="xref py py-attr docutils literal notranslate"><span class="pre">bias</span></code> is <code class="docutils literal notranslate"><span class="pre">True</span></code>, the values are initialized to zero.</p></li>
<li><p><strong>~Linear.scale</strong> – <cite>scale</cite> parameter of output Quantized Tensor, type: double</p></li>
<li><p><strong>~Linear.zero_point</strong> – <cite>zero_point</cite> parameter for output Quantized Tensor, type: long</p></li>
</ul>
</dd>
</dl>
<p>Examples:</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="gp">&gt;&gt;&gt; </span><span class="n">m</span> <span class="o">=</span> <span class="n">nn</span><span class="o">.</span><span class="n">quantized</span><span class="o">.</span><span class="n">Linear</span><span class="p">(</span><span class="mi">20</span><span class="p">,</span> <span class="mi">30</span><span class="p">)</span>
<span class="gp">&gt;&gt;&gt; </span><span class="nb">input</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">randn</span><span class="p">(</span><span class="mi">128</span><span class="p">,</span> <span class="mi">20</span><span class="p">)</span>
<span class="gp">&gt;&gt;&gt; </span><span class="nb">input</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">quantize_per_tensor</span><span class="p">(</span><span class="nb">input</span><span class="p">,</span> <span class="mf">1.0</span><span class="p">,</span> <span class="mi">0</span><span class="p">,</span> <span class="n">torch</span><span class="o">.</span><span class="n">quint8</span><span class="p">)</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">output</span> <span class="o">=</span> <span class="n">m</span><span class="p">(</span><span class="nb">input</span><span class="p">)</span>
<span class="gp">&gt;&gt;&gt; </span><span class="nb">print</span><span class="p">(</span><span class="n">output</span><span class="o">.</span><span class="n">size</span><span class="p">())</span>
<span class="go">torch.Size([128, 30])</span>
</pre></div>
</div>
<dl class="method">
<dt id="torch.nn.quantized.Linear.from_float">
<em class="property">classmethod </em><code class="sig-name descname">from_float</code><span class="sig-paren">(</span><em class="sig-param">mod</em><span class="sig-paren">)</span><a class="reference internal" href="_modules/torch/nn/quantized/modules/linear.html#Linear.from_float"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#torch.nn.quantized.Linear.from_float" title="Permalink to this definition">¶</a></dt>
<dd><p>Create a quantized module from a float module or qparams_dict</p>
<dl class="field-list simple">
<dt class="field-odd">Parameters</dt>
<dd class="field-odd"><p><strong>mod</strong> (<a class="reference internal" href="nn.html#torch.nn.Module" title="torch.nn.Module"><em>Module</em></a>) – a float module, either produced by torch.quantization
utilities or provided by the user</p>
</dd>
</dl>
</dd></dl>

</dd></dl>

</div>
</div>
<div class="section" id="id14">
<h2>torch.nn.quantized.dynamic<a class="headerlink" href="#id14" title="Permalink to this headline">¶</a></h2>
<span class="target" id="module-torch.nn.quantized.dynamic"></span><div class="section" id="id15">
<h3>Linear<a class="headerlink" href="#id15" title="Permalink to this headline">¶</a></h3>
<dl class="class">
<dt id="torch.nn.quantized.dynamic.Linear">
<em class="property">class </em><code class="sig-prename descclassname">torch.nn.quantized.dynamic.</code><code class="sig-name descname">Linear</code><span class="sig-paren">(</span><em class="sig-param">in_features</em>, <em class="sig-param">out_features</em>, <em class="sig-param">bias_=True</em>, <em class="sig-param">dtype=torch.qint8</em><span class="sig-paren">)</span><a class="reference internal" href="_modules/torch/nn/quantized/dynamic/modules/linear.html#Linear"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#torch.nn.quantized.dynamic.Linear" title="Permalink to this definition">¶</a></dt>
<dd><p>A dynamic quantized linear module with quantized tensor as inputs and outputs.
We adopt the same interface as <cite>torch.nn.Linear</cite>, please see
<a class="reference external" href="https://pytorch.org/docs/stable/nn.html#torch.nn.Linear">https://pytorch.org/docs/stable/nn.html#torch.nn.Linear</a> for documentation.</p>
<p>Similar to <a class="reference internal" href="nn.html#torch.nn.Linear" title="torch.nn.Linear"><code class="xref py py-class docutils literal notranslate"><span class="pre">torch.nn.Linear</span></code></a>, attributes will be randomly
initialized at module creation time and will be overwritten later</p>
<dl class="field-list simple">
<dt class="field-odd">Variables</dt>
<dd class="field-odd"><ul class="simple">
<li><p><strong>~Linear.weight</strong> (<a class="reference internal" href="tensors.html#torch.Tensor" title="torch.Tensor"><em>Tensor</em></a>) – the non-learnable quantized weights of the module which are of
shape <span class="math"><span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><mo stretchy="false">(</mo><mtext>out_features</mtext><mo separator="true">,</mo><mtext>in_features</mtext><mo stretchy="false">)</mo></mrow><annotation encoding="application/x-tex">(\text{out\_features}, \text{in\_features})</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:1.06em;vertical-align:-0.31em;"></span><span class="mopen">(</span><span class="mord text"><span class="mord">out_features</span></span><span class="mpunct">,</span><span class="mspace" style="margin-right:0.16666666666666666em;"></span><span class="mord text"><span class="mord">in_features</span></span><span class="mclose">)</span></span></span></span>

</span>.</p></li>
<li><p><strong>~Linear.bias</strong> (<a class="reference internal" href="tensors.html#torch.Tensor" title="torch.Tensor"><em>Tensor</em></a>) – the non-learnable bias of the module of shape <span class="math"><span class="katex"><span class="katex-mathml"><math xmlns="http://www.w3.org/1998/Math/MathML"><semantics><mrow><mo stretchy="false">(</mo><mtext>out_features</mtext><mo stretchy="false">)</mo></mrow><annotation encoding="application/x-tex">(\text{out\_features})</annotation></semantics></math></span><span class="katex-html" aria-hidden="true"><span class="base"><span class="strut" style="height:1.06em;vertical-align:-0.31em;"></span><span class="mopen">(</span><span class="mord text"><span class="mord">out_features</span></span><span class="mclose">)</span></span></span></span>

</span>.
If <code class="xref py py-attr docutils literal notranslate"><span class="pre">bias</span></code> is <code class="docutils literal notranslate"><span class="pre">True</span></code>, the values are initialized to zero.</p></li>
</ul>
</dd>
</dl>
<p>Examples:</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="gp">&gt;&gt;&gt; </span><span class="n">m</span> <span class="o">=</span> <span class="n">nn</span><span class="o">.</span><span class="n">quantized</span><span class="o">.</span><span class="n">dynamic</span><span class="o">.</span><span class="n">Linear</span><span class="p">(</span><span class="mi">20</span><span class="p">,</span> <span class="mi">30</span><span class="p">)</span>
<span class="gp">&gt;&gt;&gt; </span><span class="nb">input</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">randn</span><span class="p">(</span><span class="mi">128</span><span class="p">,</span> <span class="mi">20</span><span class="p">)</span>
<span class="gp">&gt;&gt;&gt; </span><span class="n">output</span> <span class="o">=</span> <span class="n">m</span><span class="p">(</span><span class="nb">input</span><span class="p">)</span>
<span class="gp">&gt;&gt;&gt; </span><span class="nb">print</span><span class="p">(</span><span class="n">output</span><span class="o">.</span><span class="n">size</span><span class="p">())</span>
<span class="go">torch.Size([128, 30])</span>
</pre></div>
</div>
<dl class="method">
<dt id="torch.nn.quantized.dynamic.Linear.from_float">
<em class="property">classmethod </em><code class="sig-name descname">from_float</code><span class="sig-paren">(</span><em class="sig-param">mod</em><span class="sig-paren">)</span><a class="reference internal" href="_modules/torch/nn/quantized/dynamic/modules/linear.html#Linear.from_float"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#torch.nn.quantized.dynamic.Linear.from_float" title="Permalink to this definition">¶</a></dt>
<dd><p>Create a dynamic quantized module from a float module or qparams_dict</p>
<dl class="field-list simple">
<dt class="field-odd">Parameters</dt>
<dd class="field-odd"><p><strong>mod</strong> (<a class="reference internal" href="nn.html#torch.nn.Module" title="torch.nn.Module"><em>Module</em></a>) – a float module, either produced by torch.quantization
utilities or provided by the user</p>
</dd>
</dl>
</dd></dl>

</dd></dl>

</div>
<div class="section" id="lstm">
<h3>LSTM<a class="headerlink" href="#lstm" title="Permalink to this headline">¶</a></h3>
<dl class="class">
<dt id="torch.nn.quantized.dynamic.LSTM">
<em class="property">class </em><code class="sig-prename descclassname">torch.nn.quantized.dynamic.</code><code class="sig-name descname">LSTM</code><span class="sig-paren">(</span><em class="sig-param">*args</em>, <em class="sig-param">**kwargs</em><span class="sig-paren">)</span><a class="reference internal" href="_modules/torch/nn/quantized/dynamic/modules/rnn.html#LSTM"><span class="viewcode-link">[source]</span></a><a class="headerlink" href="#torch.nn.quantized.dynamic.LSTM" title="Permalink to this definition">¶</a></dt>
<dd></dd></dl>

</div>
</div>
</div>


             </article>
             
            </div>
            <footer>
  
    <div class="rst-footer-buttons" role="navigation" aria-label="footer navigation">
      
        <a href="rpc/index.html" class="btn btn-neutral float-right" title="Distributed RPC Framework" accesskey="n" rel="next">Next <img src="_static/images/chevron-right-orange.svg" class="next-page" alt=""></a>
      
      
        <a href="optim.html" class="btn btn-neutral" title="torch.optim" accesskey="p" rel="prev"><img src="_static/images/chevron-right-orange.svg" class="previous-page" alt=""> Previous</a>
      
    </div>
  

  

    <hr>

  

  <div role="contentinfo">
    <p>
        &copy; Copyright 2019, Torch Contributors.

    </p>
  </div>
    
      <div>
        Built with <a href="https://www.sphinx-doc.org/">Sphinx</a> using a <a href="https://github.com/rtfd/sphinx_rtd_theme">theme</a> provided by <a href="https://readthedocs.org">Read the Docs</a>.
      </div>
     

</footer>

          </div>
        </div>

        <div class="pytorch-content-right" id="pytorch-content-right">
          <div class="pytorch-right-menu" id="pytorch-right-menu">
            <div class="pytorch-side-scroll" id="pytorch-side-scroll-right">
              <ul>
<li><a class="reference internal" href="#">Quantization</a><ul>
<li><a class="reference internal" href="#introduction-to-quantization">Introduction to Quantization</a></li>
<li><a class="reference internal" href="#quantized-tensors">Quantized Tensors</a></li>
<li><a class="reference internal" href="#operation-coverage">Operation coverage</a><ul>
<li><a class="reference internal" href="#quantized-torch-tensor-operations">Quantized <code class="docutils literal notranslate"><span class="pre">torch.Tensor</span></code> operations</a></li>
<li><a class="reference internal" href="#torch-nn-functional"><code class="docutils literal notranslate"><span class="pre">torch.nn.functional</span></code></a></li>
<li><a class="reference internal" href="#torch-nn-intrinsic"><code class="docutils literal notranslate"><span class="pre">torch.nn.intrinsic</span></code></a></li>
<li><a class="reference internal" href="#torch-nn-qat"><code class="docutils literal notranslate"><span class="pre">torch.nn.qat</span></code></a></li>
<li><a class="reference internal" href="#torch-quantization"><code class="docutils literal notranslate"><span class="pre">torch.quantization</span></code></a></li>
<li><a class="reference internal" href="#torch-nn-quantized"><code class="docutils literal notranslate"><span class="pre">torch.nn.quantized</span></code></a></li>
<li><a class="reference internal" href="#torch-nn-quantized-dynamic"><code class="docutils literal notranslate"><span class="pre">torch.nn.quantized.dynamic</span></code></a></li>
<li><a class="reference internal" href="#torch-nn-quantized-functional"><code class="docutils literal notranslate"><span class="pre">torch.nn.quantized.functional</span></code></a></li>
<li><a class="reference internal" href="#quantized-dtypes-and-quantization-schemes">Quantized dtypes and quantization schemes</a></li>
</ul>
</li>
<li><a class="reference internal" href="#quantization-workflows">Quantization Workflows</a></li>
<li><a class="reference internal" href="#model-preparation-for-quantization">Model Preparation for Quantization</a></li>
<li><a class="reference internal" href="#id1">torch.quantization</a><ul>
<li><a class="reference internal" href="#top-level-quantization-apis">Top-level quantization APIs</a></li>
<li><a class="reference internal" href="#preparing-model-for-quantization">Preparing model for quantization</a></li>
<li><a class="reference internal" href="#utility-functions">Utility functions</a></li>
<li><a class="reference internal" href="#observers">Observers</a></li>
<li><a class="reference internal" href="#debugging-utilities">Debugging utilities</a></li>
</ul>
</li>
<li><a class="reference internal" href="#id2">torch.nn.intrinsic</a><ul>
<li><a class="reference internal" href="#convbn2d">ConvBn2d</a></li>
<li><a class="reference internal" href="#convbnrelu2d">ConvBnReLU2d</a></li>
<li><a class="reference internal" href="#convrelu2d">ConvReLU2d</a></li>
<li><a class="reference internal" href="#convrelu3d">ConvReLU3d</a></li>
<li><a class="reference internal" href="#linearrelu">LinearReLU</a></li>
</ul>
</li>
<li><a class="reference internal" href="#torch-nn-instrinsic-qat">torch.nn.intrinsic.qat</a><ul>
<li><a class="reference internal" href="#id3">ConvBn2d</a></li>
<li><a class="reference internal" href="#id4">ConvBnReLU2d</a></li>
<li><a class="reference internal" href="#id5">ConvReLU2d</a></li>
<li><a class="reference internal" href="#id6">LinearReLU</a></li>
</ul>
</li>
<li><a class="reference internal" href="#torch-nn-intrinsic-quantized">torch.nn.intrinsic.quantized</a><ul>
<li><a class="reference internal" href="#id7">ConvReLU2d</a></li>
<li><a class="reference internal" href="#id8">ConvReLU3d</a></li>
<li><a class="reference internal" href="#id9">LinearReLU</a></li>
</ul>
</li>
<li><a class="reference internal" href="#id10">torch.nn.qat</a><ul>
<li><a class="reference internal" href="#conv2d">Conv2d</a></li>
<li><a class="reference internal" href="#linear">Linear</a></li>
</ul>
</li>
<li><a class="reference internal" href="#id11">torch.nn.quantized</a><ul>
<li><a class="reference internal" href="#module-torch.nn.quantized.functional">Functional interface</a></li>
<li><a class="reference internal" href="#relu">ReLU</a></li>
<li><a class="reference internal" href="#relu6">ReLU6</a></li>
<li><a class="reference internal" href="#id12">Conv2d</a></li>
<li><a class="reference internal" href="#conv3d">Conv3d</a></li>
<li><a class="reference internal" href="#floatfunctional">FloatFunctional</a></li>
<li><a class="reference internal" href="#qfunctional">QFunctional</a></li>
<li><a class="reference internal" href="#quantize">Quantize</a></li>
<li><a class="reference internal" href="#dequantize">DeQuantize</a></li>
<li><a class="reference internal" href="#id13">Linear</a></li>
</ul>
</li>
<li><a class="reference internal" href="#id14">torch.nn.quantized.dynamic</a><ul>
<li><a class="reference internal" href="#id15">Linear</a></li>
<li><a class="reference internal" href="#lstm">LSTM</a></li>
</ul>
</li>
</ul>
</li>
</ul>

            </div>
          </div>
        </div>
      </section>
    </div>

  


  

     
       <script type="text/javascript" id="documentation_options" data-url_root="./" src="_static/documentation_options.js"></script>
         <script src="_static/jquery.js"></script>
         <script src="_static/underscore.js"></script>
         <script src="_static/doctools.js"></script>
         <script src="_static/language_data.js"></script>
     

  

  <script type="text/javascript" src="_static/js/vendor/popper.min.js"></script>
  <script type="text/javascript" src="_static/js/vendor/bootstrap.min.js"></script>
  <script src="https://cdnjs.cloudflare.com/ajax/libs/list.js/1.5.0/list.min.js"></script>
  <script type="text/javascript" src="_static/js/theme.js"></script>

  <script type="text/javascript">
      // Enable the Read the Docs theme's navigation behavior (sticky /
      // collapsible sidebar) once the DOM is ready. Passing true turns on
      // the sticky-nav variant.
      jQuery(document).ready(function () {
          SphinxRtdTheme.Navigation.enable(true);
      });
  </script>
 
<!-- Standard Google Analytics (analytics.js) bootstrap snippet.
     Kept verbatim: the minified IIFE below is the canonical loader
     published by Google and should not be reformatted. -->
<script>
  // Creates the global `ga` command queue, stamps the load time, and
  // injects the async analytics.js <script> tag before the first script
  // on the page. Commands queued on `ga` are replayed once the library loads.
  (function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){
  (i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o),
  m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m)
  })(window,document,'script','https://www.google-analytics.com/analytics.js','ga');

  // Register the docs property and record the initial page view.
  ga('create', 'UA-90545585-1', 'auto');
  ga('send', 'pageview');

</script>

<script async src="https://www.googletagmanager.com/gtag/js?id=UA-117752657-2"></script>

<!-- Google tag (gtag.js) bootstrap for property UA-117752657-2; the async
     loader for gtag/js is included just above this block. -->
<script>
  // Command queue consumed by gtag.js once it loads.
  window.dataLayer = window.dataLayer || [];

  // NOTE: gtag.js requires the live `arguments` object to be pushed
  // (not a spread copy), so this exact form must be preserved.
  function gtag(){dataLayer.push(arguments);}

  gtag('js', new Date());
  gtag('config', 'UA-117752657-2');
</script>

<img height="1" width="1" style="border-style:none;" alt="" src="https://www.googleadservices.com/pagead/conversion/795629140/?label=txkmCPmdtosBENSssfsC&amp;guid=ON&amp;script=0"/>


  <!-- Begin Footer -->

  <div class="container-fluid docs-tutorials-resources" id="docs-tutorials-resources">
    <div class="container">
      <div class="row">
        <div class="col-md-4 text-center">
          <h2>Docs</h2>
          <p>Access comprehensive developer documentation for PyTorch</p>
          <a class="with-right-arrow" href="https://pytorch.org/docs/stable/index.html">View Docs</a>
        </div>

        <div class="col-md-4 text-center">
          <h2>Tutorials</h2>
          <p>Get in-depth tutorials for beginners and advanced developers</p>
          <a class="with-right-arrow" href="https://pytorch.org/tutorials">View Tutorials</a>
        </div>

        <div class="col-md-4 text-center">
          <h2>Resources</h2>
          <p>Find development resources and get your questions answered</p>
          <a class="with-right-arrow" href="https://pytorch.org/resources">View Resources</a>
        </div>
      </div>
    </div>
  </div>

  <footer class="site-footer">
    <div class="container footer-container">
      <div class="footer-logo-wrapper">
        <a href="https://pytorch.org/" class="footer-logo"></a>
      </div>

      <div class="footer-links-wrapper">
        <div class="footer-links-col">
          <ul>
            <li class="list-title"><a href="https://pytorch.org/">PyTorch</a></li>
            <li><a href="https://pytorch.org/get-started">Get Started</a></li>
            <li><a href="https://pytorch.org/features">Features</a></li>
            <li><a href="https://pytorch.org/ecosystem">Ecosystem</a></li>
            <li><a href="https://pytorch.org/blog/">Blog</a></li>
            <li><a href="https://github.com/pytorch/pytorch/blob/master/CONTRIBUTING.md">Contributing</a></li>
          </ul>
        </div>

        <div class="footer-links-col">
          <ul>
            <li class="list-title"><a href="https://pytorch.org/resources">Resources</a></li>
            <li><a href="https://pytorch.org/tutorials">Tutorials</a></li>
            <li><a href="https://pytorch.org/docs/stable/index.html">Docs</a></li>
            <li><a href="https://discuss.pytorch.org" target="_blank" rel="noopener noreferrer">Discuss</a></li>
            <li><a href="https://github.com/pytorch/pytorch/issues" target="_blank" rel="noopener noreferrer">Github Issues</a></li>
            <li><a href="https://pytorch.org/assets/brand-guidelines/PyTorch-Brand-Guidelines.pdf" target="_blank" rel="noopener noreferrer">Brand Guidelines</a></li>
          </ul>
        </div>

        <div class="footer-links-col follow-us-col">
          <ul>
            <li class="list-title">Stay Connected</li>
            <li>
              <div id="mc_embed_signup">
                <form
                  action="https://twitter.us14.list-manage.com/subscribe/post?u=75419c71fe0a935e53dfa4a3f&id=91d0dccd39"
                  method="post"
                  id="mc-embedded-subscribe-form"
                  name="mc-embedded-subscribe-form"
                  class="email-subscribe-form validate"
                  target="_blank"
                  novalidate>
                  <div id="mc_embed_signup_scroll" class="email-subscribe-form-fields-wrapper">
                    <div class="mc-field-group">
                      <label for="mce-EMAIL" style="display:none;">Email Address</label>
                      <input type="email" value="" name="EMAIL" class="required email" id="mce-EMAIL" placeholder="Email Address">
                    </div>

                    <div id="mce-responses" class="clear">
                      <div class="response" id="mce-error-response" style="display:none"></div>
                      <div class="response" id="mce-success-response" style="display:none"></div>
                    </div>    <!-- real people should not fill this in and expect good things - do not remove this or risk form bot signups-->

                    <div style="position: absolute; left: -5000px;" aria-hidden="true"><input type="text" name="b_75419c71fe0a935e53dfa4a3f_91d0dccd39" tabindex="-1" value=""></div>

                    <div class="clear">
                      <input type="submit" value="" name="subscribe" id="mc-embedded-subscribe" class="button email-subscribe-button">
                    </div>
                  </div>
                </form>
              </div>

            </li>
          </ul>

          <div class="footer-social-icons">
            <a href="https://www.facebook.com/pytorch" target="_blank" rel="noopener noreferrer" class="facebook" aria-label="PyTorch on Facebook"></a>
            <a href="https://twitter.com/pytorch" target="_blank" rel="noopener noreferrer" class="twitter" aria-label="PyTorch on Twitter"></a>
            <a href="https://www.youtube.com/pytorch" target="_blank" rel="noopener noreferrer" class="youtube" aria-label="PyTorch on YouTube"></a>
          </div>
        </div>
      </div>
    </div>
  </footer>

  <div class="cookie-banner-wrapper">
  <div class="container">
    <p class="gdpr-notice">To analyze traffic and optimize your experience, we serve cookies on this site. By clicking or navigating, you agree to allow our usage of cookies. As the current maintainers of this site, Facebook’s Cookies Policy applies. Learn more, including about available controls: <a href="https://www.facebook.com/policies/cookies/">Cookies Policy</a>.</p>
    <img class="close-button" src="_static/images/pytorch-x.svg" alt="Dismiss cookie notice">
  </div>
</div>

  <!-- End Footer -->

  <!-- Begin Mobile Menu -->

  <div class="mobile-main-menu">
    <div class="container-fluid">
      <div class="container">
        <div class="mobile-main-menu-header-container">
          <a class="header-logo" href="https://pytorch.org/" aria-label="PyTorch"></a>
          <a class="main-menu-close-button" href="#" data-behavior="close-mobile-menu"></a>
        </div>
      </div>
    </div>

    <div class="mobile-main-menu-links-container">
      <div class="main-menu">
        <ul>
          <li>
            <a href="https://pytorch.org/get-started">Get Started</a>
          </li>

          <li>
            <a href="https://pytorch.org/features">Features</a>
          </li>

          <li>
            <a href="https://pytorch.org/ecosystem">Ecosystem</a>
          </li>

          <li>
            <a href="https://pytorch.org/mobile">Mobile</a>
          </li>

          <li>
            <a href="https://pytorch.org/hub">PyTorch Hub</a>
          </li>

          <li>
            <a href="https://pytorch.org/blog/">Blog</a>
          </li>

          <li>
            <a href="https://pytorch.org/tutorials">Tutorials</a>
          </li>

          <li class="active">
            <a href="https://pytorch.org/docs/stable/index.html">Docs</a>
          </li>

          <li>
            <a href="https://pytorch.org/resources">Resources</a>
          </li>

          <li>
            <a href="https://github.com/pytorch/pytorch">Github</a>
          </li>
        </ul>
      </div>
    </div>
  </div>

  <!-- End Mobile Menu -->

  <script type="text/javascript" src="_static/js/vendor/anchor.min.js"></script>

  <script type="text/javascript">
    // Initialize the PyTorch docs theme widgets once the DOM is ready.
    jQuery(function ($) {
      // Wire up navigation, anchor, and filtering behaviors.
      mobileMenu.bind();
      mobileTOC.bind();
      pytorchAnchors.bind();
      sideMenus.bind();
      scrollToAnchor.bind();
      highlightNavigation.bind();
      mainMenuDropdown.bind();
      filterTags.bind();

      // Drop the empty placeholder nodes Sphinx emits for untagged pages.
      $("[data-tags='null']").remove();

      // Links cannot be nested inside code blocks, so tag anchors that
      // wrap a code literal with a class CSS can style instead.
      $("article.pytorch-article a").has("span.pre").addClass("has-code");
    });
  </script>
</body>
</html>