


<!DOCTYPE html>
<!--[if IE 8]><html class="no-js lt-ie9" lang="en" > <![endif]-->
<!--[if gt IE 8]><!--> <html class="no-js" lang="en" > <!--<![endif]-->
<head>
  <meta charset="utf-8">
  
  <meta name="viewport" content="width=device-width, initial-scale=1.0">
  
  <title>torch.quantization.quantize &mdash; PyTorch master documentation</title>
  

  
  
  
  
    <link rel="canonical" href="https://pytorch.org/docs/stable/_modules/torch/quantization/quantize.html"/>
  

  

  
  
    

  

  <link rel="stylesheet" href="../../../_static/css/theme.css" type="text/css" />
  <!-- <link rel="stylesheet" href="../../../_static/pygments.css" type="text/css" /> -->
  <link rel="stylesheet" href="../../../_static/css/jit.css" type="text/css" />
  <link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/katex@0.11.1/dist/katex.min.css" type="text/css" />
  <link rel="stylesheet" href="../../../_static/katex-math.css" type="text/css" />
    <link rel="index" title="Index" href="../../../genindex.html" />
    <link rel="search" title="Search" href="../../../search.html" /> 

  
  <script src="../../../_static/js/modernizr.min.js"></script>

  <!-- Preload the theme fonts -->

<link rel="preload" href="../../../_static/fonts/FreightSans/freight-sans-book.woff2" as="font" type="font/woff2" crossorigin="anonymous">
<link rel="preload" href="../../../_static/fonts/FreightSans/freight-sans-medium.woff2" as="font" type="font/woff2" crossorigin="anonymous">
<link rel="preload" href="../../../_static/fonts/IBMPlexMono/IBMPlexMono-Medium.woff2" as="font" type="font/woff2" crossorigin="anonymous">
<link rel="preload" href="../../../_static/fonts/FreightSans/freight-sans-bold.woff2" as="font" type="font/woff2" crossorigin="anonymous">
<link rel="preload" href="../../../_static/fonts/FreightSans/freight-sans-medium-italic.woff2" as="font" type="font/woff2" crossorigin="anonymous">
<link rel="preload" href="../../../_static/fonts/IBMPlexMono/IBMPlexMono-SemiBold.woff2" as="font" type="font/woff2" crossorigin="anonymous">

<!-- Preload the katex fonts -->

<link rel="preload" href="https://cdn.jsdelivr.net/npm/katex@0.10.0/dist/fonts/KaTeX_Math-Italic.woff2" as="font" type="font/woff2" crossorigin="anonymous">
<link rel="preload" href="https://cdn.jsdelivr.net/npm/katex@0.10.0/dist/fonts/KaTeX_Main-Regular.woff2" as="font" type="font/woff2" crossorigin="anonymous">
<link rel="preload" href="https://cdn.jsdelivr.net/npm/katex@0.10.0/dist/fonts/KaTeX_Main-Bold.woff2" as="font" type="font/woff2" crossorigin="anonymous">
<link rel="preload" href="https://cdn.jsdelivr.net/npm/katex@0.10.0/dist/fonts/KaTeX_Size1-Regular.woff2" as="font" type="font/woff2" crossorigin="anonymous">
<link rel="preload" href="https://cdn.jsdelivr.net/npm/katex@0.10.0/dist/fonts/KaTeX_Size4-Regular.woff2" as="font" type="font/woff2" crossorigin="anonymous">
<link rel="preload" href="https://cdn.jsdelivr.net/npm/katex@0.10.0/dist/fonts/KaTeX_Size2-Regular.woff2" as="font" type="font/woff2" crossorigin="anonymous">
<link rel="preload" href="https://cdn.jsdelivr.net/npm/katex@0.10.0/dist/fonts/KaTeX_Size3-Regular.woff2" as="font" type="font/woff2" crossorigin="anonymous">
<link rel="preload" href="https://cdn.jsdelivr.net/npm/katex@0.10.0/dist/fonts/KaTeX_Caligraphic-Regular.woff2" as="font" type="font/woff2" crossorigin="anonymous">
</head>

<div class="container-fluid header-holder tutorials-header" id="header-holder">
  <div class="container">
    <div class="header-container">
      <a class="header-logo" href="https://pytorch.org/" aria-label="PyTorch"></a>

      <div class="main-menu">
        <ul>
          <li>
            <a href="https://pytorch.org/get-started">Get Started</a>
          </li>

          <li>
            <div class="ecosystem-dropdown">
              <a id="dropdownMenuButton" data-toggle="ecosystem-dropdown">
                Ecosystem
              </a>
              <div class="ecosystem-dropdown-menu">
                <a class="nav-dropdown-item" href="https://pytorch.org/hub">
                  <span class="dropdown-title">Models (Beta)</span>
                  <p>Discover, publish, and reuse pre-trained models</p>
                </a>
                <a class="nav-dropdown-item" href="https://pytorch.org/ecosystem">
                  <span class="dropdown-title">Tools &amp; Libraries</span>
                  <p>Explore the ecosystem of tools and libraries</p>
                </a>
              </div>
            </div>
          </li>

          <li>
            <a href="https://pytorch.org/mobile">Mobile</a>
          </li>

          <li>
            <a href="https://pytorch.org/blog/">Blog</a>
          </li>

          <li>
            <a href="https://pytorch.org/tutorials">Tutorials</a>
          </li>

          <li class="active">
            <a href="https://pytorch.org/docs/stable/index.html">Docs</a>
          </li>

          <li>
            <div class="resources-dropdown">
              <a id="resourcesDropdownButton" data-toggle="resources-dropdown">
                Resources
              </a>
              <div class="resources-dropdown-menu">
                <a class="nav-dropdown-item" href="https://pytorch.org/resources">
                  <span class="dropdown-title">Developer Resources</span>
                  <p>Find resources and get questions answered</p>
                </a>
                <a class="nav-dropdown-item" href="https://pytorch.org/features">
                  <span class="dropdown-title">About</span>
                  <p>Learn about PyTorch’s features and capabilities</p>
                </a>
              </div>
            </div>
          </li>

          <li>
            <a href="https://github.com/pytorch/pytorch">Github</a>
          </li>
        </ul>
      </div>

      <a class="main-menu-open-button" href="#" data-behavior="open-mobile-menu"></a>
    </div>

  </div>
</div>


<!-- NOTE(review): the header markup above (#header-holder) is emitted before this
     <body> tag; browsers reparent it into <body> automatically, but the template
     should place the header after <body> so the document is valid HTML. -->
<body class="pytorch-body">

   

    

    <div class="table-of-contents-link-wrapper">
      <span>Table of Contents</span>
      <a href="#" class="toggle-table-of-contents" data-behavior="toggle-table-of-contents"></a>
    </div>

    <nav data-toggle="wy-nav-shift" class="pytorch-left-menu" id="pytorch-left-menu">
      <div class="pytorch-side-scroll">
        <div class="pytorch-menu pytorch-menu-vertical" data-spy="affix" role="navigation" aria-label="main navigation">
          <div class="pytorch-left-menu-search">
            

            
              
              
                <div class="version">
                  master (1.5.0)
                </div>
              
            

            


  


<div role="search">
  <form id="rtd-search-form" class="wy-form" action="../../../search.html" method="get">
    <input type="text" name="q" placeholder="Search Docs" />
    <input type="hidden" name="check_keywords" value="yes" />
    <input type="hidden" name="area" value="default" />
  </form>
</div>

            
          </div>

          
<div>
  <a style="color:#F05732" href="https://pytorch.org/docs/stable/_modules/torch/quantization/quantize.html">
    You are viewing unstable developer preview docs.
    Click here to view docs for latest stable release.
  </a>
</div>

            
            
              
            
            
              <p class="caption"><span class="caption-text">Notes</span></p>
<ul>
<li class="toctree-l1"><a class="reference internal" href="../../../notes/amp_examples.html">Automatic Mixed Precision examples</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../notes/autograd.html">Autograd mechanics</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../notes/broadcasting.html">Broadcasting semantics</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../notes/cpu_threading_torchscript_inference.html">CPU threading and TorchScript inference</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../notes/cuda.html">CUDA semantics</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../notes/ddp.html">Distributed Data Parallel</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../notes/extending.html">Extending PyTorch</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../notes/faq.html">Frequently Asked Questions</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../notes/large_scale_deployments.html">Features for large-scale deployments</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../notes/multiprocessing.html">Multiprocessing best practices</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../notes/randomness.html">Reproducibility</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../notes/serialization.html">Serialization semantics</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../notes/windows.html">Windows FAQ</a></li>
</ul>
<p class="caption"><span class="caption-text">Language Bindings</span></p>
<ul>
<li class="toctree-l1"><a class="reference external" href="https://pytorch.org/cppdocs/">C++ API</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../packages.html">Javadoc</a></li>
</ul>
<p class="caption"><span class="caption-text">Python API</span></p>
<ul>
<li class="toctree-l1"><a class="reference internal" href="../../../torch.html">torch</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../nn.html">torch.nn</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../nn.functional.html">torch.nn.functional</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../tensors.html">torch.Tensor</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../tensor_attributes.html">Tensor Attributes</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../tensor_view.html">Tensor Views</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../autograd.html">torch.autograd</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../cuda.html">torch.cuda</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../amp.html">torch.cuda.amp</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../distributed.html">torch.distributed</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../distributions.html">torch.distributions</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../hub.html">torch.hub</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../jit.html">torch.jit</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../nn.init.html">torch.nn.init</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../onnx.html">torch.onnx</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../optim.html">torch.optim</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../quantization.html">Quantization</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../rpc/index.html">Distributed RPC Framework</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../random.html">torch.random</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../sparse.html">torch.sparse</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../storage.html">torch.Storage</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../bottleneck.html">torch.utils.bottleneck</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../checkpoint.html">torch.utils.checkpoint</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../cpp_extension.html">torch.utils.cpp_extension</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../data.html">torch.utils.data</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../dlpack.html">torch.utils.dlpack</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../model_zoo.html">torch.utils.model_zoo</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../tensorboard.html">torch.utils.tensorboard</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../type_info.html">Type Info</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../named_tensor.html">Named Tensors</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../name_inference.html">Named Tensors operator coverage</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../__config__.html">torch.__config__</a></li>
</ul>
<p class="caption"><span class="caption-text">Libraries</span></p>
<ul>
<li class="toctree-l1"><a class="reference external" href="https://pytorch.org/audio">torchaudio</a></li>
<li class="toctree-l1"><a class="reference external" href="https://pytorch.org/text">torchtext</a></li>
<li class="toctree-l1"><a class="reference external" href="https://pytorch.org/elastic/">TorchElastic</a></li>
<li class="toctree-l1"><a class="reference external" href="https://pytorch.org/serve">TorchServe</a></li>
<li class="toctree-l1"><a class="reference external" href="https://pytorch.org/xla/">PyTorch on XLA Devices</a></li>
</ul>
<p class="caption"><span class="caption-text">Community</span></p>
<ul>
<li class="toctree-l1"><a class="reference internal" href="../../../community/contribution_guide.html">PyTorch Contribution Guide</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../community/governance.html">PyTorch Governance</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../community/persons_of_interest.html">PyTorch Governance | Persons of Interest</a></li>
</ul>

            
          

        </div>
      </div>
    </nav>

    <div class="pytorch-container">
      <div class="pytorch-page-level-bar" id="pytorch-page-level-bar">
        <div class="pytorch-breadcrumbs-wrapper">
          















<div role="navigation" aria-label="breadcrumbs navigation">

  <ul class="pytorch-breadcrumbs">
    
      <li>
        <a href="../../../index.html">
          
            Docs
          
        </a> &gt;
      </li>

        
          <li><a href="../../index.html">Module code</a> &gt;</li>
        
          <li><a href="../../torch.html">torch</a> &gt;</li>
        
          <li><a href="../quantization.html">torch.quantization</a> &gt;</li>
        
      <li>torch.quantization.quantize</li>
    
    
      <li class="pytorch-breadcrumbs-aside">
        
      </li>
    
  </ul>

  
</div>
        </div>

        <div class="pytorch-shortcuts-wrapper" id="pytorch-shortcuts-wrapper">
          Shortcuts
        </div>
      </div>

      <section data-toggle="wy-nav-shift" id="pytorch-content-wrap" class="pytorch-content-wrap">
        <div class="pytorch-content-left">

        
          
          <div class="rst-content">
          
            <div role="main" class="main-content" itemscope="itemscope" itemtype="http://schema.org/Article">
             <article itemprop="articleBody" id="pytorch-article" class="pytorch-article">
              
  <h1>Source code for torch.quantization.quantize</h1><div class="highlight"><pre>
<span></span><span class="kn">from</span> <span class="nn">__future__</span> <span class="kn">import</span> <span class="n">absolute_import</span><span class="p">,</span> <span class="n">division</span><span class="p">,</span> <span class="n">print_function</span><span class="p">,</span> <span class="n">unicode_literals</span>

<span class="kn">import</span> <span class="nn">copy</span>
<span class="kn">import</span> <span class="nn">itertools</span>
<span class="kn">import</span> <span class="nn">warnings</span>

<span class="kn">import</span> <span class="nn">torch</span>
<span class="kn">import</span> <span class="nn">torch.nn</span> <span class="k">as</span> <span class="nn">nn</span>
<span class="kn">import</span> <span class="nn">torch.nn.intrinsic</span> <span class="k">as</span> <span class="nn">nni</span>
<span class="kn">import</span> <span class="nn">torch.nn.quantized</span> <span class="k">as</span> <span class="nn">nnq</span>

<span class="kn">from</span> <span class="nn">.default_mappings</span> <span class="kn">import</span> <span class="p">(</span><span class="n">DEFAULT_DYNAMIC_MODULE_MAPPING</span><span class="p">,</span>
                               <span class="n">DEFAULT_MODULE_MAPPING</span><span class="p">,</span>
                               <span class="n">DEFAULT_QAT_MODULE_MAPPING</span><span class="p">,</span>
                               <span class="n">DEFAULT_QCONFIG_PROPAGATE_WHITE_LIST</span><span class="p">)</span>
<span class="kn">from</span> <span class="nn">.stubs</span> <span class="kn">import</span> <span class="n">DeQuantStub</span><span class="p">,</span> <span class="n">QuantWrapper</span>
<span class="kn">from</span> <span class="nn">.qconfig</span> <span class="kn">import</span> <span class="n">default_dynamic_qconfig</span><span class="p">,</span> <span class="n">float16_dynamic_qconfig</span>

<span class="k">def</span> <span class="nf">_propagate_qconfig_helper</span><span class="p">(</span><span class="n">module</span><span class="p">,</span> <span class="n">qconfig_dict</span><span class="p">,</span> <span class="n">white_list</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span>
                              <span class="n">qconfig_parent</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">prefix</span><span class="o">=</span><span class="s1">&#39;&#39;</span><span class="p">):</span>
    <span class="sa">r</span><span class="sd">&quot;&quot;&quot;This is a helper function for `propagate_qconfig_`</span>

<span class="sd">    Args:</span>
<span class="sd">        module: input module</span>
<span class="sd">        qconfig_dict: dictionary that maps from name of submodule to quantization</span>
<span class="sd">                     configuration</span>
<span class="sd">        white_list: list of quantizable modules</span>
<span class="sd">        qconfig_parent: quantization config of parent module, we will fallback to</span>
<span class="sd">                       this config when there is no specified config for current</span>
<span class="sd">                       module</span>
<span class="sd">        prefix: corresponding prefix of the current module, used as key in</span>
<span class="sd">                qconfig_dict</span>

<span class="sd">    Return:</span>
<span class="sd">        None, module is modified inplace with qconfig attached</span>
<span class="sd">    &quot;&quot;&quot;</span>
    <span class="c1"># TODO: Add test</span>
    <span class="k">if</span> <span class="n">white_list</span> <span class="ow">is</span> <span class="kc">None</span><span class="p">:</span>
        <span class="n">white_list</span> <span class="o">=</span> <span class="n">DEFAULT_QCONFIG_PROPAGATE_WHITE_LIST</span>

    <span class="n">module_qconfig</span> <span class="o">=</span> <span class="n">qconfig_dict</span><span class="o">.</span><span class="n">get</span><span class="p">(</span><span class="nb">type</span><span class="p">(</span><span class="n">module</span><span class="p">),</span> <span class="n">qconfig_parent</span><span class="p">)</span>
    <span class="n">module_qconfig</span> <span class="o">=</span> <span class="n">qconfig_dict</span><span class="o">.</span><span class="n">get</span><span class="p">(</span><span class="n">prefix</span><span class="p">,</span> <span class="n">module_qconfig</span><span class="p">)</span>
    <span class="n">module_qconfig</span> <span class="o">=</span> <span class="nb">getattr</span><span class="p">(</span><span class="n">module</span><span class="p">,</span> <span class="s1">&#39;qconfig&#39;</span><span class="p">,</span> <span class="n">module_qconfig</span><span class="p">)</span>

    <span class="k">if</span> <span class="nb">type</span><span class="p">(</span><span class="n">module</span><span class="p">)</span> <span class="ow">in</span> <span class="n">white_list</span><span class="p">:</span>
        <span class="n">module</span><span class="o">.</span><span class="n">qconfig</span> <span class="o">=</span> <span class="n">module_qconfig</span>
    <span class="k">for</span> <span class="n">name</span><span class="p">,</span> <span class="n">child</span> <span class="ow">in</span> <span class="n">module</span><span class="o">.</span><span class="n">named_children</span><span class="p">():</span>
        <span class="n">module_prefix</span> <span class="o">=</span> <span class="n">prefix</span> <span class="o">+</span> <span class="s1">&#39;.&#39;</span> <span class="o">+</span> <span class="n">name</span> <span class="k">if</span> <span class="n">prefix</span> <span class="k">else</span> <span class="n">name</span>
        <span class="n">_propagate_qconfig_helper</span><span class="p">(</span><span class="n">child</span><span class="p">,</span> <span class="n">qconfig_dict</span><span class="p">,</span> <span class="n">white_list</span><span class="p">,</span>
                                  <span class="n">module_qconfig</span><span class="p">,</span> <span class="n">module_prefix</span><span class="p">)</span>

<span class="c1"># TODO(jerryzh): expose white_list</span>
<div class="viewcode-block" id="propagate_qconfig_"><a class="viewcode-back" href="../../../quantization.html#torch.quantization.propagate_qconfig_">[docs]</a><span class="k">def</span> <span class="nf">propagate_qconfig_</span><span class="p">(</span><span class="n">module</span><span class="p">,</span> <span class="n">qconfig_dict</span><span class="o">=</span><span class="kc">None</span><span class="p">):</span>
    <span class="sa">r</span><span class="sd">&quot;&quot;&quot;Propagate qconfig through the module hierarchy and assign `qconfig`</span>
<span class="sd">    attribute on each leaf module</span>

<span class="sd">    Args:</span>
<span class="sd">        module: input module</span>
<span class="sd">        qconfig_dict: dictionary that maps from name or type of submodule to</span>
<span class="sd">            quantization configuration, qconfig applies to all submodules of a</span>
<span class="sd">            given module unless qconfig for the submodules are specified (when</span>
<span class="sd">            the submodule already has qconfig attribute)</span>

<span class="sd">    Return:</span>
<span class="sd">        None, module is modified inplace with qconfig attached</span>
<span class="sd">    &quot;&quot;&quot;</span>
    <span class="k">if</span> <span class="n">qconfig_dict</span> <span class="ow">is</span> <span class="kc">None</span><span class="p">:</span>
        <span class="n">qconfig_dict</span> <span class="o">=</span> <span class="p">{}</span>
    <span class="n">_propagate_qconfig_helper</span><span class="p">(</span><span class="n">module</span><span class="p">,</span> <span class="n">qconfig_dict</span><span class="p">)</span></div>

<span class="k">def</span> <span class="nf">_observer_forward_hook</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="nb">input</span><span class="p">,</span> <span class="n">output</span><span class="p">):</span>
    <span class="sa">r</span><span class="sd">&quot;&quot;&quot;Forward hook that calls observer on the output</span>
<span class="sd">    &quot;&quot;&quot;</span>
    <span class="k">return</span> <span class="bp">self</span><span class="o">.</span><span class="n">activation_post_process</span><span class="p">(</span><span class="n">output</span><span class="p">)</span>

<div class="viewcode-block" id="add_observer_"><a class="viewcode-back" href="../../../quantization.html#torch.quantization.add_observer_">[docs]</a><span class="k">def</span> <span class="nf">add_observer_</span><span class="p">(</span><span class="n">module</span><span class="p">):</span>
    <span class="sa">r</span><span class="sd">&quot;&quot;&quot;Add observer for the leaf child of the module.</span>

<span class="sd">    This function insert observer module to all leaf child module that</span>
<span class="sd">    has a valid qconfig attribute.</span>

<span class="sd">    Args:</span>
<span class="sd">        module: input module with qconfig attributes for all the leaf modules that we want to quantize</span>

<span class="sd">    Return:</span>
<span class="sd">        None, module is modified inplace with added observer modules and forward_hooks</span>
<span class="sd">    &quot;&quot;&quot;</span>
    <span class="k">for</span> <span class="n">child</span> <span class="ow">in</span> <span class="n">module</span><span class="o">.</span><span class="n">children</span><span class="p">():</span>
        <span class="k">if</span> <span class="nb">type</span><span class="p">(</span><span class="n">child</span><span class="p">)</span> <span class="o">==</span> <span class="n">nnq</span><span class="o">.</span><span class="n">FloatFunctional</span><span class="p">:</span>
            <span class="k">if</span> <span class="nb">hasattr</span><span class="p">(</span><span class="n">child</span><span class="p">,</span> <span class="s1">&#39;qconfig&#39;</span><span class="p">)</span> <span class="ow">and</span> <span class="n">child</span><span class="o">.</span><span class="n">qconfig</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span><span class="p">:</span>
                <span class="n">child</span><span class="o">.</span><span class="n">activation_post_process</span> <span class="o">=</span> <span class="n">child</span><span class="o">.</span><span class="n">qconfig</span><span class="o">.</span><span class="n">activation</span><span class="p">()</span>
        <span class="k">else</span><span class="p">:</span>
            <span class="n">add_observer_</span><span class="p">(</span><span class="n">child</span><span class="p">)</span>

    <span class="c1"># Insert observers only for leaf nodes, note that this observer is for</span>
    <span class="c1"># the output of the module, for input QuantStub will observe them</span>
    <span class="k">if</span> <span class="nb">hasattr</span><span class="p">(</span><span class="n">module</span><span class="p">,</span> <span class="s1">&#39;qconfig&#39;</span><span class="p">)</span> <span class="ow">and</span> <span class="n">module</span><span class="o">.</span><span class="n">qconfig</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span> <span class="ow">and</span> \
       <span class="nb">len</span><span class="p">(</span><span class="n">module</span><span class="o">.</span><span class="n">_modules</span><span class="p">)</span> <span class="o">==</span> <span class="mi">0</span> <span class="ow">and</span> <span class="ow">not</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">module</span><span class="p">,</span> <span class="n">torch</span><span class="o">.</span><span class="n">nn</span><span class="o">.</span><span class="n">Sequential</span><span class="p">):</span>
        <span class="c1"># observer and hook will be gone after we swap the module</span>
        <span class="n">module</span><span class="o">.</span><span class="n">add_module</span><span class="p">(</span><span class="s1">&#39;activation_post_process&#39;</span><span class="p">,</span> <span class="n">module</span><span class="o">.</span><span class="n">qconfig</span><span class="o">.</span><span class="n">activation</span><span class="p">())</span>
        <span class="n">module</span><span class="o">.</span><span class="n">register_forward_hook</span><span class="p">(</span><span class="n">_observer_forward_hook</span><span class="p">)</span></div>

<div class="viewcode-block" id="add_quant_dequant"><a class="viewcode-back" href="../../../quantization.html#torch.quantization.add_quant_dequant">[docs]</a><span class="k">def</span> <span class="nf">add_quant_dequant</span><span class="p">(</span><span class="n">module</span><span class="p">):</span>
    <span class="sa">r</span><span class="sd">&quot;&quot;&quot;Wrap the leaf child module in QuantWrapper if it has a valid qconfig</span>
<span class="sd">    Note that this function will modify the children of module inplace and it</span>
<span class="sd">    can return a new module which wraps the input module as well.</span>

<span class="sd">    Args:</span>
<span class="sd">        module: input module with qconfig attributes for all the leaf modules</span>
<span class="sd">        that we want to quantize</span>

<span class="sd">    Return:</span>
<span class="sd">        Either the inplace modified module with submodules wrapped in</span>
<span class="sd">        `QuantWrapper` based on qconfig or a new `QuantWrapper` module which</span>
<span class="sd">        wraps the input module, the latter case only happens when the input</span>
<span class="sd">        module is a leaf module and we want to quantize it.</span>
<span class="sd">    &quot;&quot;&quot;</span>
    <span class="k">if</span> <span class="nb">len</span><span class="p">(</span><span class="n">module</span><span class="o">.</span><span class="n">_modules</span><span class="p">)</span> <span class="o">==</span> <span class="mi">0</span> <span class="ow">and</span> <span class="nb">hasattr</span><span class="p">(</span><span class="n">module</span><span class="p">,</span> <span class="s1">&#39;qconfig&#39;</span><span class="p">)</span> <span class="ow">and</span> <span class="n">module</span><span class="o">.</span><span class="n">qconfig</span><span class="p">:</span>
        <span class="k">return</span> <span class="n">QuantWrapper</span><span class="p">(</span><span class="n">module</span><span class="p">)</span>

    <span class="k">for</span> <span class="n">name</span><span class="p">,</span> <span class="n">child</span> <span class="ow">in</span> <span class="n">module</span><span class="o">.</span><span class="n">named_children</span><span class="p">():</span>
        <span class="n">module</span><span class="o">.</span><span class="n">_modules</span><span class="p">[</span><span class="n">name</span><span class="p">]</span> <span class="o">=</span> <span class="n">add_quant_dequant</span><span class="p">(</span><span class="n">child</span><span class="p">)</span>
    <span class="k">return</span> <span class="n">module</span></div>

<div class="viewcode-block" id="prepare"><a class="viewcode-back" href="../../../quantization.html#torch.quantization.prepare">[docs]</a><span class="k">def</span> <span class="nf">prepare</span><span class="p">(</span><span class="n">model</span><span class="p">,</span> <span class="n">inplace</span><span class="o">=</span><span class="kc">False</span><span class="p">):</span>
    <span class="sa">r</span><span class="sd">&quot;&quot;&quot;Prepares a copy of the model for quantization calibration or quantization-aware training.</span>

<span class="sd">    Quantization configuration should be assigned preemptively</span>
<span class="sd">    to individual submodules in `.qconfig` attribute.</span>

<span class="sd">    The model will be attached with observer or fake quant modules, and qconfig</span>
<span class="sd">    will be propagated.</span>

<span class="sd">    Args:</span>
<span class="sd">        model: input model to be modified in-place</span>
<span class="sd">        inplace: carry out model transformations in-place, the original module is mutated</span>
<span class="sd">    &quot;&quot;&quot;</span>
    <span class="k">if</span> <span class="ow">not</span> <span class="n">inplace</span><span class="p">:</span>
        <span class="n">model</span> <span class="o">=</span> <span class="n">copy</span><span class="o">.</span><span class="n">deepcopy</span><span class="p">(</span><span class="n">model</span><span class="p">)</span>
    <span class="n">propagate_qconfig_</span><span class="p">(</span><span class="n">model</span><span class="p">)</span>
    <span class="c1"># sanity check common API misusage</span>
    <span class="k">if</span> <span class="ow">not</span> <span class="nb">any</span><span class="p">(</span><span class="nb">hasattr</span><span class="p">(</span><span class="n">m</span><span class="p">,</span> <span class="s1">&#39;qconfig&#39;</span><span class="p">)</span> <span class="ow">and</span> <span class="n">m</span><span class="o">.</span><span class="n">qconfig</span> <span class="k">for</span> <span class="n">m</span> <span class="ow">in</span> <span class="n">model</span><span class="o">.</span><span class="n">modules</span><span class="p">()):</span>
        <span class="n">warnings</span><span class="o">.</span><span class="n">warn</span><span class="p">(</span><span class="s2">&quot;None of the submodule got qconfig applied. Make sure you &quot;</span>
                      <span class="s2">&quot;passed correct configuration through `qconfig_dict` or &quot;</span>
                      <span class="s2">&quot;by assigning the `.qconfig` attribute directly on submodules&quot;</span><span class="p">)</span>
    <span class="n">add_observer_</span><span class="p">(</span><span class="n">model</span><span class="p">)</span>
    <span class="k">return</span> <span class="n">model</span></div>

<div class="viewcode-block" id="quantize"><a class="viewcode-back" href="../../../quantization.html#torch.quantization.quantize">[docs]</a><span class="k">def</span> <span class="nf">quantize</span><span class="p">(</span><span class="n">model</span><span class="p">,</span> <span class="n">run_fn</span><span class="p">,</span> <span class="n">run_args</span><span class="p">,</span> <span class="n">mapping</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">inplace</span><span class="o">=</span><span class="kc">False</span><span class="p">):</span>
    <span class="sa">r</span><span class="sd">&quot;&quot;&quot;Converts a float model to quantized model.</span>

<span class="sd">    First it will prepare the model for calibration or training, then it calls</span>
<span class="sd">    `run_fn` which will run the calibration step or training step,</span>
<span class="sd">    after that we will call `convert` which will convert the model to a</span>
<span class="sd">    quantized model.</span>

<span class="sd">    Args:</span>
<span class="sd">        model: input model</span>
<span class="sd">        run_fn: a function for evaluating the prepared model, can be a</span>
<span class="sd">            function that simply runs the prepared model or a training loop</span>
<span class="sd">        run_args: positional arguments for `run_fn`</span>
<span class="sd">        inplace: carry out model transformations in-place, the original module is mutated</span>
<span class="sd">        mapping: correspondence between original module types and quantized counterparts</span>

<span class="sd">    Return:</span>
<span class="sd">        Quantized model.</span>
<span class="sd">    &quot;&quot;&quot;</span>
    <span class="k">if</span> <span class="n">mapping</span> <span class="ow">is</span> <span class="kc">None</span><span class="p">:</span>
        <span class="n">mapping</span> <span class="o">=</span> <span class="n">DEFAULT_MODULE_MAPPING</span>
    <span class="k">if</span> <span class="ow">not</span> <span class="n">inplace</span><span class="p">:</span>
        <span class="n">model</span> <span class="o">=</span> <span class="n">copy</span><span class="o">.</span><span class="n">deepcopy</span><span class="p">(</span><span class="n">model</span><span class="p">)</span>
    <span class="n">model</span><span class="o">.</span><span class="n">eval</span><span class="p">()</span>
    <span class="n">prepare</span><span class="p">(</span><span class="n">model</span><span class="p">,</span> <span class="n">inplace</span><span class="o">=</span><span class="kc">True</span><span class="p">)</span>
    <span class="n">run_fn</span><span class="p">(</span><span class="n">model</span><span class="p">,</span> <span class="n">run_args</span><span class="p">)</span>
    <span class="n">convert</span><span class="p">(</span><span class="n">model</span><span class="p">,</span> <span class="n">mapping</span><span class="p">,</span> <span class="n">inplace</span><span class="o">=</span><span class="kc">True</span><span class="p">)</span>
    <span class="k">return</span> <span class="n">model</span></div>

<div class="viewcode-block" id="quantize_dynamic"><a class="viewcode-back" href="../../../quantization.html#torch.quantization.quantize_dynamic">[docs]</a><span class="k">def</span> <span class="nf">quantize_dynamic</span><span class="p">(</span><span class="n">model</span><span class="p">,</span> <span class="n">qconfig_spec</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">dtype</span><span class="o">=</span><span class="n">torch</span><span class="o">.</span><span class="n">qint8</span><span class="p">,</span>
                     <span class="n">mapping</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">inplace</span><span class="o">=</span><span class="kc">False</span><span class="p">):</span>
    <span class="sa">r</span><span class="sd">&quot;&quot;&quot;Converts a float model to dynamic (i.e. weights-only) quantized model.</span>

<span class="sd">    Replaces specified modules with dynamic weight-only quantized versions and output the quantized model.</span>

<span class="sd">    For simplest usage provide `dtype` argument that can be float16 or qint8. Weight-only quantization</span>
<span class="sd">    by default is performed for layers with large weights size - i.e. Linear and RNN variants.</span>

<span class="sd">    Fine grained control is possible with `qconfig_spec` and `mapping` that act similarly to `quantize()`.</span>

<span class="sd">    If `qconfig_spec` is provided, the `dtype` argument is ignored.</span>

<span class="sd">    Args:</span>
<span class="sd">        model: input model</span>
<span class="sd">        qconfig_spec: Either:</span>

<span class="sd">            - A dictionary that maps from name or type of submodule to quantization</span>
<span class="sd">              configuration, qconfig applies to all submodules of a given</span>
<span class="sd">              module unless qconfig for the submodules are specified (when the</span>
<span class="sd">              submodule already has qconfig attribute). Entries in the dictionary</span>
<span class="sd">              need to be QConfigDynamic instances.</span>

<span class="sd">            - A set of types and/or submodule names to apply dynamic quantization to,</span>
<span class="sd">              in which case the `dtype` argument is used to specify the bit-width</span>

<span class="sd">        inplace: carry out model transformations in-place, the original module is mutated</span>
<span class="sd">        mapping: maps type of a submodule to a type of corresponding dynamically quantized version</span>
<span class="sd">            with which the submodule needs to be replaced</span>

<span class="sd">    &quot;&quot;&quot;</span>
    <span class="k">if</span> <span class="n">qconfig_spec</span> <span class="ow">is</span> <span class="kc">None</span><span class="p">:</span>
        <span class="k">if</span> <span class="n">dtype</span> <span class="o">==</span> <span class="n">torch</span><span class="o">.</span><span class="n">qint8</span><span class="p">:</span>
            <span class="n">qconfig_spec</span> <span class="o">=</span> <span class="p">{</span>
                <span class="n">nn</span><span class="o">.</span><span class="n">Linear</span> <span class="p">:</span> <span class="n">default_dynamic_qconfig</span><span class="p">,</span>
                <span class="n">nn</span><span class="o">.</span><span class="n">LSTM</span> <span class="p">:</span> <span class="n">default_dynamic_qconfig</span><span class="p">,</span>
            <span class="p">}</span>
        <span class="k">elif</span> <span class="n">dtype</span> <span class="o">==</span> <span class="n">torch</span><span class="o">.</span><span class="n">float16</span><span class="p">:</span>
            <span class="n">qconfig_spec</span> <span class="o">=</span> <span class="p">{</span>
                <span class="n">nn</span><span class="o">.</span><span class="n">Linear</span> <span class="p">:</span> <span class="n">float16_dynamic_qconfig</span><span class="p">,</span>
                <span class="n">nn</span><span class="o">.</span><span class="n">LSTM</span> <span class="p">:</span> <span class="n">float16_dynamic_qconfig</span><span class="p">,</span>
            <span class="p">}</span>
        <span class="k">else</span><span class="p">:</span>
            <span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span>
                <span class="s2">&quot;Don&#39;t know how to quantize with default settings for </span><span class="si">{}</span><span class="s2">. Provide full qconfig please&quot;</span><span class="o">.</span><span class="n">format</span><span class="p">(</span><span class="n">dtype</span><span class="p">))</span>
    <span class="k">elif</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">qconfig_spec</span><span class="p">,</span> <span class="nb">set</span><span class="p">):</span>
        <span class="k">if</span> <span class="n">dtype</span> <span class="ow">is</span> <span class="n">torch</span><span class="o">.</span><span class="n">qint8</span><span class="p">:</span>
            <span class="n">default_qconfig</span> <span class="o">=</span> <span class="n">default_dynamic_qconfig</span>
        <span class="k">elif</span> <span class="n">dtype</span> <span class="ow">is</span> <span class="n">torch</span><span class="o">.</span><span class="n">float16</span><span class="p">:</span>
            <span class="n">default_qconfig</span> <span class="o">=</span> <span class="n">float16_dynamic_qconfig</span>
        <span class="k">else</span><span class="p">:</span>
            <span class="k">raise</span> <span class="ne">RuntimeError</span><span class="p">(</span><span class="s1">&#39;Unknown dtype specified for quantize_dynamic: &#39;</span><span class="p">,</span> <span class="nb">str</span><span class="p">(</span><span class="n">dtype</span><span class="p">))</span>
        <span class="n">qconfig_spec</span> <span class="o">=</span> <span class="nb">dict</span><span class="p">(</span><span class="nb">zip</span><span class="p">(</span><span class="n">qconfig_spec</span><span class="p">,</span> <span class="n">itertools</span><span class="o">.</span><span class="n">repeat</span><span class="p">(</span><span class="n">default_qconfig</span><span class="p">)))</span>

    <span class="k">if</span> <span class="n">mapping</span> <span class="ow">is</span> <span class="kc">None</span><span class="p">:</span>
        <span class="n">mapping</span> <span class="o">=</span> <span class="n">DEFAULT_DYNAMIC_MODULE_MAPPING</span>

    <span class="k">if</span> <span class="ow">not</span> <span class="n">inplace</span><span class="p">:</span>
        <span class="n">model</span> <span class="o">=</span> <span class="n">copy</span><span class="o">.</span><span class="n">deepcopy</span><span class="p">(</span><span class="n">model</span><span class="p">)</span>
    <span class="n">model</span><span class="o">.</span><span class="n">eval</span><span class="p">()</span>
    <span class="n">propagate_qconfig_</span><span class="p">(</span><span class="n">model</span><span class="p">,</span> <span class="n">qconfig_spec</span><span class="p">)</span>
    <span class="n">convert</span><span class="p">(</span><span class="n">model</span><span class="p">,</span> <span class="n">mapping</span><span class="p">,</span> <span class="n">inplace</span><span class="o">=</span><span class="kc">True</span><span class="p">)</span>
    <span class="k">return</span> <span class="n">model</span></div>

<div class="viewcode-block" id="prepare_qat"><a class="viewcode-back" href="../../../quantization.html#torch.quantization.prepare_qat">[docs]</a><span class="k">def</span> <span class="nf">prepare_qat</span><span class="p">(</span><span class="n">model</span><span class="p">,</span> <span class="n">mapping</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">inplace</span><span class="o">=</span><span class="kc">False</span><span class="p">):</span>
    <span class="sa">r</span><span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    Prepares a copy of the model for quantization calibration or</span>
<span class="sd">    quantization-aware training and converts it to quantized version.</span>

<span class="sd">    Quantization configuration should be assigned preemptively</span>
<span class="sd">    to individual submodules in `.qconfig` attribute.</span>

<span class="sd">    Args:</span>
<span class="sd">        model: input model to be modified in-place</span>
<span class="sd">        mapping: dictionary that maps float modules to quantized modules to be</span>
<span class="sd">                 replaced.</span>
<span class="sd">        inplace: carry out model transformations in-place, the original module</span>
<span class="sd">                 is mutated</span>
<span class="sd">    &quot;&quot;&quot;</span>
    <span class="k">if</span> <span class="n">mapping</span> <span class="ow">is</span> <span class="kc">None</span><span class="p">:</span>
        <span class="n">mapping</span> <span class="o">=</span> <span class="n">DEFAULT_QAT_MODULE_MAPPING</span>
    <span class="n">model</span> <span class="o">=</span> <span class="n">prepare</span><span class="p">(</span><span class="n">model</span><span class="p">,</span> <span class="n">inplace</span><span class="o">=</span><span class="n">inplace</span><span class="p">)</span>
    <span class="n">convert</span><span class="p">(</span><span class="n">model</span><span class="p">,</span> <span class="n">mapping</span><span class="p">,</span> <span class="n">inplace</span><span class="o">=</span><span class="kc">True</span><span class="p">)</span>
    <span class="k">return</span> <span class="n">model</span></div>

<div class="viewcode-block" id="quantize_qat"><a class="viewcode-back" href="../../../quantization.html#torch.quantization.quantize_qat">[docs]</a><span class="k">def</span> <span class="nf">quantize_qat</span><span class="p">(</span><span class="n">model</span><span class="p">,</span> <span class="n">run_fn</span><span class="p">,</span> <span class="n">run_args</span><span class="p">,</span> <span class="n">inplace</span><span class="o">=</span><span class="kc">False</span><span class="p">):</span>
    <span class="sa">r</span><span class="sd">&quot;&quot;&quot;Do quantization aware training and output a quantized model</span>

<span class="sd">    Args:</span>
<span class="sd">        model: input model</span>
<span class="sd">        run_fn: a function for evaluating the prepared model, can be a</span>
<span class="sd">                function that simply runs the prepared model or a training</span>
<span class="sd">                loop</span>
<span class="sd">        run_args: positional arguments for `run_fn`</span>

<span class="sd">    Return:</span>
<span class="sd">        Quantized model.</span>
<span class="sd">    &quot;&quot;&quot;</span>
    <span class="k">if</span> <span class="ow">not</span> <span class="n">inplace</span><span class="p">:</span>
        <span class="n">model</span> <span class="o">=</span> <span class="n">copy</span><span class="o">.</span><span class="n">deepcopy</span><span class="p">(</span><span class="n">model</span><span class="p">)</span>
    <span class="n">model</span><span class="o">.</span><span class="n">train</span><span class="p">()</span>
    <span class="n">prepare_qat</span><span class="p">(</span><span class="n">model</span><span class="p">,</span> <span class="n">inplace</span><span class="o">=</span><span class="kc">True</span><span class="p">)</span>
    <span class="n">run_fn</span><span class="p">(</span><span class="n">model</span><span class="p">,</span> <span class="n">run_args</span><span class="p">)</span>
    <span class="n">convert</span><span class="p">(</span><span class="n">model</span><span class="p">,</span> <span class="n">inplace</span><span class="o">=</span><span class="kc">True</span><span class="p">)</span>
    <span class="k">return</span> <span class="n">model</span></div>

<div class="viewcode-block" id="convert"><a class="viewcode-back" href="../../../quantization.html#torch.quantization.convert">[docs]</a><span class="k">def</span> <span class="nf">convert</span><span class="p">(</span><span class="n">module</span><span class="p">,</span> <span class="n">mapping</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">inplace</span><span class="o">=</span><span class="kc">False</span><span class="p">):</span>
    <span class="sa">r</span><span class="sd">&quot;&quot;&quot;Converts the float module with observers (where we can get quantization</span>
<span class="sd">    parameters) to a quantized module.</span>

<span class="sd">    Args:</span>
<span class="sd">        module: calibrated module with observers</span>
<span class="sd">        mapping: a dictionary that maps from float module type to quantized</span>
<span class="sd">                 module type, can be overwritten to allow swapping user defined</span>
<span class="sd">                 Modules</span>
<span class="sd">        inplace: carry out model transformations in-place, the original module</span>
<span class="sd">                 is mutated</span>

<span class="sd">    &quot;&quot;&quot;</span>
    <span class="k">if</span> <span class="n">mapping</span> <span class="ow">is</span> <span class="kc">None</span><span class="p">:</span>
        <span class="n">mapping</span> <span class="o">=</span> <span class="n">DEFAULT_MODULE_MAPPING</span>
    <span class="k">if</span> <span class="ow">not</span> <span class="n">inplace</span><span class="p">:</span>
        <span class="n">module</span> <span class="o">=</span> <span class="n">copy</span><span class="o">.</span><span class="n">deepcopy</span><span class="p">(</span><span class="n">module</span><span class="p">)</span>
    <span class="n">reassign</span> <span class="o">=</span> <span class="p">{}</span>
    <span class="c1"># TODO(jerryzh): remove after deciding on the impl of intrinsic modules</span>
    <span class="c1"># This is required because intrinsic modules right now are implemented as</span>
    <span class="c1"># nn.Sequential and we don&#39;t want to swap their constituents</span>
    <span class="n">SWAPPABLE_MODULES</span> <span class="o">=</span> <span class="p">(</span><span class="n">nni</span><span class="o">.</span><span class="n">ConvBn2d</span><span class="p">,</span>
                         <span class="n">nni</span><span class="o">.</span><span class="n">ConvBnReLU2d</span><span class="p">,</span>
                         <span class="n">nni</span><span class="o">.</span><span class="n">LinearReLU</span><span class="p">,</span>
                         <span class="n">nni</span><span class="o">.</span><span class="n">ConvReLU2d</span><span class="p">,</span>
                         <span class="n">nni</span><span class="o">.</span><span class="n">ConvReLU3d</span><span class="p">)</span>

    <span class="k">for</span> <span class="n">name</span><span class="p">,</span> <span class="n">mod</span> <span class="ow">in</span> <span class="n">module</span><span class="o">.</span><span class="n">named_children</span><span class="p">():</span>
        <span class="k">if</span> <span class="nb">type</span><span class="p">(</span><span class="n">mod</span><span class="p">)</span> <span class="ow">not</span> <span class="ow">in</span> <span class="n">SWAPPABLE_MODULES</span><span class="p">:</span>
            <span class="n">convert</span><span class="p">(</span><span class="n">mod</span><span class="p">,</span> <span class="n">mapping</span><span class="p">,</span> <span class="n">inplace</span><span class="o">=</span><span class="kc">True</span><span class="p">)</span>
        <span class="n">reassign</span><span class="p">[</span><span class="n">name</span><span class="p">]</span> <span class="o">=</span> <span class="n">swap_module</span><span class="p">(</span><span class="n">mod</span><span class="p">,</span> <span class="n">mapping</span><span class="p">)</span>

    <span class="k">for</span> <span class="n">key</span><span class="p">,</span> <span class="n">value</span> <span class="ow">in</span> <span class="n">reassign</span><span class="o">.</span><span class="n">items</span><span class="p">():</span>
        <span class="n">module</span><span class="o">.</span><span class="n">_modules</span><span class="p">[</span><span class="n">key</span><span class="p">]</span> <span class="o">=</span> <span class="n">value</span>

    <span class="k">return</span> <span class="n">module</span></div>

<div class="viewcode-block" id="swap_module"><a class="viewcode-back" href="../../../quantization.html#torch.quantization.swap_module">[docs]</a><span class="k">def</span> <span class="nf">swap_module</span><span class="p">(</span><span class="n">mod</span><span class="p">,</span> <span class="n">mapping</span><span class="p">):</span>
    <span class="sa">r</span><span class="sd">&quot;&quot;&quot;Swaps the module if it has a quantized counterpart and it has an</span>
<span class="sd">    `observer` attached.</span>

<span class="sd">    Args:</span>
<span class="sd">        mod: input module</span>
<span class="sd">        mapping: a dictionary that maps from nn module to nnq module</span>

<span class="sd">    Return:</span>
<span class="sd">        The corresponding quantized module of `mod`</span>
<span class="sd">    &quot;&quot;&quot;</span>
    <span class="n">new_mod</span> <span class="o">=</span> <span class="n">mod</span>
    <span class="c1"># Always replace dequantstub with dequantize</span>
    <span class="k">if</span> <span class="nb">hasattr</span><span class="p">(</span><span class="n">mod</span><span class="p">,</span> <span class="s1">&#39;qconfig&#39;</span><span class="p">)</span> <span class="ow">and</span> <span class="n">mod</span><span class="o">.</span><span class="n">qconfig</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span> <span class="ow">or</span> <span class="nb">type</span><span class="p">(</span><span class="n">mod</span><span class="p">)</span> <span class="o">==</span> <span class="n">DeQuantStub</span><span class="p">:</span>
        <span class="k">if</span> <span class="nb">type</span><span class="p">(</span><span class="n">mod</span><span class="p">)</span> <span class="ow">in</span> <span class="n">mapping</span><span class="p">:</span>
            <span class="n">new_mod</span> <span class="o">=</span> <span class="n">mapping</span><span class="p">[</span><span class="nb">type</span><span class="p">(</span><span class="n">mod</span><span class="p">)]</span><span class="o">.</span><span class="n">from_float</span><span class="p">(</span><span class="n">mod</span><span class="p">)</span>
    <span class="k">return</span> <span class="n">new_mod</span></div>

<div class="viewcode-block" id="get_observer_dict"><a class="viewcode-back" href="../../../quantization.html#torch.quantization.get_observer_dict">[docs]</a><span class="k">def</span> <span class="nf">get_observer_dict</span><span class="p">(</span><span class="n">mod</span><span class="p">,</span> <span class="n">target_dict</span><span class="p">,</span> <span class="n">prefix</span><span class="o">=</span><span class="s2">&quot;&quot;</span><span class="p">):</span>
    <span class="sa">r</span><span class="sd">&quot;&quot;&quot;Traverse the modules and save all observers into dict.</span>
<span class="sd">    This is mainly used for quantization accuracy debug</span>
<span class="sd">    Args:</span>
<span class="sd">        mod: the top module we want to save all observers</span>
<span class="sd">        prefix: the prefix for the current module</span>
<span class="sd">        target_dict: the dictionary used to save all the observers</span>
<span class="sd">    &quot;&quot;&quot;</span>
    <span class="k">def</span> <span class="nf">get_prefix</span><span class="p">(</span><span class="n">prefix</span><span class="p">):</span>
        <span class="k">return</span> <span class="n">prefix</span> <span class="k">if</span> <span class="n">prefix</span> <span class="o">==</span> <span class="s2">&quot;&quot;</span> <span class="k">else</span> <span class="n">prefix</span> <span class="o">+</span> <span class="s1">&#39;.&#39;</span>

    <span class="k">if</span> <span class="nb">hasattr</span><span class="p">(</span><span class="n">mod</span><span class="p">,</span> <span class="s1">&#39;activation_post_process&#39;</span><span class="p">):</span>
        <span class="n">target_dict</span><span class="p">[</span><span class="n">get_prefix</span><span class="p">(</span><span class="n">prefix</span><span class="p">)</span> <span class="o">+</span> <span class="s1">&#39;activation_post_process&#39;</span><span class="p">]</span> <span class="o">=</span> <span class="n">mod</span><span class="o">.</span><span class="n">activation_post_process</span>
    <span class="k">for</span> <span class="n">name</span><span class="p">,</span> <span class="n">child</span> <span class="ow">in</span> <span class="n">mod</span><span class="o">.</span><span class="n">named_children</span><span class="p">():</span>
        <span class="n">module_prefix</span> <span class="o">=</span> <span class="n">get_prefix</span><span class="p">(</span><span class="n">prefix</span><span class="p">)</span> <span class="o">+</span> <span class="n">name</span> <span class="k">if</span> <span class="n">prefix</span> <span class="k">else</span> <span class="n">name</span>
        <span class="n">get_observer_dict</span><span class="p">(</span><span class="n">child</span><span class="p">,</span> <span class="n">target_dict</span><span class="p">,</span> <span class="n">module_prefix</span><span class="p">)</span></div>
</pre></div>

             </article>
             
            </div>
            <footer>
  

  

    <hr>

  

  <div role="contentinfo">
    <p>
        &copy; Copyright 2019, Torch Contributors.

    </p>
  </div>
    
      <div>
        Built with <a href="http://sphinx-doc.org/">Sphinx</a> using a <a href="https://github.com/rtfd/sphinx_rtd_theme">theme</a> provided by <a href="https://readthedocs.org">Read the Docs</a>.
      </div>
     

</footer>

          </div>
        </div>

        <div class="pytorch-content-right" id="pytorch-content-right">
          <div class="pytorch-right-menu" id="pytorch-right-menu">
            <div class="pytorch-side-scroll" id="pytorch-side-scroll-right">
              
            </div>
          </div>
        </div>
      </section>
    </div>

  


  

     
       <script type="text/javascript" id="documentation_options" data-url_root="../../../" src="../../../_static/documentation_options.js"></script>
         <script src="../../../_static/jquery.js"></script>
         <script src="../../../_static/underscore.js"></script>
         <script src="../../../_static/doctools.js"></script>
         <script src="../../../_static/language_data.js"></script>
     

  

  <script type="text/javascript" src="../../../_static/js/vendor/popper.min.js"></script>
  <script type="text/javascript" src="../../../_static/js/vendor/bootstrap.min.js"></script>
  <script src="https://cdnjs.cloudflare.com/ajax/libs/list.js/1.5.0/list.min.js"></script>
  <script type="text/javascript" src="../../../_static/js/theme.js"></script>

  <script type="text/javascript">
      jQuery(function () {
          SphinxRtdTheme.Navigation.enable(true);
      });
  </script>
 
<script>
  (function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){
  (i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o),
  m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m)
  })(window,document,'script','https://www.google-analytics.com/analytics.js','ga');

  ga('create', 'UA-90545585-1', 'auto');
  ga('send', 'pageview');

</script>

<script async src="https://www.googletagmanager.com/gtag/js?id=UA-117752657-2"></script>

<script>
  window.dataLayer = window.dataLayer || [];

  function gtag(){dataLayer.push(arguments);}

  gtag('js', new Date());
  gtag('config', 'UA-117752657-2');
</script>

<img height="1" width="1" style="border-style:none;" alt="" src="https://www.googleadservices.com/pagead/conversion/795629140/?label=txkmCPmdtosBENSssfsC&amp;guid=ON&amp;script=0"/>


  <!-- Begin Footer -->

  <div class="container-fluid docs-tutorials-resources" id="docs-tutorials-resources">
    <div class="container">
      <div class="row">
        <div class="col-md-4 text-center">
          <h2>Docs</h2>
          <p>Access comprehensive developer documentation for PyTorch</p>
          <a class="with-right-arrow" href="https://pytorch.org/docs/stable/index.html">View Docs</a>
        </div>

        <div class="col-md-4 text-center">
          <h2>Tutorials</h2>
          <p>Get in-depth tutorials for beginners and advanced developers</p>
          <a class="with-right-arrow" href="https://pytorch.org/tutorials">View Tutorials</a>
        </div>

        <div class="col-md-4 text-center">
          <h2>Resources</h2>
          <p>Find development resources and get your questions answered</p>
          <a class="with-right-arrow" href="https://pytorch.org/resources">View Resources</a>
        </div>
      </div>
    </div>
  </div>

  <footer class="site-footer">
    <div class="container footer-container">
      <div class="footer-logo-wrapper">
        <!-- Image-only link: aria-label supplies the accessible name -->
        <a href="https://pytorch.org/" class="footer-logo" aria-label="PyTorch"></a>
      </div>

      <div class="footer-links-wrapper">
        <div class="footer-links-col">
          <ul>
            <li class="list-title"><a href="https://pytorch.org/">PyTorch</a></li>
            <li><a href="https://pytorch.org/get-started">Get Started</a></li>
            <li><a href="https://pytorch.org/features">Features</a></li>
            <li><a href="https://pytorch.org/ecosystem">Ecosystem</a></li>
            <li><a href="https://pytorch.org/blog/">Blog</a></li>
            <li><a href="https://github.com/pytorch/pytorch/blob/master/CONTRIBUTING.md">Contributing</a></li>
          </ul>
        </div>

        <div class="footer-links-col">
          <ul>
            <li class="list-title"><a href="https://pytorch.org/resources">Resources</a></li>
            <li><a href="https://pytorch.org/tutorials">Tutorials</a></li>
            <li><a href="https://pytorch.org/docs/stable/index.html">Docs</a></li>
            <!-- target="_blank" links carry rel="noopener noreferrer" so the new tab cannot reach window.opener -->
            <li><a href="https://discuss.pytorch.org" target="_blank" rel="noopener noreferrer">Discuss</a></li>
            <li><a href="https://github.com/pytorch/pytorch/issues" target="_blank" rel="noopener noreferrer">Github Issues</a></li>
            <li><a href="https://pytorch.org/assets/brand-guidelines/PyTorch-Brand-Guidelines.pdf" target="_blank" rel="noopener noreferrer">Brand Guidelines</a></li>
          </ul>
        </div>

        <div class="footer-links-col follow-us-col">
          <ul>
            <li class="list-title">Stay Connected</li>
            <li>
              <!-- Mailchimp email-subscribe embed -->
              <div id="mc_embed_signup">
                <form
                  action="https://twitter.us14.list-manage.com/subscribe/post?u=75419c71fe0a935e53dfa4a3f&amp;id=91d0dccd39"
                  method="post"
                  id="mc-embedded-subscribe-form"
                  name="mc-embedded-subscribe-form"
                  class="email-subscribe-form validate"
                  target="_blank"
                  novalidate>
                  <div id="mc_embed_signup_scroll" class="email-subscribe-form-fields-wrapper">
                    <div class="mc-field-group">
                      <label for="mce-EMAIL" style="display:none;">Email Address</label>
                      <input type="email" value="" name="EMAIL" class="required email" id="mce-EMAIL" placeholder="Email Address">
                    </div>

                    <div id="mce-responses" class="clear">
                      <div class="response" id="mce-error-response" style="display:none"></div>
                      <div class="response" id="mce-success-response" style="display:none"></div>
                    </div>    <!-- real people should not fill this in and expect good things - do not remove this or risk form bot signups-->

                    <!-- Honeypot field: kept off-screen and out of the tab order; do not remove -->
                    <div style="position: absolute; left: -5000px;" aria-hidden="true"><input type="text" name="b_75419c71fe0a935e53dfa4a3f_91d0dccd39" tabindex="-1" value=""></div>

                    <div class="clear">
                      <!-- Image-styled submit (empty value): aria-label supplies the accessible name -->
                      <input type="submit" value="" name="subscribe" id="mc-embedded-subscribe" class="button email-subscribe-button" aria-label="Subscribe">
                    </div>
                  </div>
                </form>
              </div>

            </li>
          </ul>

          <div class="footer-social-icons">
            <!-- Icon-only links: aria-label supplies the accessible name -->
            <a href="https://www.facebook.com/pytorch" target="_blank" rel="noopener noreferrer" class="facebook" aria-label="PyTorch on Facebook"></a>
            <a href="https://twitter.com/pytorch" target="_blank" rel="noopener noreferrer" class="twitter" aria-label="PyTorch on Twitter"></a>
            <a href="https://www.youtube.com/pytorch" target="_blank" rel="noopener noreferrer" class="youtube" aria-label="PyTorch on YouTube"></a>
          </div>
        </div>
      </div>
    </div>
  </footer>

  <!-- GDPR cookie notice; the close button is wired up by theme JS via .close-button -->
  <div class="cookie-banner-wrapper">
  <div class="container">
    <p class="gdpr-notice">To analyze traffic and optimize your experience, we serve cookies on this site. By clicking or navigating, you agree to allow our usage of cookies. As the current maintainers of this site, Facebook’s Cookies Policy applies. Learn more, including about available controls: <a href="https://www.facebook.com/policies/cookies/">Cookies Policy</a>.</p>
    <!-- Functional control, not decoration: alt gives it an accessible name -->
    <img class="close-button" src="../../../_static/images/pytorch-x.svg" alt="Close">
  </div>
</div>

  <!-- End Footer -->

  <!-- Begin Mobile Menu -->

  <div class="mobile-main-menu">
    <div class="container-fluid">
      <div class="container">
        <div class="mobile-main-menu-header-container">
          <a class="header-logo" href="https://pytorch.org/" aria-label="PyTorch"></a>
          <!-- Icon-only close control (handler bound by theme JS via data-behavior);
               aria-label supplies the accessible name -->
          <a class="main-menu-close-button" href="#" data-behavior="close-mobile-menu" aria-label="Close menu"></a>
        </div>
      </div>
    </div>

    <div class="mobile-main-menu-links-container">
      <div class="main-menu">
        <ul>
          <li>
            <a href="https://pytorch.org/get-started">Get Started</a>
          </li>

          <li>
            <a href="https://pytorch.org/features">Features</a>
          </li>

          <li>
            <a href="https://pytorch.org/ecosystem">Ecosystem</a>
          </li>

          <li>
            <a href="https://pytorch.org/mobile">Mobile</a>
          </li>

          <li>
            <a href="https://pytorch.org/hub">PyTorch Hub</a>
          </li>

          <li>
            <a href="https://pytorch.org/blog/">Blog</a>
          </li>

          <li>
            <a href="https://pytorch.org/tutorials">Tutorials</a>
          </li>

          <!-- "active" marks the section the reader is currently in (Docs) -->
          <li class="active">
            <a href="https://pytorch.org/docs/stable/index.html">Docs</a>
          </li>

          <li>
            <a href="https://pytorch.org/resources">Resources</a>
          </li>

          <li>
            <a href="https://github.com/pytorch/pytorch">Github</a>
          </li>
        </ul>
      </div>
    </div>
  </div>

  <!-- End Mobile Menu -->

  <!-- type attribute omitted: text/javascript is the HTML5 default -->
  <script src="../../../_static/js/vendor/anchor.min.js"></script>

  <script>
    // Wire up the theme's interactive behaviors once the DOM is ready.
    // All *.bind() helpers are defined by the theme's bundled JS.
    $(document).ready(function() {
      mobileMenu.bind();
      mobileTOC.bind();
      pytorchAnchors.bind();
      sideMenus.bind();
      scrollToAnchor.bind();
      highlightNavigation.bind();
      mainMenuDropdown.bind();
      filterTags.bind();

      // Remove any empty p tags that Sphinx adds
      $("[data-tags='null']").remove();

      // Add class to links that have code blocks, since we cannot create links in code blocks
      $("article.pytorch-article a span.pre").each(function() {
        $(this).closest("a").addClass("has-code");
      });
    });
  </script>
</body>
</html>