

<!DOCTYPE html>
<html class="writer-html5" lang="en" >
<head>
  <meta charset="utf-8" />
  
  <meta name="viewport" content="width=device-width, initial-scale=1.0" />
  
  <title>mindspore.nn.layer.quant &mdash; MindSpore master documentation</title>
  

  
  <link rel="stylesheet" href="../../../../_static/css/theme.css" type="text/css" />
  <link rel="stylesheet" href="../../../../_static/pygments.css" type="text/css" />

  
  

  
  

  

  
  <!--[if lt IE 9]>
    <script src="../../../../_static/js/html5shiv.min.js"></script>
  <![endif]-->
  
    
      <script type="text/javascript" id="documentation_options" data-url_root="../../../../" src="../../../../_static/documentation_options.js"></script>
        <script src="../../../../_static/jquery.js"></script>
        <script src="../../../../_static/underscore.js"></script>
        <script src="../../../../_static/doctools.js"></script>
        <script src="../../../../_static/language_data.js"></script>
        <script async="async" src="https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.5/latest.js?config=TeX-AMS-MML_HTMLorMML"></script>
    
    <script type="text/javascript" src="../../../../_static/js/theme.js"></script>

    
    <link rel="index" title="Index" href="../../../../genindex.html" />
    <link rel="search" title="Search" href="../../../../search.html" /> 
</head>

<body class="wy-body-for-nav">

   
  <div class="wy-grid-for-nav">
    
    <nav data-toggle="wy-nav-shift" class="wy-nav-side">
      <div class="wy-side-scroll">
        <div class="wy-side-nav-search" >
          

          
            <a href="../../../../index.html" class="icon icon-home"> MindSpore
          

          
          </a>

          
            
            
          

          
<div role="search">
  <form id="rtd-search-form" class="wy-form" action="../../../../search.html" method="get">
    <input type="text" name="q" placeholder="Search docs" />
    <input type="hidden" name="check_keywords" value="yes" />
    <input type="hidden" name="area" value="default" />
  </form>
</div>

          
        </div>

        
        <div class="wy-menu wy-menu-vertical" data-spy="affix" role="navigation" aria-label="main navigation">
          
            
            
              
            
            
              <p class="caption"><span class="caption-text">MindSpore Python API</span></p>
<ul>
<li class="toctree-l1"><a class="reference internal" href="../../../../api_python/mindspore.html">mindspore</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../api_python/mindspore.common.initializer.html">mindspore.common.initializer</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../api_python/mindspore.communication.html">mindspore.communication</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../api_python/mindspore.compression.html">mindspore.compression</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../api_python/mindspore.context.html">mindspore.context</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../api_python/mindspore.dataset.html">mindspore.dataset</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../api_python/mindspore.dataset.audio.html">mindspore.dataset.audio</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../api_python/mindspore.dataset.config.html">mindspore.dataset.config</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../api_python/mindspore.dataset.text.html">mindspore.dataset.text</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../api_python/mindspore.dataset.transforms.html">mindspore.dataset.transforms</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../api_python/mindspore.dataset.vision.html">mindspore.dataset.vision</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../api_python/mindspore.mindrecord.html">mindspore.mindrecord</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../api_python/mindspore.nn.html">mindspore.nn</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../api_python/mindspore.nn.probability.html">mindspore.nn.probability</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../api_python/mindspore.nn.transformer.html">mindspore.nn.transformer</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../api_python/mindspore.numpy.html">mindspore.numpy</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../api_python/mindspore.ops.html">mindspore.ops</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../api_python/mindspore.parallel.html">mindspore.parallel</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../api_python/mindspore.parallel.nn.html">mindspore.parallel.nn</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../api_python/mindspore.profiler.html">mindspore.profiler</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../api_python/mindspore.scipy.html">mindspore.scipy</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../api_python/mindspore.train.html">mindspore.train</a></li>
<li class="toctree-l1"><a class="reference internal" href="../../../../api_python/mindspore.boost.html">mindspore.boost</a></li>
</ul>
<p class="caption"><span class="caption-text">MindSpore C++ API</span></p>
<ul>
<li class="toctree-l1"><a class="reference external" href="https://www.mindspore.cn/lite/api/zh-CN/master/api_cpp/mindspore.html">MindSpore Lite↗</a></li>
</ul>

            
          
        </div>
        
      </div>
    </nav>

    <section data-toggle="wy-nav-shift" class="wy-nav-content-wrap">

      
      <nav class="wy-nav-top" aria-label="top navigation">
        
          <i data-toggle="wy-nav-top" class="fa fa-bars"></i>
          <a href="../../../../index.html">MindSpore</a>
        
      </nav>


      <div class="wy-nav-content">
        
        <div class="rst-content">
        
          

















<div role="navigation" aria-label="breadcrumbs navigation">

  <ul class="wy-breadcrumbs">
    
      <li><a href="../../../../index.html" class="icon icon-home"></a> &raquo;</li>
        
          <li><a href="../../../index.html">Module code</a> &raquo;</li>
        
      <li>mindspore.nn.layer.quant</li>
    
    
      <li class="wy-breadcrumbs-aside">
        
      </li>
    
  </ul>

  
  <hr/>
</div>
          <div role="main" class="document" itemscope="itemscope" itemtype="http://schema.org/Article">
           <div itemprop="articleBody">
            
  <h1>Source code for mindspore.nn.layer.quant</h1><div class="highlight"><pre>
<span></span><span class="c1"># Copyright 2021 Huawei Technologies Co., Ltd</span>
<span class="c1">#</span>
<span class="c1"># Licensed under the Apache License, Version 2.0 (the &quot;License&quot;);</span>
<span class="c1"># you may not use this file except in compliance with the License.</span>
<span class="c1"># You may obtain a copy of the License at</span>
<span class="c1">#</span>
<span class="c1"># http://www.apache.org/licenses/LICENSE-2.0</span>
<span class="c1">#</span>
<span class="c1"># Unless required by applicable law or agreed to in writing, software</span>
<span class="c1"># distributed under the License is distributed on an &quot;AS IS&quot; BASIS,</span>
<span class="c1"># WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.</span>
<span class="c1"># See the License for the specific language governing permissions and</span>
<span class="c1"># limitations under the License.</span>
<span class="c1"># ============================================================================</span>
<span class="sd">&quot;&quot;&quot;Quantization aware training.&quot;&quot;&quot;</span>

<span class="kn">from</span> <span class="nn">functools</span> <span class="kn">import</span> <span class="n">partial</span>
<span class="kn">from</span> <span class="nn">collections</span> <span class="kn">import</span> <span class="n">namedtuple</span>
<span class="kn">import</span> <span class="nn">numpy</span> <span class="k">as</span> <span class="nn">np</span>
<span class="kn">import</span> <span class="nn">mindspore.common.dtype</span> <span class="k">as</span> <span class="nn">mstype</span>
<span class="kn">from</span> <span class="nn">mindspore.ops.primitive</span> <span class="kn">import</span> <span class="n">Primitive</span>
<span class="kn">from</span> <span class="nn">mindspore.ops</span> <span class="kn">import</span> <span class="n">operations</span> <span class="k">as</span> <span class="n">P</span>
<span class="kn">from</span> <span class="nn">mindspore.common.parameter</span> <span class="kn">import</span> <span class="n">Parameter</span>
<span class="kn">from</span> <span class="nn">mindspore.common.initializer</span> <span class="kn">import</span> <span class="n">initializer</span>
<span class="kn">from</span> <span class="nn">mindspore.common.tensor</span> <span class="kn">import</span> <span class="n">Tensor</span>
<span class="kn">from</span> <span class="nn">mindspore._checkparam</span> <span class="kn">import</span> <span class="n">Validator</span><span class="p">,</span> <span class="n">twice</span>
<span class="kn">from</span> <span class="nn">mindspore.compression.common</span> <span class="kn">import</span> <span class="n">QuantDtype</span>
<span class="kn">import</span> <span class="nn">mindspore.context</span> <span class="k">as</span> <span class="nn">context</span>
<span class="kn">from</span> <span class="nn">.normalization</span> <span class="kn">import</span> <span class="n">BatchNorm2d</span>
<span class="kn">from</span> <span class="nn">.activation</span> <span class="kn">import</span> <span class="n">get_activation</span>
<span class="kn">from</span> <span class="nn">..cell</span> <span class="kn">import</span> <span class="n">Cell</span>
<span class="kn">from</span> <span class="nn">...</span> <span class="kn">import</span> <span class="n">nn</span>
<span class="kn">from</span> <span class="nn">...ops.operations</span> <span class="kn">import</span> <span class="n">_quant_ops</span> <span class="k">as</span> <span class="n">Q</span>

<span class="n">__all__</span> <span class="o">=</span> <span class="p">[</span>
    <span class="s1">&#39;FakeQuantWithMinMaxObserver&#39;</span><span class="p">,</span>
    <span class="s1">&#39;Conv2dBnFoldQuantOneConv&#39;</span><span class="p">,</span>
    <span class="s1">&#39;Conv2dBnFoldQuant&#39;</span><span class="p">,</span>
    <span class="s1">&#39;Conv2dBnWithoutFoldQuant&#39;</span><span class="p">,</span>
    <span class="s1">&#39;Conv2dQuant&#39;</span><span class="p">,</span>
    <span class="s1">&#39;DenseQuant&#39;</span><span class="p">,</span>
    <span class="s1">&#39;ActQuant&#39;</span><span class="p">,</span>
    <span class="s1">&#39;TensorAddQuant&#39;</span><span class="p">,</span>
    <span class="s1">&#39;MulQuant&#39;</span><span class="p">,</span>
<span class="p">]</span>


<span class="k">class</span> <span class="nc">BatchNormFoldCell</span><span class="p">(</span><span class="n">Cell</span><span class="p">):</span>
    <span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    Batch Normalization folded.</span>

<span class="sd">    Args:</span>
<span class="sd">        momentum (float): Momentum value must be in the range [0, 1]. Default: 0.9.</span>
<span class="sd">        epsilon (float): A small float number to avoid dividing by 0. 1e-5 if dtype in</span>
<span class="sd">            float32 else 1e-3. Default: 1e-5.</span>
<span class="sd">        freeze_bn (int): Delay in steps at which computation switches from regular batch</span>
<span class="sd">            norm to frozen mean and std. Default: 0.</span>

<span class="sd">    Inputs:</span>
<span class="sd">        - **x** (Tensor) - Tensor of shape :math:`(N, C, H, W)`.</span>
<span class="sd">        - **mean** (Tensor) - Tensor of shape :math:`(C,)`.</span>
<span class="sd">        - **variance** (Tensor) - Tensor of shape :math:`(C,)`.</span>
<span class="sd">        - **global_step** (Tensor) - Tensor to record current global step.</span>

<span class="sd">    Outputs:</span>
<span class="sd">        Tuple of 4 Tensor, the normalized input and the updated parameters.</span>

<span class="sd">        - **batch_mean** (Tensor) - Tensor of shape :math:`(C,)`.</span>
<span class="sd">        - **batch_std** (Tensor) - Tensor of shape :math:`(C,)`.</span>
<span class="sd">        - **running_mean** (Tensor) - Tensor of shape :math:`(C,)`.</span>
<span class="sd">        - **running_std** (Tensor) - Tensor of shape :math:`(C,)`.</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">momentum</span><span class="o">=</span><span class="mf">0.9</span><span class="p">,</span> <span class="n">epsilon</span><span class="o">=</span><span class="mf">1e-5</span><span class="p">,</span> <span class="n">freeze_bn</span><span class="o">=</span><span class="mi">0</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;Initialize batch norm fold layer&quot;&quot;&quot;</span>
        <span class="nb">super</span><span class="p">(</span><span class="n">BatchNormFoldCell</span><span class="p">,</span> <span class="bp">self</span><span class="p">)</span><span class="o">.</span><span class="fm">__init__</span><span class="p">()</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">epsilon</span> <span class="o">=</span> <span class="n">epsilon</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">is_gpu</span> <span class="o">=</span> <span class="n">context</span><span class="o">.</span><span class="n">get_context</span><span class="p">(</span><span class="s1">&#39;device_target&#39;</span><span class="p">)</span> <span class="o">==</span> <span class="s2">&quot;GPU&quot;</span>
        <span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">is_gpu</span><span class="p">:</span>
            <span class="bp">self</span><span class="o">.</span><span class="n">bn_train</span> <span class="o">=</span> <span class="n">Q</span><span class="o">.</span><span class="n">BatchNormFold</span><span class="p">(</span><span class="n">momentum</span><span class="p">,</span> <span class="n">epsilon</span><span class="p">,</span> <span class="n">is_training</span><span class="o">=</span><span class="kc">True</span><span class="p">,</span> <span class="n">freeze_bn</span><span class="o">=</span><span class="n">freeze_bn</span><span class="p">)</span>
            <span class="bp">self</span><span class="o">.</span><span class="n">bn_infer</span> <span class="o">=</span> <span class="n">Q</span><span class="o">.</span><span class="n">BatchNormFold</span><span class="p">(</span><span class="n">momentum</span><span class="p">,</span> <span class="n">epsilon</span><span class="p">,</span> <span class="n">is_training</span><span class="o">=</span><span class="kc">False</span><span class="p">,</span> <span class="n">freeze_bn</span><span class="o">=</span><span class="n">freeze_bn</span><span class="p">)</span>
        <span class="k">else</span><span class="p">:</span>
            <span class="bp">self</span><span class="o">.</span><span class="n">bn_reduce</span> <span class="o">=</span> <span class="n">P</span><span class="o">.</span><span class="n">BNTrainingReduce</span><span class="p">()</span>
            <span class="bp">self</span><span class="o">.</span><span class="n">bn_update</span> <span class="o">=</span> <span class="n">Q</span><span class="o">.</span><span class="n">BatchNormFoldD</span><span class="p">(</span><span class="n">momentum</span><span class="p">,</span> <span class="n">epsilon</span><span class="p">,</span> <span class="n">is_training</span><span class="o">=</span><span class="kc">True</span><span class="p">,</span> <span class="n">freeze_bn</span><span class="o">=</span><span class="n">freeze_bn</span><span class="p">)</span>

    <span class="k">def</span> <span class="nf">construct</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">x</span><span class="p">,</span> <span class="n">mean</span><span class="p">,</span> <span class="n">variance</span><span class="p">,</span> <span class="n">global_step</span><span class="p">):</span>
        <span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">is_gpu</span><span class="p">:</span>
            <span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">training</span><span class="p">:</span>
                <span class="n">batch_mean</span><span class="p">,</span> <span class="n">batch_std</span><span class="p">,</span> <span class="n">running_mean</span><span class="p">,</span> <span class="n">running_std</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">bn_train</span><span class="p">(</span><span class="n">x</span><span class="p">,</span> <span class="n">mean</span><span class="p">,</span> <span class="n">variance</span><span class="p">,</span> <span class="n">global_step</span><span class="p">)</span>
            <span class="k">else</span><span class="p">:</span>
                <span class="n">batch_mean</span><span class="p">,</span> <span class="n">batch_std</span><span class="p">,</span> <span class="n">running_mean</span><span class="p">,</span> <span class="n">running_std</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">bn_infer</span><span class="p">(</span><span class="n">x</span><span class="p">,</span> <span class="n">mean</span><span class="p">,</span> <span class="n">variance</span><span class="p">,</span> <span class="n">global_step</span><span class="p">)</span>
        <span class="k">else</span><span class="p">:</span>
            <span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">training</span><span class="p">:</span>
                <span class="n">x_sum</span><span class="p">,</span> <span class="n">x_square_sum</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">bn_reduce</span><span class="p">(</span><span class="n">x</span><span class="p">)</span>
                <span class="n">_</span><span class="p">,</span> <span class="n">batch_mean</span><span class="p">,</span> <span class="n">batch_std</span><span class="p">,</span> <span class="n">running_mean</span><span class="p">,</span> <span class="n">running_std</span><span class="p">,</span> <span class="n">mean_updated</span><span class="p">,</span> <span class="n">variance_updated</span> <span class="o">=</span> \
                    <span class="bp">self</span><span class="o">.</span><span class="n">bn_update</span><span class="p">(</span><span class="n">x</span><span class="p">,</span> <span class="n">x_sum</span><span class="p">,</span> <span class="n">x_square_sum</span><span class="p">,</span> <span class="n">mean</span><span class="p">,</span> <span class="n">variance</span><span class="p">)</span>
                <span class="n">P</span><span class="o">.</span><span class="n">Assign</span><span class="p">()(</span><span class="n">mean</span><span class="p">,</span> <span class="n">mean_updated</span><span class="p">)</span>
                <span class="n">P</span><span class="o">.</span><span class="n">Assign</span><span class="p">()(</span><span class="n">variance</span><span class="p">,</span> <span class="n">variance_updated</span><span class="p">)</span>
            <span class="k">else</span><span class="p">:</span>
                <span class="n">batch_mean</span> <span class="o">=</span> <span class="n">P</span><span class="o">.</span><span class="n">ZerosLike</span><span class="p">()(</span><span class="n">variance</span><span class="p">)</span>
                <span class="n">batch_std</span> <span class="o">=</span> <span class="n">P</span><span class="o">.</span><span class="n">OnesLike</span><span class="p">()(</span><span class="n">variance</span><span class="p">)</span>
                <span class="n">running_mean</span> <span class="o">=</span> <span class="n">P</span><span class="o">.</span><span class="n">Add</span><span class="p">()(</span><span class="n">mean</span><span class="p">,</span> <span class="mf">0.</span><span class="p">)</span>
                <span class="n">running_std</span> <span class="o">=</span> <span class="n">P</span><span class="o">.</span><span class="n">Sqrt</span><span class="p">()(</span><span class="n">P</span><span class="o">.</span><span class="n">Add</span><span class="p">()(</span><span class="n">variance</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">epsilon</span><span class="p">))</span>
        <span class="k">return</span> <span class="n">batch_mean</span><span class="p">,</span> <span class="n">batch_std</span><span class="p">,</span> <span class="n">running_mean</span><span class="p">,</span> <span class="n">running_std</span>


<span class="k">def</span> <span class="nf">_partial_init</span><span class="p">(</span><span class="n">cls_or_self</span><span class="p">,</span> <span class="o">**</span><span class="n">kwargs</span><span class="p">):</span>
    <span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    Wrapper that allows creation of class factories.</span>

<span class="sd">    This can be useful when there is a need to create classes with the same</span>
<span class="sd">    constructor arguments, but different instances.</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; class Foo:</span>
<span class="sd">        ...     def __init__(self, a, b, answer):</span>
<span class="sd">        ...         pass</span>
<span class="sd">        &gt;&gt;&gt; Foo.partial_init = classmethod(_partial_init)</span>
<span class="sd">        &gt;&gt;&gt; foo_builder = Foo.partial_init(a=3, b=4).partial_init(answer=42)</span>
<span class="sd">        &gt;&gt;&gt; foo_instance1 = foo_builder()</span>
<span class="sd">        &gt;&gt;&gt; foo_instance2 = foo_builder()</span>
<span class="sd">        &gt;&gt;&gt; result = (id(foo_instance1) == id(foo_instance2))</span>
<span class="sd">        &gt;&gt;&gt; print(result)</span>
<span class="sd">        False</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="k">class</span> <span class="nc">_PartialWrapper</span><span class="p">:</span>
        <span class="sa">r</span><span class="sd">&quot;&quot;&quot;</span>
<span class="sd">        class of wrapper that allows creation of class factories.</span>
<span class="sd">        &quot;&quot;&quot;</span>

        <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">p</span><span class="p">):</span>
            <span class="bp">self</span><span class="o">.</span><span class="n">p</span> <span class="o">=</span> <span class="n">p</span>

        <span class="k">def</span> <span class="fm">__call__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="o">*</span><span class="n">args</span><span class="p">,</span> <span class="o">**</span><span class="n">keywords</span><span class="p">):</span>
            <span class="k">return</span> <span class="bp">self</span><span class="o">.</span><span class="n">p</span><span class="p">(</span><span class="o">*</span><span class="n">args</span><span class="p">,</span> <span class="o">**</span><span class="n">keywords</span><span class="p">)</span>

        <span class="k">def</span> <span class="fm">__repr__</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
            <span class="k">return</span> <span class="bp">self</span><span class="o">.</span><span class="n">p</span><span class="o">.</span><span class="fm">__repr__</span><span class="p">()</span>

        <span class="n">partial_init</span> <span class="o">=</span> <span class="n">_partial_init</span>

    <span class="n">r</span> <span class="o">=</span> <span class="n">_PartialWrapper</span><span class="p">(</span><span class="n">partial</span><span class="p">(</span><span class="n">cls_or_self</span><span class="p">,</span> <span class="o">**</span><span class="n">kwargs</span><span class="p">))</span>
    <span class="k">return</span> <span class="n">r</span>


<span class="k">class</span> <span class="nc">_Observer</span><span class="p">(</span><span class="n">Cell</span><span class="p">):</span>
    <span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    Base class of Observer. Observer is used to calculate the statistics of specific layer.</span>

<span class="sd">    Notes:</span>
<span class="sd">        This class is an abstract class.</span>

<span class="sd">    Args:</span>
<span class="sd">        quant_dtype (QuantDtype): The type of FakeQuant data.</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">quant_dtype</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;Initialize _Observer.&quot;&quot;&quot;</span>
        <span class="nb">super</span><span class="p">(</span><span class="n">_Observer</span><span class="p">,</span> <span class="bp">self</span><span class="p">)</span><span class="o">.</span><span class="fm">__init__</span><span class="p">()</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">quant_dtype</span> <span class="o">=</span> <span class="n">quant_dtype</span>

    <span class="k">def</span> <span class="nf">extend_repr</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="n">s</span> <span class="o">=</span> <span class="sa">f</span><span class="s2">&quot;quant_dtype=</span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">quant_dtype</span><span class="si">}</span><span class="s2">&quot;</span>
        <span class="k">return</span> <span class="n">s</span>

    <span class="k">def</span> <span class="nf">construct</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="k">pass</span>

    <span class="n">partial_init</span> <span class="o">=</span> <span class="nb">classmethod</span><span class="p">(</span><span class="n">_partial_init</span><span class="p">)</span>


<span class="k">class</span> <span class="nc">UniformQuantObserver</span><span class="p">(</span><span class="n">_Observer</span><span class="p">):</span>
    <span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    The base class of Uniform Quantization Observer.</span>

<span class="sd">    Args:</span>
<span class="sd">        quant_dtype (QuantDtype): The type of FakeQuant data. Default: QuantDtype.INT8.</span>
<span class="sd">        per_channel (bool):  Quantization granularity based on layer or on channel. Default: False.</span>
<span class="sd">        symmetric (bool): Whether the quantization algorithm is symmetric or not. Default: False.</span>
<span class="sd">        narrow_range (bool): Whether the quantization algorithm uses narrow range or not. Default: False.</span>
<span class="sd">        num_channels (int): declares the min and max channel size. Default: 1.</span>

<span class="sd">    Returns:</span>
<span class="sd">        Tensor.</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="n">min_max_map</span> <span class="o">=</span> <span class="p">{</span>
        <span class="n">QuantDtype</span><span class="o">.</span><span class="n">INT2</span><span class="p">:</span> <span class="p">(</span><span class="o">-</span><span class="mi">2</span><span class="p">,</span> <span class="mi">1</span><span class="p">),</span>
        <span class="n">QuantDtype</span><span class="o">.</span><span class="n">INT3</span><span class="p">:</span> <span class="p">(</span><span class="o">-</span><span class="mi">4</span><span class="p">,</span> <span class="mi">3</span><span class="p">),</span>
        <span class="n">QuantDtype</span><span class="o">.</span><span class="n">INT4</span><span class="p">:</span> <span class="p">(</span><span class="o">-</span><span class="mi">8</span><span class="p">,</span> <span class="mi">7</span><span class="p">),</span>
        <span class="n">QuantDtype</span><span class="o">.</span><span class="n">INT5</span><span class="p">:</span> <span class="p">(</span><span class="o">-</span><span class="mi">16</span><span class="p">,</span> <span class="mi">15</span><span class="p">),</span>
        <span class="n">QuantDtype</span><span class="o">.</span><span class="n">INT6</span><span class="p">:</span> <span class="p">(</span><span class="o">-</span><span class="mi">32</span><span class="p">,</span> <span class="mi">31</span><span class="p">),</span>
        <span class="n">QuantDtype</span><span class="o">.</span><span class="n">INT7</span><span class="p">:</span> <span class="p">(</span><span class="o">-</span><span class="mi">64</span><span class="p">,</span> <span class="mi">63</span><span class="p">),</span>
        <span class="n">QuantDtype</span><span class="o">.</span><span class="n">INT8</span><span class="p">:</span> <span class="p">(</span><span class="o">-</span><span class="mi">128</span><span class="p">,</span> <span class="mi">127</span><span class="p">),</span>

        <span class="n">QuantDtype</span><span class="o">.</span><span class="n">UINT2</span><span class="p">:</span> <span class="p">(</span><span class="mi">0</span><span class="p">,</span> <span class="mi">3</span><span class="p">),</span>
        <span class="n">QuantDtype</span><span class="o">.</span><span class="n">UINT3</span><span class="p">:</span> <span class="p">(</span><span class="mi">0</span><span class="p">,</span> <span class="mi">7</span><span class="p">),</span>
        <span class="n">QuantDtype</span><span class="o">.</span><span class="n">UINT4</span><span class="p">:</span> <span class="p">(</span><span class="mi">0</span><span class="p">,</span> <span class="mi">15</span><span class="p">),</span>
        <span class="n">QuantDtype</span><span class="o">.</span><span class="n">UINT5</span><span class="p">:</span> <span class="p">(</span><span class="mi">0</span><span class="p">,</span> <span class="mi">31</span><span class="p">),</span>
        <span class="n">QuantDtype</span><span class="o">.</span><span class="n">UINT6</span><span class="p">:</span> <span class="p">(</span><span class="mi">0</span><span class="p">,</span> <span class="mi">63</span><span class="p">),</span>
        <span class="n">QuantDtype</span><span class="o">.</span><span class="n">UINT7</span><span class="p">:</span> <span class="p">(</span><span class="mi">0</span><span class="p">,</span> <span class="mi">127</span><span class="p">),</span>
        <span class="n">QuantDtype</span><span class="o">.</span><span class="n">UINT8</span><span class="p">:</span> <span class="p">(</span><span class="mi">0</span><span class="p">,</span> <span class="mi">255</span><span class="p">)</span>
    <span class="p">}</span>

    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">quant_dtype</span><span class="o">=</span><span class="n">QuantDtype</span><span class="o">.</span><span class="n">INT8</span><span class="p">,</span> <span class="n">per_channel</span><span class="o">=</span><span class="kc">False</span><span class="p">,</span> <span class="n">symmetric</span><span class="o">=</span><span class="kc">False</span><span class="p">,</span> <span class="n">narrow_range</span><span class="o">=</span><span class="kc">False</span><span class="p">,</span>
                 <span class="n">num_channels</span><span class="o">=</span><span class="mi">1</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;Initialize UniformQuantObserver.&quot;&quot;&quot;</span>
        <span class="nb">super</span><span class="p">(</span><span class="n">UniformQuantObserver</span><span class="p">,</span> <span class="bp">self</span><span class="p">)</span><span class="o">.</span><span class="fm">__init__</span><span class="p">(</span><span class="n">quant_dtype</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">per_channel</span> <span class="o">=</span> <span class="n">per_channel</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">symmetric</span> <span class="o">=</span> <span class="n">symmetric</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">narrow_range</span> <span class="o">=</span> <span class="n">narrow_range</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">num_channels</span> <span class="o">=</span> <span class="n">num_channels</span>


<div class="viewcode-block" id="FakeQuantWithMinMaxObserver"><a class="viewcode-back" href="../../../../api_python/nn/mindspore.nn.FakeQuantWithMinMaxObserver.html#mindspore.nn.FakeQuantWithMinMaxObserver">[docs]</a><span class="k">class</span> <span class="nc">FakeQuantWithMinMaxObserver</span><span class="p">(</span><span class="n">UniformQuantObserver</span><span class="p">):</span>
    <span class="sa">r</span><span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    Quantization aware operation which provides the fake quantization observer function on data with min and max.</span>

<span class="sd">    The detail of the quantization mode `DEFAULT` is described as below:</span>

<span class="sd">    The running min/max :math:`x_{min}` and :math:`x_{max}` are computed as:</span>

<span class="sd">    .. math::</span>

<span class="sd">        \begin{array}{ll} \\</span>
<span class="sd">            x_{min} =</span>
<span class="sd">            \begin{cases}</span>
<span class="sd">                \min(\min(X), 0)</span>
<span class="sd">                  &amp; \text{ if } ema = \text{False} \\</span>
<span class="sd">                \min((1 - c) \min(X) + \text{c } x_{min}, 0)</span>
<span class="sd">                  &amp; \text{ if } \text{otherwise}</span>
<span class="sd">            \end{cases}\\</span>
<span class="sd">            x_{max} =</span>
<span class="sd">            \begin{cases}</span>
<span class="sd">                \max(\max(X), 0)</span>
<span class="sd">                  &amp; \text{ if } ema = \text{False} \\</span>
<span class="sd">                \max((1 - c) \max(X) + \text{c } x_{max}, 0)</span>
<span class="sd">                  &amp; \text{ if } \text{otherwise}</span>
<span class="sd">            \end{cases}</span>
<span class="sd">        \end{array}</span>

<span class="sd">    where X is the input tensor, and :math:`c` is the `ema_decay`.</span>

<span class="sd">    The scale and zero point zp are computed as:</span>

<span class="sd">    .. math::</span>

<span class="sd">        \begin{array}{ll} \\</span>
<span class="sd">            scale =</span>
<span class="sd">            \begin{cases}</span>
<span class="sd">                \frac{x_{max} - x_{min}}{Q_{max} - Q_{min}}</span>
<span class="sd">                  &amp; \text{ if } symmetric = \text{False} \\</span>
<span class="sd">                \frac{2\max(x_{max}, \left | x_{min} \right |) }{Q_{max} - Q_{min}}</span>
<span class="sd">                  &amp; \text{ if } \text{otherwise}</span>
<span class="sd">            \end{cases}\\</span>
<span class="sd">            zp\_min = Q_{min} - \frac{x_{min}}{scale} \\</span>
<span class="sd">            zp = \left \lfloor \min(Q_{max}, \max(Q_{min}, zp\_min)) + 0.5 \right \rfloor</span>
<span class="sd">        \end{array}</span>

<span class="sd">    where :math:`Q_{max}` and :math:`Q_{min}` is decided by quant_dtype, for example, if quant_dtype=INT8,</span>
<span class="sd">    then :math:`Q_{max} = 127` and :math:`Q_{min} = -128`.</span>

<span class="sd">    The fake quant output is computed as:</span>

<span class="sd">    .. math::</span>

<span class="sd">        \begin{array}{ll} \\</span>
<span class="sd">            u_{min} = (Q_{min} - zp) * scale \\</span>
<span class="sd">            u_{max} = (Q_{max} - zp) * scale \\</span>
<span class="sd">            u_X = \left \lfloor \frac{\min(u_{max}, \max(u_{min}, X)) - u_{min}}{scale}</span>
<span class="sd">            + 0.5 \right \rfloor \\</span>
<span class="sd">            output = u_X * scale + u_{min}</span>
<span class="sd">        \end{array}</span>

<span class="sd">    The detail of the quantization mode `LEARNED_SCALE` is described as below:</span>

<span class="sd">    The fake quant output is computed as:</span>

<span class="sd">    .. math::</span>

<span class="sd">        \bar{X}=\left\{\begin{matrix}</span>
<span class="sd">        clip\left ( \frac{X}{maxq},0,1\right ) \qquad \quad if\quad neg\_trunc\\</span>
<span class="sd">        clip\left ( \frac{X}{maxq},-1,1\right )\qquad \ if\quad otherwise</span>
<span class="sd">        \end{matrix}\right. \\</span>

<span class="sd">        output=\frac{floor\left ( \bar{X}\ast  Q_{max}+0.5  \right ) \ast scale }{Q_{max}}</span>

<span class="sd">    where X is the input tensor, and :math:`Q_{max}` (quant_max) is decided by quant_dtype and neg_trunc;</span>
<span class="sd">    for example, if quant_dtype=INT8 and neg_trunc works, :math:`Q_{max} = 256`, otherwise :math:`Q_{max} = 127`.</span>

<span class="sd">    The maxq is updated by training, and its gradient is calculated as follows:</span>

<span class="sd">    .. math::</span>

<span class="sd">        \frac{\partial \ output}{\partial \ maxq} = \left\{\begin{matrix}</span>
<span class="sd">        -\frac{X}{maxq}+\left \lfloor \frac{X}{maxq} \right \rceil \qquad if\quad bound_{lower}&lt; \frac{X}{maxq}&lt; 1\\</span>
<span class="sd">        -1 \qquad \quad \qquad \quad if\quad \frac{X}{maxq}\le bound_{lower}\\</span>
<span class="sd">         1  \qquad \quad \qquad \quad if\quad \frac{X}{maxq}\ge  1 \qquad \quad</span>
<span class="sd">        \end{matrix}\right. \\</span>

<span class="sd">        bound_{lower}=</span>
<span class="sd">        \left\{\begin{matrix}</span>
<span class="sd">         0\qquad \quad if\quad neg\_trunc\\</span>
<span class="sd">        -1\qquad if\quad otherwise</span>
<span class="sd">        \end{matrix}\right.</span>

<span class="sd">    Then minq is computed as:</span>

<span class="sd">    .. math::</span>

<span class="sd">        minq=\left\{\begin{matrix}</span>
<span class="sd">        0  \qquad \qquad \quad if\quad neg\_trunc\\</span>
<span class="sd">        -maxq\qquad if\quad otherwise</span>
<span class="sd">        \end{matrix}\right.</span>

<span class="sd">    When exporting, the scale and zero point zp are computed as:</span>

<span class="sd">    .. math::</span>

<span class="sd">        scale=\frac{maxq}{quant\_max} ,\quad zp=0 \\</span>

<span class="sd">    zp is equal to 0 consistently, due to the symmetric nature of the `LEARNED_SCALE` mode.</span>

<span class="sd">    Args:</span>
<span class="sd">        min_init (int, float, list): The initialized min value. Default: -6.</span>
<span class="sd">        max_init (int, float, list): The initialized max value. Default: 6.</span>
<span class="sd">        ema (bool): The exponential Moving Average algorithm updates min and max. Default: False.</span>
<span class="sd">        ema_decay (float): Exponential Moving Average algorithm parameter. Default: 0.999.</span>
<span class="sd">        per_channel (bool):  Quantization granularity based on layer or on channel. Default: False.</span>
<span class="sd">        channel_axis (int): Quantization by channel axis. Default: 1.</span>
<span class="sd">        num_channels (int): declares the min and max channel size. Default: 1.</span>
<span class="sd">        quant_dtype (QuantDtype): The datatype of quantization, supporting 4 and 8bits. Default: QuantDtype.INT8.</span>
<span class="sd">        symmetric (bool): Whether the quantization algorithm is symmetric or not. Default: False.</span>
<span class="sd">        narrow_range (bool): Whether the quantization algorithm uses narrow range or not. Default: False.</span>
<span class="sd">        quant_delay (int): Quantization delay parameters according to the global step. Default: 0.</span>
<span class="sd">        neg_trunc (bool): Whether the quantization algorithm uses negative truncation or not. Default: False.</span>
<span class="sd">        mode (str): Optional quantization mode, currently only `DEFAULT`(QAT) and `LEARNED_SCALE` are supported.</span>
<span class="sd">            Default: &quot;DEFAULT&quot;.</span>

<span class="sd">    Inputs:</span>
<span class="sd">        - **x** (Tensor) - The input of FakeQuantWithMinMaxObserver. The input dimension is preferably 2D or 4D.</span>

<span class="sd">    Outputs:</span>
<span class="sd">        Tensor, with the same type and shape as the `x`.</span>

<span class="sd">    Raises:</span>
<span class="sd">        TypeError: If `min_init` or `max_init` is not int, float or list.</span>
<span class="sd">        TypeError: If `quant_delay` is not an int.</span>
<span class="sd">        ValueError: If `quant_delay` is less than 0.</span>
<span class="sd">        ValueError: If `min_init` is not less than `max_init`.</span>
<span class="sd">        ValueError: If `mode` is neither `DEFAULT` nor `LEARNED_SCALE`.</span>
<span class="sd">        ValueError: If `mode` is `LEARNED_SCALE` and `symmetric` is not `True`.</span>
<span class="sd">        ValueError: If `mode` is `LEARNED_SCALE`, and `narrow_range` is not `True` unless when `neg_trunc` is `True`.</span>

<span class="sd">    Supported Platforms:</span>
<span class="sd">        ``Ascend`` ``GPU``</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; import mindspore</span>
<span class="sd">        &gt;&gt;&gt; import numpy as np</span>
<span class="sd">        &gt;&gt;&gt; from mindspore import Tensor, nn</span>
<span class="sd">        &gt;&gt;&gt; fake_quant = nn.FakeQuantWithMinMaxObserver()</span>
<span class="sd">        &gt;&gt;&gt; x = Tensor(np.array([[1, 2, 1], [-2, 0, -1]]), mindspore.float32)</span>
<span class="sd">        &gt;&gt;&gt; result = fake_quant(x)</span>
<span class="sd">        &gt;&gt;&gt; print(result)</span>
<span class="sd">        [[ 0.9882355  1.9764705  0.9882355]</span>
<span class="sd">         [-1.9764705  0.        -0.9882355]]</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span>
                 <span class="n">min_init</span><span class="o">=-</span><span class="mi">6</span><span class="p">,</span>
                 <span class="n">max_init</span><span class="o">=</span><span class="mi">6</span><span class="p">,</span>
                 <span class="n">ema</span><span class="o">=</span><span class="kc">False</span><span class="p">,</span>
                 <span class="n">ema_decay</span><span class="o">=</span><span class="mf">0.999</span><span class="p">,</span>
                 <span class="n">per_channel</span><span class="o">=</span><span class="kc">False</span><span class="p">,</span>
                 <span class="n">channel_axis</span><span class="o">=</span><span class="mi">1</span><span class="p">,</span>
                 <span class="n">num_channels</span><span class="o">=</span><span class="mi">1</span><span class="p">,</span>
                 <span class="n">quant_dtype</span><span class="o">=</span><span class="n">QuantDtype</span><span class="o">.</span><span class="n">INT8</span><span class="p">,</span>
                 <span class="n">symmetric</span><span class="o">=</span><span class="kc">False</span><span class="p">,</span>
                 <span class="n">narrow_range</span><span class="o">=</span><span class="kc">False</span><span class="p">,</span>
                 <span class="n">quant_delay</span><span class="o">=</span><span class="mi">0</span><span class="p">,</span>
                 <span class="n">neg_trunc</span><span class="o">=</span><span class="kc">False</span><span class="p">,</span>
                 <span class="n">mode</span><span class="o">=</span><span class="s2">&quot;DEFAULT&quot;</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;Initialize FakeQuantWithMinMaxObserver&quot;&quot;&quot;</span>
        <span class="nb">super</span><span class="p">(</span><span class="n">FakeQuantWithMinMaxObserver</span><span class="p">,</span> <span class="bp">self</span><span class="p">)</span><span class="o">.</span><span class="fm">__init__</span><span class="p">(</span><span class="n">quant_dtype</span><span class="o">=</span><span class="n">quant_dtype</span><span class="p">,</span> <span class="n">per_channel</span><span class="o">=</span><span class="n">per_channel</span><span class="p">,</span>
                                                          <span class="n">symmetric</span><span class="o">=</span><span class="n">symmetric</span><span class="p">,</span> <span class="n">narrow_range</span><span class="o">=</span><span class="n">narrow_range</span><span class="p">,</span>
                                                          <span class="n">num_channels</span><span class="o">=</span><span class="n">num_channels</span><span class="p">)</span>
        <span class="n">Validator</span><span class="o">.</span><span class="n">check_value_type</span><span class="p">(</span><span class="s2">&quot;min_init&quot;</span><span class="p">,</span> <span class="n">min_init</span><span class="p">,</span> <span class="p">[</span><span class="nb">int</span><span class="p">,</span> <span class="nb">float</span><span class="p">,</span> <span class="nb">list</span><span class="p">],</span> <span class="nb">type</span><span class="p">(</span><span class="bp">self</span><span class="p">)</span><span class="o">.</span><span class="vm">__name__</span><span class="p">)</span>
        <span class="n">Validator</span><span class="o">.</span><span class="n">check_value_type</span><span class="p">(</span><span class="s2">&quot;max_init&quot;</span><span class="p">,</span> <span class="n">max_init</span><span class="p">,</span> <span class="p">[</span><span class="nb">int</span><span class="p">,</span> <span class="nb">float</span><span class="p">,</span> <span class="nb">list</span><span class="p">],</span> <span class="nb">type</span><span class="p">(</span><span class="bp">self</span><span class="p">)</span><span class="o">.</span><span class="vm">__name__</span><span class="p">)</span>
        <span class="n">Validator</span><span class="o">.</span><span class="n">check_non_negative_int</span><span class="p">(</span><span class="n">quant_delay</span><span class="p">,</span> <span class="s1">&#39;quant_delay&#39;</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">cls_name</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">min_init</span> <span class="o">=</span> <span class="n">min_init</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">max_init</span> <span class="o">=</span> <span class="n">max_init</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">quant_dtype</span> <span class="o">=</span> <span class="n">quant_dtype</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">num_bits</span> <span class="o">=</span> <span class="n">quant_dtype</span><span class="o">.</span><span class="n">num_bits</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">ema</span> <span class="o">=</span> <span class="n">ema</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">ema_decay</span> <span class="o">=</span> <span class="n">ema_decay</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">per_channel</span> <span class="o">=</span> <span class="n">per_channel</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">num_channels</span> <span class="o">=</span> <span class="n">num_channels</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">channel_axis</span> <span class="o">=</span> <span class="n">channel_axis</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">quant_delay</span> <span class="o">=</span> <span class="n">quant_delay</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">symmetric</span> <span class="o">=</span> <span class="n">symmetric</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">narrow_range</span> <span class="o">=</span> <span class="n">narrow_range</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">neg_trunc</span> <span class="o">=</span> <span class="n">neg_trunc</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">mode</span> <span class="o">=</span> <span class="n">mode</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">is_ascend</span> <span class="o">=</span> <span class="n">context</span><span class="o">.</span><span class="n">get_context</span><span class="p">(</span><span class="s1">&#39;device_target&#39;</span><span class="p">)</span> <span class="o">==</span> <span class="s2">&quot;Ascend&quot;</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">Neg</span> <span class="o">=</span> <span class="n">P</span><span class="o">.</span><span class="n">Neg</span><span class="p">()</span>

        <span class="n">min_array</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">_get_init_array</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">min_init</span><span class="p">)</span>
        <span class="n">max_array</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">_get_init_array</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">max_init</span><span class="p">)</span>
        <span class="k">if</span> <span class="ow">not</span> <span class="n">np</span><span class="o">.</span><span class="n">greater</span><span class="p">(</span><span class="n">max_array</span><span class="p">,</span> <span class="n">min_array</span><span class="p">)</span><span class="o">.</span><span class="n">all</span><span class="p">():</span>
            <span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span><span class="sa">f</span><span class="s2">&quot;For &#39;</span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">cls_name</span><span class="si">}</span><span class="s2">&#39;, the &#39;max_init&#39; should be greater than &#39;min_init&#39;, &quot;</span>
                             <span class="sa">f</span><span class="s2">&quot;but got &#39;max_init&#39;: </span><span class="si">{</span><span class="n">max_init</span><span class="si">}</span><span class="s2">, &#39;min_init&#39;: </span><span class="si">{</span><span class="n">min_init</span><span class="si">}</span><span class="s2">.&quot;</span><span class="p">)</span>
        <span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">mode</span> <span class="o">==</span> <span class="s2">&quot;DEFAULT&quot;</span><span class="p">:</span>
            <span class="bp">self</span><span class="o">.</span><span class="n">_default_init</span><span class="p">(</span><span class="n">min_array</span><span class="p">,</span> <span class="n">max_array</span><span class="p">)</span>
        <span class="k">elif</span> <span class="bp">self</span><span class="o">.</span><span class="n">mode</span> <span class="o">==</span> <span class="s2">&quot;LEARNED_SCALE&quot;</span><span class="p">:</span>
            <span class="bp">self</span><span class="o">.</span><span class="n">_learned_scale_init</span><span class="p">(</span><span class="n">min_array</span><span class="p">,</span> <span class="n">max_array</span><span class="p">)</span>
        <span class="k">else</span><span class="p">:</span>
            <span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span><span class="sa">f</span><span class="s2">&quot;For &#39;</span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">cls_name</span><span class="si">}</span><span class="s2">&#39;, only `DEFAULT` and `LEARNED_SCALE` mode are valid, but got &quot;</span>
                             <span class="sa">f</span><span class="s2">&quot;&#39;mode&#39;: </span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">mode</span><span class="si">}</span><span class="s2">.&quot;</span><span class="p">)</span>

<div class="viewcode-block" id="FakeQuantWithMinMaxObserver.reset"><a class="viewcode-back" href="../../../../api_python/nn/mindspore.nn.FakeQuantWithMinMaxObserver.html#mindspore.nn.FakeQuantWithMinMaxObserver.reset">[docs]</a>    <span class="k">def</span> <span class="nf">reset</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">quant_dtype</span><span class="o">=</span><span class="n">QuantDtype</span><span class="o">.</span><span class="n">INT8</span><span class="p">,</span> <span class="n">min_init</span><span class="o">=-</span><span class="mi">6</span><span class="p">,</span> <span class="n">max_init</span><span class="o">=</span><span class="mi">6</span><span class="p">):</span>
        <span class="sa">r</span><span class="sd">&quot;&quot;&quot;</span>
<span class="sd">        Reset the quant max parameter (e.g. 256) and the initial values of the minq and maxq parameters,</span>
<span class="sd">        this function is currently only valid for `LEARNED_SCALE` mode.</span>

<span class="sd">        Args:</span>
<span class="sd">            quant_dtype (QuantDtype): The datatype of quantization, supporting 4 and 8bits. Default: QuantDtype.INT8.</span>
<span class="sd">            min_init (int, float, list): The initialized min value. Default: -6.</span>
<span class="sd">            max_init (int, float, list): The initialized max value. Default: 6.</span>
<span class="sd">        &quot;&quot;&quot;</span>
        <span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">mode</span> <span class="o">==</span> <span class="s2">&quot;LEARNED_SCALE&quot;</span><span class="p">:</span>
            <span class="bp">self</span><span class="o">.</span><span class="n">quant_dtype</span> <span class="o">=</span> <span class="n">quant_dtype</span>
            <span class="bp">self</span><span class="o">.</span><span class="n">num_bits</span> <span class="o">=</span> <span class="n">quant_dtype</span><span class="o">.</span><span class="n">num_bits</span>
            <span class="bp">self</span><span class="o">.</span><span class="n">_calculate_quant_max</span><span class="p">()</span>
            <span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">neg_trunc</span><span class="p">:</span>
                <span class="n">min_init</span> <span class="o">=</span> <span class="mi">0</span>

            <span class="bp">self</span><span class="o">.</span><span class="n">min_init</span> <span class="o">=</span> <span class="n">min_init</span>
            <span class="bp">self</span><span class="o">.</span><span class="n">max_init</span> <span class="o">=</span> <span class="n">max_init</span>
            <span class="n">min_array</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">_get_init_array</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">min_init</span><span class="p">)</span>
            <span class="n">max_array</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">_get_init_array</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">max_init</span><span class="p">)</span>
            <span class="k">if</span> <span class="ow">not</span> <span class="n">np</span><span class="o">.</span><span class="n">greater</span><span class="p">(</span><span class="n">max_array</span><span class="p">,</span> <span class="n">min_array</span><span class="p">)</span><span class="o">.</span><span class="n">all</span><span class="p">():</span>
                <span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span><span class="sa">f</span><span class="s2">&quot;For &#39;</span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">cls_name</span><span class="si">}</span><span class="s2">&#39;, the &#39;max_init&#39; should be greater than &#39;min_init&#39;, &quot;</span>
                                 <span class="sa">f</span><span class="s2">&quot;but got &#39;max_init&#39;: </span><span class="si">{</span><span class="n">max_init</span><span class="si">}</span><span class="s2">, &#39;min_init&#39;: </span><span class="si">{</span><span class="n">min_init</span><span class="si">}</span><span class="s2">.&quot;</span><span class="p">)</span>

            <span class="bp">self</span><span class="o">.</span><span class="n">minq</span><span class="o">.</span><span class="n">set_data</span><span class="p">(</span><span class="n">Tensor</span><span class="p">(</span><span class="n">min_array</span><span class="p">))</span>
            <span class="bp">self</span><span class="o">.</span><span class="n">maxq</span><span class="o">.</span><span class="n">set_data</span><span class="p">(</span><span class="n">Tensor</span><span class="p">(</span><span class="n">max_array</span><span class="p">))</span>
            <span class="bp">self</span><span class="o">.</span><span class="n">quant_max</span><span class="o">.</span><span class="n">set_data</span><span class="p">(</span><span class="n">Tensor</span><span class="p">(</span><span class="n">np</span><span class="o">.</span><span class="n">array</span><span class="p">([</span><span class="bp">self</span><span class="o">.</span><span class="n">_quant_max</span><span class="p">])</span><span class="o">.</span><span class="n">astype</span><span class="p">(</span><span class="n">np</span><span class="o">.</span><span class="n">float32</span><span class="p">)))</span>
        <span class="k">else</span><span class="p">:</span>
            <span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span><span class="sa">f</span><span class="s2">&quot;For &#39;</span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">cls_name</span><span class="si">}</span><span class="s2">&#39;, only `LEARNED_SCALE` mode is valid, but got &#39;mode&#39;: </span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">mode</span><span class="si">}</span><span class="s2">.&quot;</span><span class="p">)</span></div>

    <span class="k">def</span> <span class="nf">_default_init</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">min_array</span><span class="p">,</span> <span class="n">max_array</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;</span>
<span class="sd">        Initialization of `DEFAULT`(QAT) mode.</span>
<span class="sd">        &quot;&quot;&quot;</span>
        <span class="c1"># init tensor min and max for fake quantized operation</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">minq</span> <span class="o">=</span> <span class="n">Parameter</span><span class="p">(</span><span class="n">Tensor</span><span class="p">(</span><span class="n">min_array</span><span class="p">),</span> <span class="n">name</span><span class="o">=</span><span class="s1">&#39;quant_min&#39;</span><span class="p">,</span> <span class="n">requires_grad</span><span class="o">=</span><span class="kc">False</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">maxq</span> <span class="o">=</span> <span class="n">Parameter</span><span class="p">(</span><span class="n">Tensor</span><span class="p">(</span><span class="n">max_array</span><span class="p">),</span> <span class="n">name</span><span class="o">=</span><span class="s1">&#39;quant_max&#39;</span><span class="p">,</span> <span class="n">requires_grad</span><span class="o">=</span><span class="kc">False</span><span class="p">)</span>

        <span class="c1"># init fake quant relative op</span>
        <span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">per_channel</span><span class="p">:</span>
            <span class="n">quant_fun</span> <span class="o">=</span> <span class="n">partial</span><span class="p">(</span><span class="n">Q</span><span class="o">.</span><span class="n">FakeQuantPerChannel</span><span class="p">,</span> <span class="n">channel_axis</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">channel_axis</span><span class="p">)</span>
            <span class="n">ema_fun</span> <span class="o">=</span> <span class="n">partial</span><span class="p">(</span><span class="n">Q</span><span class="o">.</span><span class="n">MinMaxUpdatePerChannel</span><span class="p">,</span> <span class="n">channel_axis</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">channel_axis</span><span class="p">)</span>
        <span class="k">else</span><span class="p">:</span>
            <span class="n">quant_fun</span> <span class="o">=</span> <span class="n">Q</span><span class="o">.</span><span class="n">FakeQuantPerLayer</span>
            <span class="n">ema_fun</span> <span class="o">=</span> <span class="n">Q</span><span class="o">.</span><span class="n">MinMaxUpdatePerLayer</span>

        <span class="bp">self</span><span class="o">.</span><span class="n">ema_update</span> <span class="o">=</span> <span class="n">ema_fun</span><span class="p">(</span><span class="n">ema</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">ema</span><span class="p">,</span> <span class="n">ema_decay</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">ema_decay</span><span class="p">)</span>
        <span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">is_ascend</span><span class="p">:</span>
            <span class="bp">self</span><span class="o">.</span><span class="n">fake_quant_train</span> <span class="o">=</span> <span class="n">quant_fun</span><span class="p">(</span><span class="n">num_bits</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">quant_dtype</span><span class="o">.</span><span class="n">num_bits</span><span class="p">,</span>
                                              <span class="n">symmetric</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">symmetric</span><span class="p">,</span>
                                              <span class="n">narrow_range</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">narrow_range</span><span class="p">,</span>
                                              <span class="n">quant_delay</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">quant_delay</span><span class="p">)</span>
            <span class="bp">self</span><span class="o">.</span><span class="n">fake_quant_infer</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">fake_quant_train</span>
        <span class="k">else</span><span class="p">:</span>
            <span class="n">quant_fun</span> <span class="o">=</span> <span class="n">partial</span><span class="p">(</span><span class="n">quant_fun</span><span class="p">,</span>
                                <span class="n">ema</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">ema</span><span class="p">,</span>
                                <span class="n">ema_decay</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">ema_decay</span><span class="p">,</span>
                                <span class="n">num_bits</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">quant_dtype</span><span class="o">.</span><span class="n">num_bits</span><span class="p">,</span>
                                <span class="n">symmetric</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">symmetric</span><span class="p">,</span>
                                <span class="n">narrow_range</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">narrow_range</span><span class="p">,</span>
                                <span class="n">quant_delay</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">quant_delay</span><span class="p">)</span>
            <span class="bp">self</span><span class="o">.</span><span class="n">fake_quant_train</span> <span class="o">=</span> <span class="n">quant_fun</span><span class="p">(</span><span class="n">training</span><span class="o">=</span><span class="kc">True</span><span class="p">)</span>
            <span class="bp">self</span><span class="o">.</span><span class="n">fake_quant_infer</span> <span class="o">=</span> <span class="n">quant_fun</span><span class="p">(</span><span class="n">training</span><span class="o">=</span><span class="kc">False</span><span class="p">)</span>

    <span class="k">def</span> <span class="nf">_learned_scale_init</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">min_array</span><span class="p">,</span> <span class="n">max_array</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;</span>
<span class="sd">        Initialization of `LEARNED_SCALE` mode.</span>
<span class="sd">        &quot;&quot;&quot;</span>
        <span class="k">if</span> <span class="ow">not</span> <span class="bp">self</span><span class="o">.</span><span class="n">symmetric</span><span class="p">:</span>
            <span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span><span class="sa">f</span><span class="s2">&quot;For &#39;</span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">cls_name</span><span class="si">}</span><span class="s2">&#39;, the &#39;LEARNED_SCALE&#39; mode only support &#39;symmetric&#39; quant, &quot;</span>
                             <span class="sa">f</span><span class="s2">&quot;but got &#39;symmetric&#39;: </span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">symmetric</span><span class="si">}</span><span class="s2">. Please set &#39;symmetric&#39; to True.&quot;</span><span class="p">)</span>
        <span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">neg_trunc</span><span class="p">:</span>
            <span class="n">min_array</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">_get_init_array</span><span class="p">(</span><span class="mi">0</span><span class="p">)</span>
            <span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">narrow_range</span><span class="p">:</span>
                <span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span><span class="sa">f</span><span class="s2">&quot;For &#39;</span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">cls_name</span><span class="si">}</span><span class="s2">&#39;, the &#39;LEARNED_SCALE&#39; mode only support the combination of &quot;</span>
                                 <span class="sa">f</span><span class="s2">&quot;&#39;neg_trunc=True and narrow_range=False&#39; config scenario, but got &#39;narrow_range&#39;: &quot;</span>
                                 <span class="sa">f</span><span class="s2">&quot;</span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">narrow_range</span><span class="si">}</span><span class="s2">.&quot;</span><span class="p">)</span>
        <span class="k">elif</span> <span class="ow">not</span> <span class="bp">self</span><span class="o">.</span><span class="n">narrow_range</span><span class="p">:</span>
            <span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span><span class="sa">f</span><span class="s2">&quot;For &#39;</span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">cls_name</span><span class="si">}</span><span class="s2">&#39;, the &#39;LEARNED_SCALE&#39; mode only support &#39;narrow_range=True&#39; &quot;</span>
                             <span class="sa">f</span><span class="s2">&quot;config, except for &#39;neg_trunc=True&#39; scenario. But got &#39;narrow_range&#39;: &quot;</span>
                             <span class="sa">f</span><span class="s2">&quot;</span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">narrow_range</span><span class="si">}</span><span class="s2">.&quot;</span><span class="p">)</span>

        <span class="bp">self</span><span class="o">.</span><span class="n">_calculate_quant_max</span><span class="p">()</span>

        <span class="bp">self</span><span class="o">.</span><span class="n">minq</span> <span class="o">=</span> <span class="n">Parameter</span><span class="p">(</span><span class="n">Tensor</span><span class="p">(</span><span class="n">min_array</span><span class="p">),</span> <span class="n">name</span><span class="o">=</span><span class="s1">&#39;minq&#39;</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">maxq</span> <span class="o">=</span> <span class="n">Parameter</span><span class="p">(</span><span class="n">Tensor</span><span class="p">(</span><span class="n">max_array</span><span class="p">),</span> <span class="n">name</span><span class="o">=</span><span class="s1">&#39;maxq&#39;</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">quant_max</span> <span class="o">=</span> <span class="n">Parameter</span><span class="p">(</span><span class="n">Tensor</span><span class="p">(</span><span class="n">np</span><span class="o">.</span><span class="n">array</span><span class="p">([</span><span class="bp">self</span><span class="o">.</span><span class="n">_quant_max</span><span class="p">])</span><span class="o">.</span><span class="n">astype</span><span class="p">(</span><span class="n">np</span><span class="o">.</span><span class="n">float32</span><span class="p">)),</span>
                                   <span class="n">name</span><span class="o">=</span><span class="s2">&quot;quant_max&quot;</span><span class="p">,</span> <span class="n">requires_grad</span><span class="o">=</span><span class="kc">False</span><span class="p">)</span>

        <span class="c1"># init fake quant relative op</span>
        <span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">per_channel</span><span class="p">:</span>
            <span class="n">quant_fun</span> <span class="o">=</span> <span class="n">partial</span><span class="p">(</span><span class="n">Q</span><span class="o">.</span><span class="n">FakeLearnedScaleQuantPerChannel</span><span class="p">,</span> <span class="n">channel_axis</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">channel_axis</span><span class="p">)</span>
        <span class="k">else</span><span class="p">:</span>
            <span class="n">quant_fun</span> <span class="o">=</span> <span class="n">Q</span><span class="o">.</span><span class="n">FakeLearnedScaleQuantPerLayer</span>

        <span class="n">quant_fun</span> <span class="o">=</span> <span class="n">partial</span><span class="p">(</span><span class="n">quant_fun</span><span class="p">,</span>
                            <span class="n">quant_delay</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">quant_delay</span><span class="p">,</span>
                            <span class="n">neg_trunc</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">neg_trunc</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">fake_quant_train</span> <span class="o">=</span> <span class="n">quant_fun</span><span class="p">(</span><span class="n">training</span><span class="o">=</span><span class="kc">True</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">fake_quant_infer</span> <span class="o">=</span> <span class="n">quant_fun</span><span class="p">(</span><span class="n">training</span><span class="o">=</span><span class="kc">False</span><span class="p">)</span>

    <span class="k">def</span> <span class="nf">_get_init_array</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">init_date</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;</span>
<span class="sd">        Convert the initial value to array.</span>
<span class="sd">        &quot;&quot;&quot;</span>
        <span class="k">if</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">init_date</span><span class="p">,</span> <span class="nb">list</span><span class="p">)</span> <span class="ow">and</span> <span class="bp">self</span><span class="o">.</span><span class="n">per_channel</span> <span class="ow">and</span> <span class="nb">len</span><span class="p">(</span><span class="n">init_date</span><span class="p">)</span> <span class="o">!=</span> <span class="bp">self</span><span class="o">.</span><span class="n">num_channels</span><span class="p">:</span>
            <span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span><span class="sa">f</span><span class="s2">&quot;For &#39;</span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">cls_name</span><span class="si">}</span><span class="s2">&#39;, the length of &#39;min_init/max_init&#39; list should be equal to &quot;</span>
                             <span class="sa">f</span><span class="s2">&quot;&#39;num_channels&#39; for perchannel quant scenario, but got &#39;min_init/max_init&#39;: </span><span class="si">{</span><span class="n">init_date</span><span class="si">}</span><span class="s2"> &quot;</span>
                             <span class="sa">f</span><span class="s2">&quot;and num_channels: </span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">num_channels</span><span class="si">}</span><span class="s2">.&quot;</span><span class="p">)</span>
        <span class="k">if</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">init_date</span><span class="p">,</span> <span class="nb">list</span><span class="p">)</span> <span class="ow">and</span> <span class="ow">not</span> <span class="bp">self</span><span class="o">.</span><span class="n">per_channel</span> <span class="ow">and</span> <span class="nb">len</span><span class="p">(</span><span class="n">init_date</span><span class="p">)</span> <span class="o">!=</span> <span class="mi">1</span><span class="p">:</span>
            <span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span><span class="sa">f</span><span class="s2">&quot;For &#39;</span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">cls_name</span><span class="si">}</span><span class="s2">&#39;, the length of the &#39;min_init/max_init&#39; list should be 1 for &quot;</span>
                             <span class="sa">f</span><span class="s2">&quot;perlayer quant scenario, but got </span><span class="si">{</span><span class="nb">len</span><span class="p">(</span><span class="n">init_date</span><span class="p">)</span><span class="si">}</span><span class="s2">.&quot;</span><span class="p">)</span>

        <span class="k">if</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">init_date</span><span class="p">,</span> <span class="nb">list</span><span class="p">):</span>
            <span class="n">min_max_array</span> <span class="o">=</span> <span class="n">np</span><span class="o">.</span><span class="n">array</span><span class="p">(</span><span class="n">init_date</span><span class="p">)</span><span class="o">.</span><span class="n">astype</span><span class="p">(</span><span class="n">np</span><span class="o">.</span><span class="n">float32</span><span class="p">)</span>
        <span class="k">elif</span> <span class="bp">self</span><span class="o">.</span><span class="n">per_channel</span> <span class="ow">and</span> <span class="ow">not</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">init_date</span><span class="p">,</span> <span class="nb">list</span><span class="p">):</span>
            <span class="n">min_max_array</span> <span class="o">=</span> <span class="n">np</span><span class="o">.</span><span class="n">array</span><span class="p">([</span><span class="n">init_date</span><span class="p">]</span> <span class="o">*</span> <span class="bp">self</span><span class="o">.</span><span class="n">num_channels</span><span class="p">)</span><span class="o">.</span><span class="n">astype</span><span class="p">(</span><span class="n">np</span><span class="o">.</span><span class="n">float32</span><span class="p">)</span>
        <span class="k">else</span><span class="p">:</span>
            <span class="n">min_max_array</span> <span class="o">=</span> <span class="n">np</span><span class="o">.</span><span class="n">array</span><span class="p">([</span><span class="n">init_date</span><span class="p">])</span><span class="o">.</span><span class="n">astype</span><span class="p">(</span><span class="n">np</span><span class="o">.</span><span class="n">float32</span><span class="p">)</span>
        <span class="k">return</span> <span class="n">min_max_array</span>

    <span class="k">def</span> <span class="nf">_calculate_quant_max</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;</span>
<span class="sd">        The quantization range is calculated according to num_bits.</span>
<span class="sd">        &quot;&quot;&quot;</span>
        <span class="k">if</span> <span class="ow">not</span> <span class="bp">self</span><span class="o">.</span><span class="n">neg_trunc</span><span class="p">:</span>
            <span class="bp">self</span><span class="o">.</span><span class="n">_quant_max</span> <span class="o">=</span> <span class="p">(</span><span class="mi">1</span> <span class="o">&lt;&lt;</span> <span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">num_bits</span> <span class="o">-</span> <span class="mi">1</span><span class="p">))</span> <span class="o">-</span> <span class="mi">1</span>
        <span class="k">else</span><span class="p">:</span>
            <span class="bp">self</span><span class="o">.</span><span class="n">_quant_max</span> <span class="o">=</span> <span class="p">(</span><span class="mi">1</span> <span class="o">&lt;&lt;</span> <span class="bp">self</span><span class="o">.</span><span class="n">num_bits</span><span class="p">)</span> <span class="o">-</span> <span class="mi">1</span>

<!-- NOTE(review): machine-generated Pygments/viewcode rendering of
     FakeQuantWithMinMaxObserver.extend_repr; any code change belongs in the
     underlying Python source (mindspore/nn/layer/quant.py), not in this
     generated HTML. -->
<div class="viewcode-block" id="FakeQuantWithMinMaxObserver.extend_repr"><a class="viewcode-back" href="../../../../api_python/nn/mindspore.nn.FakeQuantWithMinMaxObserver.html#mindspore.nn.FakeQuantWithMinMaxObserver.extend_repr">[docs]</a>    <span class="k">def</span> <span class="nf">extend_repr</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;Display instance object as string.&quot;&quot;&quot;</span>
        <span class="n">s</span> <span class="o">=</span> <span class="s1">&#39;quant_dtype=</span><span class="si">{}</span><span class="s1">, symmetric=</span><span class="si">{}</span><span class="s1">, narrow_range=</span><span class="si">{}</span><span class="s1">, ema=</span><span class="si">{}</span><span class="s1">(</span><span class="si">{}</span><span class="s1">), per_channel=</span><span class="si">{}</span><span class="s1">(</span><span class="si">{}</span><span class="s1">, </span><span class="si">{}</span><span class="s1">), &#39;</span> \
            <span class="s1">&#39;quant_delay=</span><span class="si">{}</span><span class="s1">, min_init=</span><span class="si">{}</span><span class="s1">, max_init=</span><span class="si">{}</span><span class="s1">&#39;</span><span class="o">.</span><span class="n">format</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">quant_dtype</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">symmetric</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">narrow_range</span><span class="p">,</span>
                                                              <span class="bp">self</span><span class="o">.</span><span class="n">ema</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">ema_decay</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">per_channel</span><span class="p">,</span>
                                                              <span class="bp">self</span><span class="o">.</span><span class="n">channel_axis</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">num_channels</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">quant_delay</span><span class="p">,</span>
                                                              <span class="bp">self</span><span class="o">.</span><span class="n">min_init</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">max_init</span><span class="p">)</span>
        <span class="k">return</span> <span class="n">s</span></div>

    <span class="k">def</span> <span class="nf">construct</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">x</span><span class="p">):</span>
        <span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">mode</span> <span class="o">==</span> <span class="s2">&quot;LEARNED_SCALE&quot;</span><span class="p">:</span>
            <span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">training</span><span class="p">:</span>
                <span class="n">out</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">fake_quant_train</span><span class="p">(</span><span class="n">x</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">maxq</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">quant_max</span><span class="p">)</span>
                <span class="k">if</span> <span class="ow">not</span> <span class="bp">self</span><span class="o">.</span><span class="n">neg_trunc</span><span class="p">:</span>
                    <span class="bp">self</span><span class="o">.</span><span class="n">minq</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">Neg</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">maxq</span><span class="p">)</span>
            <span class="k">else</span><span class="p">:</span>
                <span class="n">out</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">fake_quant_infer</span><span class="p">(</span><span class="n">x</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">maxq</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">quant_max</span><span class="p">)</span>
        <span class="k">else</span><span class="p">:</span>
            <span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">training</span><span class="p">:</span>
                <span class="n">min_up</span><span class="p">,</span> <span class="n">max_up</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">ema_update</span><span class="p">(</span><span class="n">x</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">minq</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">maxq</span><span class="p">)</span>
                <span class="bp">self</span><span class="o">.</span><span class="n">minq</span> <span class="o">=</span> <span class="n">min_up</span>
                <span class="bp">self</span><span class="o">.</span><span class="n">maxq</span> <span class="o">=</span> <span class="n">max_up</span>
                <span class="n">out</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">fake_quant_train</span><span class="p">(</span><span class="n">x</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">minq</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">maxq</span><span class="p">)</span>
            <span class="k">else</span><span class="p">:</span>
                <span class="n">out</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">fake_quant_infer</span><span class="p">(</span><span class="n">x</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">minq</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">maxq</span><span class="p">)</span>
        <span class="k">return</span> <span class="n">out</span></div>


<span class="n">QuantConfig</span> <span class="o">=</span> <span class="n">namedtuple</span><span class="p">(</span><span class="s2">&quot;QuantConfig&quot;</span><span class="p">,</span> <span class="p">[</span><span class="s1">&#39;weight&#39;</span><span class="p">,</span> <span class="s1">&#39;activation&#39;</span><span class="p">])</span>

<span class="n">quant_config_default</span> <span class="o">=</span> <span class="n">QuantConfig</span><span class="p">(</span><span class="n">weight</span><span class="o">=</span><span class="n">FakeQuantWithMinMaxObserver</span><span class="o">.</span><span class="n">partial_init</span><span class="p">(),</span>
                                   <span class="n">activation</span><span class="o">=</span><span class="n">FakeQuantWithMinMaxObserver</span><span class="o">.</span><span class="n">partial_init</span><span class="p">())</span>


<div class="viewcode-block" id="Conv2dBnFoldQuantOneConv"><a class="viewcode-back" href="../../../../api_python/nn/mindspore.nn.Conv2dBnFoldQuantOneConv.html#mindspore.nn.Conv2dBnFoldQuantOneConv">[docs]</a><span class="k">class</span> <span class="nc">Conv2dBnFoldQuantOneConv</span><span class="p">(</span><span class="n">Cell</span><span class="p">):</span>
    <span class="sa">r</span><span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    2D convolution which use the convolution layer statistics once to calculate Batch Normalization</span>
<span class="sd">    operation folded construct.</span>

<span class="sd">    This part is a more detailed overview of the Conv2d operation. For more details about quantization,</span>
<span class="sd">    please refer to the implementation of</span>
<span class="sd">    :class:`mindspore.nn.FakeQuantWithMinMaxObserver`.</span>

<span class="sd">    .. math::</span>
<span class="sd">        w_{q}=quant(\frac{w}{\sqrt{var_{G}+\epsilon}}*\gamma )</span>

<span class="sd">        b=\frac{-\mu _{G} }{\sqrt{var_{G}+\epsilon }}*\gamma +\beta</span>

<span class="sd">        y=w_{q}\times x+b</span>

<span class="sd">    where :math:`quant` is the consecutive execution of quant and dequant. For details, refer to the</span>
<span class="sd">    implementation of the subclass of `FakeQuantWithMinMaxObserver`, :class:`mindspore.nn.FakeQuantWithMinMaxObserver`.</span>
<span class="sd">    :math:`\mu _{G}` and :math:`var_{G}` represent the global mean and variance respectively.</span>

<span class="sd">    Args:</span>
<span class="sd">        in_channels (int): The number of input channel :math:`C_{in}`.</span>
<span class="sd">        out_channels (int): The number of output channel :math:`C_{out}`.</span>
<span class="sd">        kernel_size (Union[int, tuple[int]]): Specifies the height and width of the 2D convolution window.</span>
<span class="sd">        stride (Union[int, tuple[int]]): Specifies stride for all spatial dimensions with the same value. Default: 1.</span>
<span class="sd">        pad_mode (str): Specifies padding mode. The optional values are &quot;same&quot;, &quot;valid&quot;, &quot;pad&quot;. Default: &quot;same&quot;.</span>
<span class="sd">        padding (Union[int, tuple[int]]): Implicit paddings on both sides of the `x`. Default: 0.</span>
<span class="sd">        dilation (Union[int, tuple[int]]): Specifies the dilation rate to use for dilated convolution. Default: 1.</span>
<span class="sd">        group (int): Splits filter into groups, `in_channels` and `out_channels` must be</span>
<span class="sd">            divisible by the number of groups. Default: 1.</span>
<span class="sd">        eps (float): Parameters for Batch Normalization. Default: 1e-5.</span>
<span class="sd">        momentum (float): Parameters for Batch Normalization op. Default: 0.997.</span>
<span class="sd">        has_bias (bool): Specifies whether the layer uses a bias vector, which is temporarily invalid. Default: False.</span>
<span class="sd">        weight_init (Union[Tensor, str, Initializer, numbers.Number]): Initializer for the</span>
<span class="sd">            convolution kernel. Default: &#39;normal&#39;.</span>
<span class="sd">        bias_init (Union[Tensor, str, Initializer, numbers.Number]): Initializer for the</span>
<span class="sd">            bias vector. Default: &#39;zeros&#39;.</span>
<span class="sd">        beta_init (Union[Tensor, str, Initializer, numbers.Number]): Initializer for the</span>
<span class="sd">            beta vector. Default: &#39;zeros&#39;.</span>
<span class="sd">        gamma_init (Union[Tensor, str, Initializer, numbers.Number]): Initializer for the</span>
<span class="sd">            gamma vector. Default: &#39;ones&#39;.</span>
<span class="sd">        mean_init (Union[Tensor, str, Initializer, numbers.Number]): Initializer for the</span>
<span class="sd">            mean vector. Default: &#39;zeros&#39;.</span>
<span class="sd">        var_init (Union[Tensor, str, Initializer, numbers.Number]): Initializer for the</span>
<span class="sd">            variance vector. Default: &#39;ones&#39;.</span>
<span class="sd">        fake (bool): Whether Conv2dBnFoldQuant Cell adds FakeQuantWithMinMaxObserver. Default: True.</span>
<span class="sd">        quant_config (QuantConfig): Configures the types of quant observer and quant settings of weight and</span>
<span class="sd">            activation. Note that, QuantConfig is a special namedtuple, which is designed for quantization</span>
<span class="sd">            and can be generated by :func:`mindspore.compression.quant.create_quant_config` method.</span>
<span class="sd">            Default: QuantConfig with both items set to default :class:`FakeQuantWithMinMaxObserver`.</span>
<span class="sd">        quant_dtype (QuantDtype): Specifies the FakeQuant datatype. Default: QuantDtype.INT8.</span>

<span class="sd">    Inputs:</span>
<span class="sd">        - **x** (Tensor) - Tensor of shape :math:`(N, C_{in}, H_{in}, W_{in})`.</span>

<span class="sd">    Outputs:</span>
<span class="sd">        Tensor of shape :math:`(N, C_{out}, H_{out}, W_{out})`.</span>

<span class="sd">    Raises:</span>
<span class="sd">        TypeError: If `in_channels`, `out_channels` or `group` is not an int.</span>
<span class="sd">        TypeError: If `kernel_size`, `stride`, `padding` or `dilation` is neither an int nor a tuple.</span>
<span class="sd">        TypeError: If `has_bias` or `fake` is not a bool.</span>
<span class="sd">        TypeError: If `data_format` is not a string.</span>
<span class="sd">        ValueError: If `in_channels`, `out_channels`, `kernel_size`, `stride` or `dilation` is less than 1.</span>
<span class="sd">        ValueError: If `padding` is less than 0.</span>
<span class="sd">        ValueError: If `pad_mode` is not one of &#39;same&#39;, &#39;valid&#39;, &#39;pad&#39;.</span>

<span class="sd">    Supported Platforms:</span>
<span class="sd">        ``Ascend`` ``GPU``</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; import mindspore</span>
<span class="sd">        &gt;&gt;&gt; from mindspore.compression import quant</span>
<span class="sd">        &gt;&gt;&gt; from mindspore import Tensor</span>
<span class="sd">        &gt;&gt;&gt; qconfig = quant.create_quant_config()</span>
<span class="sd">        &gt;&gt;&gt; conv2d_bnfold = nn.Conv2dBnFoldQuantOneConv(1, 1, kernel_size=(2, 2), stride=(1, 1), pad_mode=&quot;valid&quot;,</span>
<span class="sd">        ...                                             weight_init=&quot;ones&quot;, quant_config=qconfig)</span>
<span class="sd">        &gt;&gt;&gt; x = Tensor(np.array([[[[1, 0, 3], [1, 4, 7], [2, 5, 2]]]]), mindspore.float32)</span>
<span class="sd">        &gt;&gt;&gt; result = conv2d_bnfold(x)</span>
<span class="sd">        &gt;&gt;&gt; print(result)</span>
<span class="sd">        [[[[5.9296875 13.8359375]</span>
<span class="sd">           [11.859375 17.78125]]]]</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span>
                 <span class="n">in_channels</span><span class="p">,</span>
                 <span class="n">out_channels</span><span class="p">,</span>
                 <span class="n">kernel_size</span><span class="p">,</span>
                 <span class="n">stride</span><span class="o">=</span><span class="mi">1</span><span class="p">,</span>
                 <span class="n">pad_mode</span><span class="o">=</span><span class="s1">&#39;same&#39;</span><span class="p">,</span>
                 <span class="n">padding</span><span class="o">=</span><span class="mi">0</span><span class="p">,</span>
                 <span class="n">dilation</span><span class="o">=</span><span class="mi">1</span><span class="p">,</span>
                 <span class="n">group</span><span class="o">=</span><span class="mi">1</span><span class="p">,</span>
                 <span class="n">eps</span><span class="o">=</span><span class="mf">1e-5</span><span class="p">,</span>
                 <span class="n">momentum</span><span class="o">=</span><span class="mf">0.997</span><span class="p">,</span>
                 <span class="n">has_bias</span><span class="o">=</span><span class="kc">False</span><span class="p">,</span>
                 <span class="n">weight_init</span><span class="o">=</span><span class="s1">&#39;normal&#39;</span><span class="p">,</span>
                 <span class="n">bias_init</span><span class="o">=</span><span class="s1">&#39;zeros&#39;</span><span class="p">,</span>
                 <span class="n">beta_init</span><span class="o">=</span><span class="s1">&#39;zeros&#39;</span><span class="p">,</span>
                 <span class="n">gamma_init</span><span class="o">=</span><span class="s1">&#39;ones&#39;</span><span class="p">,</span>
                 <span class="n">mean_init</span><span class="o">=</span><span class="s1">&#39;zeros&#39;</span><span class="p">,</span>
                 <span class="n">var_init</span><span class="o">=</span><span class="s1">&#39;ones&#39;</span><span class="p">,</span>
                 <span class="n">fake</span><span class="o">=</span><span class="kc">True</span><span class="p">,</span>
                 <span class="n">quant_config</span><span class="o">=</span><span class="n">quant_config_default</span><span class="p">,</span>
                 <span class="n">quant_dtype</span><span class="o">=</span><span class="n">QuantDtype</span><span class="o">.</span><span class="n">INT8</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;Initialize Conv2dBnFoldQuantOneConv layer.&quot;&quot;&quot;</span>
        <span class="nb">super</span><span class="p">(</span><span class="n">Conv2dBnFoldQuantOneConv</span><span class="p">,</span> <span class="bp">self</span><span class="p">)</span><span class="o">.</span><span class="fm">__init__</span><span class="p">()</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">in_channels</span> <span class="o">=</span> <span class="n">Validator</span><span class="o">.</span><span class="n">check_positive_int</span><span class="p">(</span><span class="n">in_channels</span><span class="p">,</span> <span class="s2">&quot;in_channels&quot;</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">cls_name</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">out_channels</span> <span class="o">=</span> <span class="n">Validator</span><span class="o">.</span><span class="n">check_positive_int</span><span class="p">(</span><span class="n">out_channels</span><span class="p">,</span> <span class="s2">&quot;out_channels&quot;</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">cls_name</span><span class="p">)</span>
        <span class="c1"># normalize kernel_size/stride/dilation to (height, width) 2-tuples via twice()</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">kernel_size</span> <span class="o">=</span> <span class="n">twice</span><span class="p">(</span><span class="n">kernel_size</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">stride</span> <span class="o">=</span> <span class="n">twice</span><span class="p">(</span><span class="n">stride</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">dilation</span> <span class="o">=</span> <span class="n">twice</span><span class="p">(</span><span class="n">dilation</span><span class="p">)</span>
        <span class="k">for</span> <span class="n">kernel_size_elem</span> <span class="ow">in</span> <span class="bp">self</span><span class="o">.</span><span class="n">kernel_size</span><span class="p">:</span>
            <span class="n">Validator</span><span class="o">.</span><span class="n">check_positive_int</span><span class="p">(</span><span class="n">kernel_size_elem</span><span class="p">,</span> <span class="s1">&#39;kernel_size item&#39;</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">cls_name</span><span class="p">)</span>
        <span class="k">for</span> <span class="n">stride_elem</span> <span class="ow">in</span> <span class="bp">self</span><span class="o">.</span><span class="n">stride</span><span class="p">:</span>
            <span class="n">Validator</span><span class="o">.</span><span class="n">check_positive_int</span><span class="p">(</span><span class="n">stride_elem</span><span class="p">,</span> <span class="s1">&#39;stride item&#39;</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">cls_name</span><span class="p">)</span>
        <span class="k">for</span> <span class="n">dilation_elem</span> <span class="ow">in</span> <span class="bp">self</span><span class="o">.</span><span class="n">dilation</span><span class="p">:</span>
            <span class="n">Validator</span><span class="o">.</span><span class="n">check_positive_int</span><span class="p">(</span><span class="n">dilation_elem</span><span class="p">,</span> <span class="s1">&#39;dilation item&#39;</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">cls_name</span><span class="p">)</span>
        <span class="k">if</span> <span class="n">pad_mode</span> <span class="ow">not</span> <span class="ow">in</span> <span class="p">(</span><span class="s1">&#39;valid&#39;</span><span class="p">,</span> <span class="s1">&#39;same&#39;</span><span class="p">,</span> <span class="s1">&#39;pad&#39;</span><span class="p">):</span>
            <span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span><span class="sa">f</span><span class="s2">&quot;For &#39;</span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">cls_name</span><span class="si">}</span><span class="s2">&#39;, the &#39;pad_mode&#39; should be one of values &quot;</span>
                             <span class="sa">f</span><span class="s2">&quot;in (&#39;valid&#39;, &#39;same&#39;, &#39;pad&#39;), but got </span><span class="si">{</span><span class="n">pad_mode</span><span class="si">}</span><span class="s2">.&quot;</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">pad_mode</span> <span class="o">=</span> <span class="n">pad_mode</span>
        <span class="c1"># padding may be a single non-negative int or a tuple of non-negative ints</span>
        <span class="k">if</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">padding</span><span class="p">,</span> <span class="nb">int</span><span class="p">):</span>
            <span class="n">Validator</span><span class="o">.</span><span class="n">check_non_negative_int</span><span class="p">(</span><span class="n">padding</span><span class="p">,</span> <span class="s1">&#39;padding&#39;</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">cls_name</span><span class="p">)</span>
            <span class="bp">self</span><span class="o">.</span><span class="n">padding</span> <span class="o">=</span> <span class="n">padding</span>
        <span class="k">elif</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">padding</span><span class="p">,</span> <span class="nb">tuple</span><span class="p">):</span>
            <span class="k">for</span> <span class="n">pad</span> <span class="ow">in</span> <span class="n">padding</span><span class="p">:</span>
                <span class="n">Validator</span><span class="o">.</span><span class="n">check_non_negative_int</span><span class="p">(</span><span class="n">pad</span><span class="p">,</span> <span class="s1">&#39;padding item&#39;</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">cls_name</span><span class="p">)</span>
            <span class="bp">self</span><span class="o">.</span><span class="n">padding</span> <span class="o">=</span> <span class="n">padding</span>
        <span class="k">else</span><span class="p">:</span>
            <span class="k">raise</span> <span class="ne">TypeError</span><span class="p">(</span><span class="sa">f</span><span class="s2">&quot;For &#39;</span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">cls_name</span><span class="si">}</span><span class="s2">&#39;, the type of &#39;padding&#39; must be int/tuple(int), but got &quot;</span>
                            <span class="sa">f</span><span class="s2">&quot;</span><span class="si">{</span><span class="nb">type</span><span class="p">(</span><span class="n">padding</span><span class="p">)</span><span class="o">.</span><span class="vm">__name__</span><span class="si">}</span><span class="s2">!&quot;</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">group</span> <span class="o">=</span> <span class="n">Validator</span><span class="o">.</span><span class="n">check_positive_int</span><span class="p">(</span><span class="n">group</span><span class="p">,</span> <span class="s2">&quot;group&quot;</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">cls_name</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">eps</span> <span class="o">=</span> <span class="n">eps</span>
        <span class="c1"># NOTE(review): stored as (1 - momentum); presumably the BatchNorm primitive below uses the complementary convention -- confirm against P.BatchNorm docs</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">momentum</span> <span class="o">=</span> <span class="mi">1</span> <span class="o">-</span> <span class="n">momentum</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">has_bias</span> <span class="o">=</span> <span class="n">has_bias</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">fake</span> <span class="o">=</span> <span class="n">Validator</span><span class="o">.</span><span class="n">check_bool</span><span class="p">(</span><span class="n">fake</span><span class="p">,</span> <span class="s2">&quot;fake&quot;</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">cls_name</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">quant_config</span> <span class="o">=</span> <span class="n">quant_config</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">quant_dtype</span> <span class="o">=</span> <span class="n">quant_dtype</span>
        <span class="c1"># data format is hard-coded to NCHW for this layer</span>
        <span class="n">data_format</span> <span class="o">=</span> <span class="s1">&#39;NCHW&#39;</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">format</span> <span class="o">=</span> <span class="n">Validator</span><span class="o">.</span><span class="n">check_string</span><span class="p">(</span><span class="n">data_format</span><span class="p">,</span> <span class="p">[</span><span class="s1">&#39;NCHW&#39;</span><span class="p">,</span> <span class="s1">&#39;NHWC&#39;</span><span class="p">],</span> <span class="s1">&#39;format&#39;</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">cls_name</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">_target</span> <span class="o">=</span> <span class="n">context</span><span class="o">.</span><span class="n">get_context</span><span class="p">(</span><span class="s2">&quot;device_target&quot;</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">is_graph_mode</span> <span class="o">=</span> <span class="n">context</span><span class="o">.</span><span class="n">get_context</span><span class="p">(</span><span class="s2">&quot;mode&quot;</span><span class="p">)</span> <span class="o">==</span> <span class="n">context</span><span class="o">.</span><span class="n">GRAPH_MODE</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">is_ge_backend</span> <span class="o">=</span> <span class="kc">False</span>
        <span class="k">if</span> <span class="n">context</span><span class="o">.</span><span class="n">get_context</span><span class="p">(</span><span class="s2">&quot;enable_ge&quot;</span><span class="p">):</span>
            <span class="bp">self</span><span class="o">.</span><span class="n">is_ge_backend</span> <span class="o">=</span> <span class="kc">True</span>
        <span class="c1"># default-train path is taken only in graph mode combined with the GE backend or an Ascend target</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">enable_default_train</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">is_graph_mode</span> <span class="ow">and</span> \
                                    <span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">is_ge_backend</span> <span class="ow">or</span> <span class="bp">self</span><span class="o">.</span><span class="n">_target</span> <span class="o">==</span> <span class="s2">&quot;Ascend&quot;</span><span class="p">)</span>

        <span class="c1"># initialize convolution op and Parameter</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">conv</span> <span class="o">=</span> <span class="n">P</span><span class="o">.</span><span class="n">Conv2D</span><span class="p">(</span><span class="n">out_channel</span><span class="o">=</span><span class="n">out_channels</span><span class="p">,</span>
                             <span class="n">kernel_size</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">kernel_size</span><span class="p">,</span>
                             <span class="n">pad_mode</span><span class="o">=</span><span class="n">pad_mode</span><span class="p">,</span>
                             <span class="n">pad</span><span class="o">=</span><span class="n">padding</span><span class="p">,</span>
                             <span class="n">stride</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">stride</span><span class="p">,</span>
                             <span class="n">dilation</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">dilation</span><span class="p">,</span>
                             <span class="n">group</span><span class="o">=</span><span class="n">group</span><span class="p">)</span>
        <span class="n">weight_shape</span> <span class="o">=</span> <span class="p">[</span><span class="n">out_channels</span><span class="p">,</span> <span class="n">in_channels</span> <span class="o">//</span> <span class="n">group</span><span class="p">,</span> <span class="o">*</span><span class="bp">self</span><span class="o">.</span><span class="n">kernel_size</span><span class="p">]</span>
        <span class="c1"># weight layout is (out_channels, in_channels // group, kh, kw), so quantization runs along axis 0</span>
        <span class="n">channel_axis</span> <span class="o">=</span> <span class="mi">0</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">channel_axis</span> <span class="o">=</span> <span class="n">channel_axis</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">weight</span> <span class="o">=</span> <span class="n">Parameter</span><span class="p">(</span><span class="n">initializer</span><span class="p">(</span><span class="n">weight_init</span><span class="p">,</span> <span class="n">weight_shape</span><span class="p">),</span> <span class="n">name</span><span class="o">=</span><span class="s1">&#39;weight&#39;</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">bias_add</span> <span class="o">=</span> <span class="n">P</span><span class="o">.</span><span class="n">BiasAdd</span><span class="p">()</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">bias</span> <span class="o">=</span> <span class="kc">None</span>
        <span class="k">if</span> <span class="n">Validator</span><span class="o">.</span><span class="n">check_bool</span><span class="p">(</span><span class="n">has_bias</span><span class="p">,</span> <span class="s2">&quot;has_bias&quot;</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">cls_name</span><span class="p">):</span>
            <span class="bp">self</span><span class="o">.</span><span class="n">bias</span> <span class="o">=</span> <span class="n">Parameter</span><span class="p">(</span><span class="n">initializer</span><span class="p">(</span><span class="n">bias_init</span><span class="p">,</span> <span class="p">[</span><span class="n">out_channels</span><span class="p">]),</span> <span class="n">name</span><span class="o">=</span><span class="s1">&#39;bias&#39;</span><span class="p">)</span>

        <span class="c1"># initialize BatchNorm Parameter</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">gamma</span> <span class="o">=</span> <span class="n">Parameter</span><span class="p">(</span><span class="n">initializer</span><span class="p">(</span><span class="n">gamma_init</span><span class="p">,</span> <span class="p">[</span><span class="n">out_channels</span><span class="p">]),</span> <span class="n">name</span><span class="o">=</span><span class="s1">&#39;gamma&#39;</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">beta</span> <span class="o">=</span> <span class="n">Parameter</span><span class="p">(</span><span class="n">initializer</span><span class="p">(</span><span class="n">beta_init</span><span class="p">,</span> <span class="p">[</span><span class="n">out_channels</span><span class="p">]),</span> <span class="n">name</span><span class="o">=</span><span class="s1">&#39;beta&#39;</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">moving_mean</span> <span class="o">=</span> <span class="n">Parameter</span><span class="p">(</span><span class="n">initializer</span><span class="p">(</span><span class="n">mean_init</span><span class="p">,</span> <span class="p">[</span><span class="n">out_channels</span><span class="p">]),</span> <span class="n">name</span><span class="o">=</span><span class="s1">&#39;moving_mean&#39;</span><span class="p">,</span> <span class="n">requires_grad</span><span class="o">=</span><span class="kc">False</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">moving_variance</span> <span class="o">=</span> <span class="n">Parameter</span><span class="p">(</span><span class="n">initializer</span><span class="p">(</span><span class="n">var_init</span><span class="p">,</span> <span class="p">[</span><span class="n">out_channels</span><span class="p">]),</span> <span class="n">name</span><span class="o">=</span><span class="s1">&#39;moving_variance&#39;</span><span class="p">,</span>
                                         <span class="n">requires_grad</span><span class="o">=</span><span class="kc">False</span><span class="p">)</span>

        <span class="c1"># initialize fake ops</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">fake_quant_weight</span> <span class="o">=</span> <span class="n">quant_config</span><span class="o">.</span><span class="n">weight</span><span class="p">(</span><span class="n">ema</span><span class="o">=</span><span class="kc">False</span><span class="p">,</span>
                                                     <span class="n">channel_axis</span><span class="o">=</span><span class="n">channel_axis</span><span class="p">,</span>
                                                     <span class="n">num_channels</span><span class="o">=</span><span class="n">out_channels</span><span class="p">,</span>
                                                     <span class="n">quant_dtype</span><span class="o">=</span><span class="n">quant_dtype</span><span class="p">)</span>
        <span class="c1"># LEARNED_SCALE quantization mode freezes the batch-norm statistics (see construct)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">freeze_bn</span> <span class="o">=</span> <span class="kc">False</span>
        <span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">fake_quant_weight</span><span class="o">.</span><span class="n">mode</span> <span class="o">==</span> <span class="s2">&quot;LEARNED_SCALE&quot;</span><span class="p">:</span>
            <span class="bp">self</span><span class="o">.</span><span class="n">freeze_bn</span> <span class="o">=</span> <span class="kc">True</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">bn_train</span> <span class="o">=</span> <span class="n">P</span><span class="o">.</span><span class="n">BatchNorm</span><span class="p">(</span><span class="n">is_training</span><span class="o">=</span><span class="kc">True</span><span class="p">,</span> <span class="n">epsilon</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">eps</span><span class="p">,</span>
                                    <span class="n">momentum</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">momentum</span><span class="p">,</span> <span class="n">data_format</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">format</span><span class="p">)</span>

        <span class="bp">self</span><span class="o">.</span><span class="n">bn_infer</span> <span class="o">=</span> <span class="n">P</span><span class="o">.</span><span class="n">BatchNorm</span><span class="p">(</span><span class="n">is_training</span><span class="o">=</span><span class="kc">False</span><span class="p">,</span> <span class="n">epsilon</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">eps</span><span class="p">,</span> <span class="n">data_format</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">format</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">sub_mean</span> <span class="o">=</span> <span class="n">P</span><span class="o">.</span><span class="n">Sub</span><span class="p">()</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">sub_var</span> <span class="o">=</span> <span class="n">P</span><span class="o">.</span><span class="n">Sub</span><span class="p">()</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">mul_mean</span> <span class="o">=</span> <span class="n">P</span><span class="o">.</span><span class="n">Mul</span><span class="p">()</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">mul_var</span> <span class="o">=</span> <span class="n">P</span><span class="o">.</span><span class="n">Mul</span><span class="p">()</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">assign_sub_mean</span> <span class="o">=</span> <span class="n">P</span><span class="o">.</span><span class="n">AssignSub</span><span class="p">()</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">assign_sub_var</span> <span class="o">=</span> <span class="n">P</span><span class="o">.</span><span class="n">AssignSub</span><span class="p">()</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">reshape</span> <span class="o">=</span> <span class="n">P</span><span class="o">.</span><span class="n">Reshape</span><span class="p">()</span>

<div class="viewcode-block" id="Conv2dBnFoldQuantOneConv.extend_repr"><a class="viewcode-back" href="../../../../api_python/nn/mindspore.nn.Conv2dBnFoldQuantOneConv.html#mindspore.nn.Conv2dBnFoldQuantOneConv.extend_repr">[docs]</a>    <span class="k">def</span> <span class="nf">extend_repr</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;Return the key configuration of this layer as a formatted string.&quot;&quot;&quot;</span>
        <span class="n">s</span> <span class="o">=</span> <span class="s1">&#39;in_channels=</span><span class="si">{}</span><span class="s1">, out_channels=</span><span class="si">{}</span><span class="s1">, kernel_size=</span><span class="si">{}</span><span class="s1">, stride=</span><span class="si">{}</span><span class="s1">, &#39;</span> \
            <span class="s1">&#39;pad_mode=</span><span class="si">{}</span><span class="s1">, padding=</span><span class="si">{}</span><span class="s1">, dilation=</span><span class="si">{}</span><span class="s1">, group=</span><span class="si">{}</span><span class="s1">, &#39;</span> \
            <span class="s1">&#39;fake=</span><span class="si">{}</span><span class="s1">, momentum=</span><span class="si">{}</span><span class="s1">, quant_delay=</span><span class="si">{}</span><span class="s1">&#39;</span><span class="o">.</span><span class="n">format</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">in_channels</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">out_channels</span><span class="p">,</span>
                                                          <span class="bp">self</span><span class="o">.</span><span class="n">kernel_size</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">stride</span><span class="p">,</span>
                                                          <span class="bp">self</span><span class="o">.</span><span class="n">pad_mode</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">padding</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">dilation</span><span class="p">,</span>
                                                          <span class="bp">self</span><span class="o">.</span><span class="n">group</span><span class="p">,</span>
                                                          <span class="bp">self</span><span class="o">.</span><span class="n">fake</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">momentum</span><span class="p">,</span>
                                                          <span class="bp">self</span><span class="o">.</span><span class="n">fake_quant_weight</span><span class="o">.</span><span class="n">quant_delay</span><span class="p">)</span>
        <span class="k">return</span> <span class="n">s</span></div>

    <span class="k">def</span> <span class="nf">construct</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">x</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;Fold the batch-norm scale into the weight, optionally fake-quantize it, convolve, then normalize.&quot;&quot;&quot;</span>
        <span class="c1"># running_std = sqrt(moving_variance + eps); scale_factor = gamma / running_std is the BN scale folded into the weight</span>
        <span class="n">running_std</span> <span class="o">=</span> <span class="n">P</span><span class="o">.</span><span class="n">Sqrt</span><span class="p">()(</span><span class="n">P</span><span class="o">.</span><span class="n">Add</span><span class="p">()(</span><span class="bp">self</span><span class="o">.</span><span class="n">moving_variance</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">eps</span><span class="p">))</span>
        <span class="n">scale_factor</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">gamma</span> <span class="o">/</span> <span class="n">running_std</span>
        <span class="c1"># NOTE(review): channel_axis is hard-coded to 0 in __init__, so only the else branch ever runs here</span>
        <span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">channel_axis</span><span class="p">:</span>
            <span class="n">scale_factor</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">reshape</span><span class="p">(</span><span class="n">scale_factor</span><span class="p">,</span> <span class="p">(</span><span class="mi">1</span><span class="p">,</span> <span class="o">-</span><span class="mi">1</span><span class="p">,</span> <span class="mi">1</span><span class="p">,</span> <span class="mi">1</span><span class="p">))</span>
        <span class="k">else</span><span class="p">:</span>
            <span class="n">scale_factor</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">reshape</span><span class="p">(</span><span class="n">scale_factor</span><span class="p">,</span> <span class="p">(</span><span class="o">-</span><span class="mi">1</span><span class="p">,</span> <span class="mi">1</span><span class="p">,</span> <span class="mi">1</span><span class="p">,</span> <span class="mi">1</span><span class="p">))</span>
        <span class="n">weight</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">weight</span> <span class="o">*</span> <span class="n">scale_factor</span>
        <span class="c1"># simulate weight quantization when fake quantization is enabled</span>
        <span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">fake</span><span class="p">:</span>
            <span class="n">weight</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">fake_quant_weight</span><span class="p">(</span><span class="n">weight</span><span class="p">)</span>
        <span class="n">conv</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">conv</span><span class="p">(</span><span class="n">x</span><span class="p">,</span> <span class="n">weight</span><span class="p">)</span>

        <span class="c1"># frozen BN (LEARNED_SCALE mode): add the folded bias term beta - gamma * moving_mean / running_std directly</span>
        <span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">freeze_bn</span><span class="p">:</span>
            <span class="k">return</span> <span class="n">conv</span> <span class="o">+</span> <span class="bp">self</span><span class="o">.</span><span class="n">reshape</span><span class="p">((</span><span class="bp">self</span><span class="o">.</span><span class="n">beta</span> <span class="o">-</span> <span class="bp">self</span><span class="o">.</span><span class="n">gamma</span> <span class="o">*</span> <span class="bp">self</span><span class="o">.</span><span class="n">moving_mean</span> <span class="o">/</span> <span class="n">running_std</span><span class="p">),</span> <span class="p">(</span><span class="mi">1</span><span class="p">,</span> <span class="o">-</span><span class="mi">1</span><span class="p">,</span> <span class="mi">1</span><span class="p">,</span> <span class="mi">1</span><span class="p">))</span>
        <span class="n">scale_factor</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">reshape</span><span class="p">(</span><span class="n">scale_factor</span><span class="p">,</span> <span class="p">(</span><span class="mi">1</span><span class="p">,</span> <span class="o">-</span><span class="mi">1</span><span class="p">,</span> <span class="mi">1</span><span class="p">,</span> <span class="mi">1</span><span class="p">))</span>
        <span class="c1"># divide the folded scale back out so the BatchNorm ops below see the unfolded conv output;</span>
        <span class="c1"># the default train path multiplies by the reciprocal instead of dividing</span>
        <span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">enable_default_train</span><span class="p">:</span>
            <span class="n">scale_factor</span> <span class="o">=</span> <span class="n">P</span><span class="o">.</span><span class="n">Reciprocal</span><span class="p">()(</span><span class="n">scale_factor</span><span class="p">)</span>
            <span class="n">conv_orig</span> <span class="o">=</span> <span class="n">conv</span> <span class="o">*</span> <span class="n">scale_factor</span>
        <span class="k">else</span><span class="p">:</span>
            <span class="n">conv_orig</span> <span class="o">=</span> <span class="n">conv</span> <span class="o">/</span> <span class="n">scale_factor</span>
        <span class="c1"># training path runs bn_train (is_training=True); inference normalizes with the stored moving statistics</span>
        <span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">training</span><span class="p">:</span>
            <span class="k">return</span> <span class="bp">self</span><span class="o">.</span><span class="n">bn_train</span><span class="p">(</span><span class="n">conv_orig</span><span class="p">,</span>
                                 <span class="bp">self</span><span class="o">.</span><span class="n">gamma</span><span class="p">,</span>
                                 <span class="bp">self</span><span class="o">.</span><span class="n">beta</span><span class="p">,</span>
                                 <span class="bp">self</span><span class="o">.</span><span class="n">moving_mean</span><span class="p">,</span>
                                 <span class="bp">self</span><span class="o">.</span><span class="n">moving_variance</span><span class="p">)[</span><span class="mi">0</span><span class="p">]</span>

        <span class="k">return</span> <span class="bp">self</span><span class="o">.</span><span class="n">bn_infer</span><span class="p">(</span><span class="n">conv_orig</span><span class="p">,</span>
                             <span class="bp">self</span><span class="o">.</span><span class="n">gamma</span><span class="p">,</span>
                             <span class="bp">self</span><span class="o">.</span><span class="n">beta</span><span class="p">,</span>
                             <span class="bp">self</span><span class="o">.</span><span class="n">moving_mean</span><span class="p">,</span>
                             <span class="bp">self</span><span class="o">.</span><span class="n">moving_variance</span><span class="p">)[</span><span class="mi">0</span><span class="p">]</span></div>


<div class="viewcode-block" id="Conv2dBnFoldQuant"><a class="viewcode-back" href="../../../../api_python/nn/mindspore.nn.Conv2dBnFoldQuant.html#mindspore.nn.Conv2dBnFoldQuant">[docs]</a><span class="k">class</span> <span class="nc">Conv2dBnFoldQuant</span><span class="p">(</span><span class="n">Cell</span><span class="p">):</span>
    <span class="sa">r</span><span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    2D convolution with Batch Normalization operation folded construct.</span>

<span class="sd">    This part is a more detailed overview of Conv2d operation. For more details about Quantization,</span>
<span class="sd">    please refer to the implementation of class of `FakeQuantWithMinMaxObserver`,</span>
<span class="sd">    :class:`FakeQuantWithMinMaxObserver`.</span>

<span class="sd">    .. math::</span>
<span class="sd">        y = x\times w + b</span>

<span class="sd">        w_{q}=quant(\frac{w}{\sqrt{Var[y]+\epsilon}}*\gamma )</span>

<span class="sd">        y_{out}= w_{q}\times x+\frac{b-E[y]}{\sqrt{Var[y]+\epsilon}}*\gamma +\beta</span>

<span class="sd">    where :math:`quant` is the continuous execution of quant and dequant. Two convolution</span>
<span class="sd">    and Batch Normalization operations are used here; the purpose of the first convolution and Batch Normalization</span>
<span class="sd">    is to compute the mean `E[y]` and variance `Var[y]` of the current batch output for quantization.</span>

<span class="sd">    Args:</span>
<span class="sd">        in_channels (int): The number of input channel :math:`C_{in}`.</span>
<span class="sd">        out_channels (int): The number of output channel :math:`C_{out}`.</span>
<span class="sd">        kernel_size (Union[int, tuple[int]]): Specifies the height and width of the 2D convolution window.</span>
<span class="sd">        stride (Union[int, tuple[int]]): Specifies stride for all spatial dimensions with the same value. Default: 1.</span>
<span class="sd">        pad_mode (str): Specifies padding mode. The optional values are &quot;same&quot;, &quot;valid&quot;, &quot;pad&quot;. Default: &quot;same&quot;.</span>
<span class="sd">        padding (Union[int, tuple[int]]): Implicit paddings on both sides of the `x`. Default: 0.</span>
<span class="sd">        dilation (Union[int, tuple[int]]): Specifies the dilation rate to use for dilated convolution. Default: 1.</span>
<span class="sd">        group (int): Splits filter into groups, `in_channels` and `out_channels` must be</span>
<span class="sd">            divisible by the number of groups. Default: 1.</span>
<span class="sd">        eps (float): Parameters for Batch Normalization. Default: 1e-5.</span>
<span class="sd">        momentum (float): Parameters for Batch Normalization op. Default: 0.997.</span>
<span class="sd">        has_bias (bool): Specifies whether the layer uses a bias vector. Default: False.</span>
<span class="sd">        weight_init (Union[Tensor, str, Initializer, numbers.Number]): Initializer for the</span>
<span class="sd">            convolution kernel. Default: &#39;normal&#39;.</span>
<span class="sd">        bias_init (Union[Tensor, str, Initializer, numbers.Number]): Initializer for the</span>
<span class="sd">            bias vector. Default: &#39;zeros&#39;.</span>
<span class="sd">        beta_init (Union[Tensor, str, Initializer, numbers.Number]): Initializer for the</span>
<span class="sd">            beta vector. Default: &#39;zeros&#39;.</span>
<span class="sd">        gamma_init (Union[Tensor, str, Initializer, numbers.Number]): Initializer for the</span>
<span class="sd">            gamma vector. Default: &#39;ones&#39;.</span>
<span class="sd">        mean_init (Union[Tensor, str, Initializer, numbers.Number]): Initializer for the</span>
<span class="sd">            mean vector. Default: &#39;zeros&#39;.</span>
<span class="sd">        var_init (Union[Tensor, str, Initializer, numbers.Number]): Initializer for the</span>
<span class="sd">            variance vector. Default: &#39;ones&#39;.</span>
<span class="sd">        fake (bool): Whether Conv2dBnFoldQuant Cell adds FakeQuantWithMinMaxObserver. Default: True.</span>
<span class="sd">        quant_config (QuantConfig): Configures the types of quant observer and quant settings of weight and</span>
<span class="sd">            activation. Note that, QuantConfig is a special namedtuple, which is designed for quantization</span>
<span class="sd">            and can be generated by :func:`mindspore.compression.quant.create_quant_config` method.</span>
<span class="sd">            Default: QuantConfig with both items set to default :class:`FakeQuantWithMinMaxObserver`.</span>
<span class="sd">        quant_dtype (QuantDtype): Specifies the FakeQuant datatype. Default: QuantDtype.INT8.</span>
<span class="sd">        freeze_bn (int): The global step at which the Batch Normalization op is frozen during quantization.</span>
<span class="sd">            Default: 100000.</span>

<span class="sd">    Inputs:</span>
<span class="sd">        - **x** (Tensor) - Tensor of shape :math:`(N, C_{in}, H_{in}, W_{in})`.</span>

<span class="sd">    Outputs:</span>
<span class="sd">        Tensor of shape :math:`(N, C_{out}, H_{out}, W_{out})`.</span>

<span class="sd">    Raises:</span>
<span class="sd">        TypeError: If `in_channels`, `out_channels` or `group` is not an int.</span>
<span class="sd">        TypeError: If `kernel_size`, `stride`, `padding` or `dilation` is neither an int nor a tuple.</span>
<span class="sd">        TypeError: If `has_bias` or `fake` is not a bool.</span>
<span class="sd">        ValueError: If `in_channels`, `out_channels`, `kernel_size`, `stride` or `dilation` is less than 1.</span>
<span class="sd">        ValueError: If `padding` is less than 0.</span>
<span class="sd">        ValueError: If `pad_mode` is not one of &#39;same&#39;, &#39;valid&#39;, &#39;pad&#39;.</span>

<span class="sd">    Supported Platforms:</span>
<span class="sd">        ``Ascend`` ``GPU``</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; import mindspore</span>
<span class="sd">        &gt;&gt;&gt; from mindspore.compression import quant</span>
<span class="sd">        &gt;&gt;&gt; from mindspore import Tensor</span>
<span class="sd">        &gt;&gt;&gt; qconfig = quant.create_quant_config()</span>
<span class="sd">        &gt;&gt;&gt; conv2d_bnfold = nn.Conv2dBnFoldQuant(1, 1, kernel_size=(2, 2), stride=(1, 1), pad_mode=&quot;valid&quot;,</span>
<span class="sd">        ...                                      weight_init=&quot;ones&quot;, quant_config=qconfig)</span>
<span class="sd">        &gt;&gt;&gt; x = Tensor(np.array([[[[1, 0, 3], [1, 4, 7], [2, 5, 2]]]]), mindspore.float32)</span>
<span class="sd">        &gt;&gt;&gt; result = conv2d_bnfold(x)</span>
<span class="sd">        &gt;&gt;&gt; print(result)</span>
<span class="sd">        [[[[5.9296875 13.8359375]</span>
<span class="sd">           [11.859375 17.78125]]]]</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span>
                 <span class="n">in_channels</span><span class="p">,</span>
                 <span class="n">out_channels</span><span class="p">,</span>
                 <span class="n">kernel_size</span><span class="p">,</span>
                 <span class="n">stride</span><span class="o">=</span><span class="mi">1</span><span class="p">,</span>
                 <span class="n">pad_mode</span><span class="o">=</span><span class="s1">&#39;same&#39;</span><span class="p">,</span>
                 <span class="n">padding</span><span class="o">=</span><span class="mi">0</span><span class="p">,</span>
                 <span class="n">dilation</span><span class="o">=</span><span class="mi">1</span><span class="p">,</span>
                 <span class="n">group</span><span class="o">=</span><span class="mi">1</span><span class="p">,</span>
                 <span class="n">eps</span><span class="o">=</span><span class="mf">1e-5</span><span class="p">,</span>
                 <span class="n">momentum</span><span class="o">=</span><span class="mf">0.997</span><span class="p">,</span>
                 <span class="n">has_bias</span><span class="o">=</span><span class="kc">False</span><span class="p">,</span>
                 <span class="n">weight_init</span><span class="o">=</span><span class="s1">&#39;normal&#39;</span><span class="p">,</span>
                 <span class="n">bias_init</span><span class="o">=</span><span class="s1">&#39;zeros&#39;</span><span class="p">,</span>
                 <span class="n">beta_init</span><span class="o">=</span><span class="s1">&#39;zeros&#39;</span><span class="p">,</span>
                 <span class="n">gamma_init</span><span class="o">=</span><span class="s1">&#39;ones&#39;</span><span class="p">,</span>
                 <span class="n">mean_init</span><span class="o">=</span><span class="s1">&#39;zeros&#39;</span><span class="p">,</span>
                 <span class="n">var_init</span><span class="o">=</span><span class="s1">&#39;ones&#39;</span><span class="p">,</span>
                 <span class="n">fake</span><span class="o">=</span><span class="kc">True</span><span class="p">,</span>
                 <span class="n">quant_config</span><span class="o">=</span><span class="n">quant_config_default</span><span class="p">,</span>
                 <span class="n">quant_dtype</span><span class="o">=</span><span class="n">QuantDtype</span><span class="o">.</span><span class="n">INT8</span><span class="p">,</span>
                 <span class="n">freeze_bn</span><span class="o">=</span><span class="mi">100000</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;Initialize Conv2dBnFoldQuant layer&quot;&quot;&quot;</span>
        <span class="nb">super</span><span class="p">(</span><span class="n">Conv2dBnFoldQuant</span><span class="p">,</span> <span class="bp">self</span><span class="p">)</span><span class="o">.</span><span class="fm">__init__</span><span class="p">()</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">in_channels</span> <span class="o">=</span> <span class="n">Validator</span><span class="o">.</span><span class="n">check_positive_int</span><span class="p">(</span><span class="n">in_channels</span><span class="p">,</span> <span class="s2">&quot;in_channels&quot;</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">cls_name</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">out_channels</span> <span class="o">=</span> <span class="n">Validator</span><span class="o">.</span><span class="n">check_positive_int</span><span class="p">(</span><span class="n">out_channels</span><span class="p">,</span> <span class="s2">&quot;out_channels&quot;</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">cls_name</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">kernel_size</span> <span class="o">=</span> <span class="n">twice</span><span class="p">(</span><span class="n">kernel_size</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">stride</span> <span class="o">=</span> <span class="n">twice</span><span class="p">(</span><span class="n">stride</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">dilation</span> <span class="o">=</span> <span class="n">twice</span><span class="p">(</span><span class="n">dilation</span><span class="p">)</span>
        <span class="k">for</span> <span class="n">kernel_size_elem</span> <span class="ow">in</span> <span class="bp">self</span><span class="o">.</span><span class="n">kernel_size</span><span class="p">:</span>
            <span class="n">Validator</span><span class="o">.</span><span class="n">check_positive_int</span><span class="p">(</span><span class="n">kernel_size_elem</span><span class="p">,</span> <span class="s1">&#39;kernel_size item&#39;</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">cls_name</span><span class="p">)</span>
        <span class="k">for</span> <span class="n">stride_elem</span> <span class="ow">in</span> <span class="bp">self</span><span class="o">.</span><span class="n">stride</span><span class="p">:</span>
            <span class="n">Validator</span><span class="o">.</span><span class="n">check_positive_int</span><span class="p">(</span><span class="n">stride_elem</span><span class="p">,</span> <span class="s1">&#39;stride item&#39;</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">cls_name</span><span class="p">)</span>
        <span class="k">for</span> <span class="n">dilation_elem</span> <span class="ow">in</span> <span class="bp">self</span><span class="o">.</span><span class="n">dilation</span><span class="p">:</span>
            <span class="n">Validator</span><span class="o">.</span><span class="n">check_positive_int</span><span class="p">(</span><span class="n">dilation_elem</span><span class="p">,</span> <span class="s1">&#39;dilation item&#39;</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">cls_name</span><span class="p">)</span>
        <span class="k">if</span> <span class="n">pad_mode</span> <span class="ow">not</span> <span class="ow">in</span> <span class="p">(</span><span class="s1">&#39;valid&#39;</span><span class="p">,</span> <span class="s1">&#39;same&#39;</span><span class="p">,</span> <span class="s1">&#39;pad&#39;</span><span class="p">):</span>
            <span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span><span class="sa">f</span><span class="s2">&quot;For &#39;</span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">cls_name</span><span class="si">}</span><span class="s2">&#39;, the &#39;pad_mode&#39; should be one of values in &quot;</span>
                             <span class="sa">f</span><span class="s2">&quot;(&#39;valid&#39;, &#39;same&#39;, &#39;pad&#39;), but got </span><span class="si">{</span><span class="n">pad_mode</span><span class="si">}</span><span class="s2">.&quot;</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">pad_mode</span> <span class="o">=</span> <span class="n">pad_mode</span>
        <span class="k">if</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">padding</span><span class="p">,</span> <span class="nb">int</span><span class="p">):</span>
            <span class="n">Validator</span><span class="o">.</span><span class="n">check_non_negative_int</span><span class="p">(</span><span class="n">padding</span><span class="p">,</span> <span class="s1">&#39;padding&#39;</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">cls_name</span><span class="p">)</span>
            <span class="bp">self</span><span class="o">.</span><span class="n">padding</span> <span class="o">=</span> <span class="n">padding</span>
        <span class="k">elif</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">padding</span><span class="p">,</span> <span class="nb">tuple</span><span class="p">):</span>
            <span class="k">for</span> <span class="n">pad</span> <span class="ow">in</span> <span class="n">padding</span><span class="p">:</span>
                <span class="n">Validator</span><span class="o">.</span><span class="n">check_non_negative_int</span><span class="p">(</span><span class="n">pad</span><span class="p">,</span> <span class="s1">&#39;padding item&#39;</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">cls_name</span><span class="p">)</span>
            <span class="bp">self</span><span class="o">.</span><span class="n">padding</span> <span class="o">=</span> <span class="n">padding</span>
        <span class="k">else</span><span class="p">:</span>
            <span class="k">raise</span> <span class="ne">TypeError</span><span class="p">(</span><span class="sa">f</span><span class="s2">&quot;For &#39;</span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">cls_name</span><span class="si">}</span><span class="s2">&#39;, the type of &#39;padding&#39; must be int/tuple(int), &quot;</span>
                            <span class="sa">f</span><span class="s2">&quot;but got </span><span class="si">{</span><span class="nb">type</span><span class="p">(</span><span class="n">padding</span><span class="p">)</span><span class="o">.</span><span class="vm">__name__</span><span class="si">}</span><span class="s2">!&quot;</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">group</span> <span class="o">=</span> <span class="n">Validator</span><span class="o">.</span><span class="n">check_positive_int</span><span class="p">(</span><span class="n">group</span><span class="p">,</span> <span class="s2">&quot;group&quot;</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">cls_name</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">eps</span> <span class="o">=</span> <span class="n">eps</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">momentum</span> <span class="o">=</span> <span class="n">momentum</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">has_bias</span> <span class="o">=</span> <span class="n">has_bias</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">freeze_bn</span> <span class="o">=</span> <span class="n">freeze_bn</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">fake</span> <span class="o">=</span> <span class="n">Validator</span><span class="o">.</span><span class="n">check_bool</span><span class="p">(</span><span class="n">fake</span><span class="p">,</span> <span class="s2">&quot;fake&quot;</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">cls_name</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">quant_config</span> <span class="o">=</span> <span class="n">quant_config</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">quant_dtype</span> <span class="o">=</span> <span class="n">quant_dtype</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">is_gpu</span> <span class="o">=</span> <span class="n">context</span><span class="o">.</span><span class="n">get_context</span><span class="p">(</span><span class="s1">&#39;device_target&#39;</span><span class="p">)</span> <span class="o">==</span> <span class="s2">&quot;GPU&quot;</span>

        <span class="c1"># initialize convolution op and Parameter</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">conv</span> <span class="o">=</span> <span class="n">P</span><span class="o">.</span><span class="n">Conv2D</span><span class="p">(</span><span class="n">out_channel</span><span class="o">=</span><span class="n">out_channels</span><span class="p">,</span>
                             <span class="n">kernel_size</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">kernel_size</span><span class="p">,</span>
                             <span class="n">pad_mode</span><span class="o">=</span><span class="n">pad_mode</span><span class="p">,</span>
                             <span class="n">pad</span><span class="o">=</span><span class="n">padding</span><span class="p">,</span>
                             <span class="n">stride</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">stride</span><span class="p">,</span>
                             <span class="n">dilation</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">dilation</span><span class="p">,</span>
                             <span class="n">group</span><span class="o">=</span><span class="n">group</span><span class="p">)</span>
        <span class="n">weight_shape</span> <span class="o">=</span> <span class="p">[</span><span class="n">out_channels</span><span class="p">,</span> <span class="n">in_channels</span> <span class="o">//</span> <span class="n">group</span><span class="p">,</span> <span class="o">*</span><span class="bp">self</span><span class="o">.</span><span class="n">kernel_size</span><span class="p">]</span>
        <span class="n">channel_axis</span> <span class="o">=</span> <span class="mi">0</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">weight</span> <span class="o">=</span> <span class="n">Parameter</span><span class="p">(</span><span class="n">initializer</span><span class="p">(</span><span class="n">weight_init</span><span class="p">,</span> <span class="n">weight_shape</span><span class="p">),</span> <span class="n">name</span><span class="o">=</span><span class="s1">&#39;weight&#39;</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">bias_add</span> <span class="o">=</span> <span class="n">P</span><span class="o">.</span><span class="n">BiasAdd</span><span class="p">()</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">bias</span> <span class="o">=</span> <span class="kc">None</span>
        <span class="k">if</span> <span class="n">Validator</span><span class="o">.</span><span class="n">check_bool</span><span class="p">(</span><span class="n">has_bias</span><span class="p">,</span> <span class="s2">&quot;has_bias&quot;</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">cls_name</span><span class="p">):</span>
            <span class="bp">self</span><span class="o">.</span><span class="n">bias</span> <span class="o">=</span> <span class="n">Parameter</span><span class="p">(</span><span class="n">initializer</span><span class="p">(</span><span class="n">bias_init</span><span class="p">,</span> <span class="p">[</span><span class="n">out_channels</span><span class="p">]),</span> <span class="n">name</span><span class="o">=</span><span class="s1">&#39;bias&#39;</span><span class="p">)</span>

        <span class="c1"># initialize BatchNorm Parameter</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">gamma</span> <span class="o">=</span> <span class="n">Parameter</span><span class="p">(</span><span class="n">initializer</span><span class="p">(</span><span class="n">gamma_init</span><span class="p">,</span> <span class="p">[</span><span class="n">out_channels</span><span class="p">]),</span> <span class="n">name</span><span class="o">=</span><span class="s1">&#39;gamma&#39;</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">beta</span> <span class="o">=</span> <span class="n">Parameter</span><span class="p">(</span><span class="n">initializer</span><span class="p">(</span><span class="n">beta_init</span><span class="p">,</span> <span class="p">[</span><span class="n">out_channels</span><span class="p">]),</span> <span class="n">name</span><span class="o">=</span><span class="s1">&#39;beta&#39;</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">moving_mean</span> <span class="o">=</span> <span class="n">Parameter</span><span class="p">(</span><span class="n">initializer</span><span class="p">(</span><span class="n">mean_init</span><span class="p">,</span> <span class="p">[</span><span class="n">out_channels</span><span class="p">]),</span> <span class="n">name</span><span class="o">=</span><span class="s1">&#39;moving_mean&#39;</span><span class="p">,</span> <span class="n">requires_grad</span><span class="o">=</span><span class="kc">False</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">moving_variance</span> <span class="o">=</span> <span class="n">Parameter</span><span class="p">(</span><span class="n">initializer</span><span class="p">(</span><span class="n">var_init</span><span class="p">,</span> <span class="p">[</span><span class="n">out_channels</span><span class="p">]),</span> <span class="n">name</span><span class="o">=</span><span class="s1">&#39;moving_variance&#39;</span><span class="p">,</span>
                                         <span class="n">requires_grad</span><span class="o">=</span><span class="kc">False</span><span class="p">)</span>

        <span class="c1"># initialize fake ops</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">fake_quant_weight</span> <span class="o">=</span> <span class="n">quant_config</span><span class="o">.</span><span class="n">weight</span><span class="p">(</span><span class="n">ema</span><span class="o">=</span><span class="kc">False</span><span class="p">,</span>
                                                     <span class="n">channel_axis</span><span class="o">=</span><span class="n">channel_axis</span><span class="p">,</span>
                                                     <span class="n">num_channels</span><span class="o">=</span><span class="n">out_channels</span><span class="p">,</span>
                                                     <span class="n">quant_dtype</span><span class="o">=</span><span class="n">quant_dtype</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">batchnorm_fold</span> <span class="o">=</span> <span class="n">BatchNormFoldCell</span><span class="p">(</span><span class="n">epsilon</span><span class="o">=</span><span class="n">eps</span><span class="p">,</span> <span class="n">momentum</span><span class="o">=</span><span class="n">momentum</span><span class="p">,</span> <span class="n">freeze_bn</span><span class="o">=</span><span class="n">freeze_bn</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">correct_mul</span> <span class="o">=</span> <span class="n">Q</span><span class="o">.</span><span class="n">CorrectionMul</span><span class="p">(</span><span class="n">channel_axis</span><span class="p">)</span>
        <span class="k">if</span> <span class="n">context</span><span class="o">.</span><span class="n">get_context</span><span class="p">(</span><span class="s1">&#39;device_target&#39;</span><span class="p">)</span> <span class="o">==</span> <span class="s2">&quot;Ascend&quot;</span><span class="p">:</span>
            <span class="bp">self</span><span class="o">.</span><span class="n">batchnorm_fold2_train</span> <span class="o">=</span> <span class="n">Q</span><span class="o">.</span><span class="n">BatchNormFold2D</span><span class="p">(</span><span class="n">freeze_bn</span><span class="o">=</span><span class="n">freeze_bn</span><span class="p">)</span>
            <span class="bp">self</span><span class="o">.</span><span class="n">batchnorm_fold2_infer</span> <span class="o">=</span> <span class="n">Q</span><span class="o">.</span><span class="n">BatchNormFold2D</span><span class="p">(</span><span class="n">freeze_bn</span><span class="o">=</span><span class="mi">0</span><span class="p">)</span>
        <span class="k">elif</span> <span class="n">context</span><span class="o">.</span><span class="n">get_context</span><span class="p">(</span><span class="s1">&#39;device_target&#39;</span><span class="p">)</span> <span class="o">==</span> <span class="s2">&quot;GPU&quot;</span><span class="p">:</span>
            <span class="bp">self</span><span class="o">.</span><span class="n">batchnorm_fold2_train</span> <span class="o">=</span> <span class="n">Q</span><span class="o">.</span><span class="n">BatchNormFold2</span><span class="p">(</span><span class="n">freeze_bn</span><span class="o">=</span><span class="n">freeze_bn</span><span class="p">)</span>
            <span class="bp">self</span><span class="o">.</span><span class="n">batchnorm_fold2_infer</span> <span class="o">=</span> <span class="n">Q</span><span class="o">.</span><span class="n">BatchNormFold2</span><span class="p">(</span><span class="n">freeze_bn</span><span class="o">=</span><span class="mi">0</span><span class="p">)</span>
        <span class="k">else</span><span class="p">:</span>
            <span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span><span class="sa">f</span><span class="s2">&quot;For &#39;</span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">cls_name</span><span class="si">}</span><span class="s2">&#39;, only the &#39;Ascend&#39; and &#39;GPU&#39; platforms&quot;</span>
                             <span class="sa">f</span><span class="s2">&quot; are supported, but got </span><span class="si">{</span><span class="n">context</span><span class="o">.</span><span class="n">get_context</span><span class="p">(</span><span class="s1">&#39;device_target&#39;</span><span class="p">)</span><span class="si">}</span><span class="s2">.&quot;</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">step</span> <span class="o">=</span> <span class="n">Parameter</span><span class="p">(</span><span class="n">initializer</span><span class="p">(</span><span class="s1">&#39;normal&#39;</span><span class="p">,</span> <span class="p">[</span><span class="mi">1</span><span class="p">],</span> <span class="n">dtype</span><span class="o">=</span><span class="n">mstype</span><span class="o">.</span><span class="n">int32</span><span class="p">),</span> <span class="n">name</span><span class="o">=</span><span class="s1">&#39;step&#39;</span><span class="p">,</span> <span class="n">requires_grad</span><span class="o">=</span><span class="kc">False</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">one</span> <span class="o">=</span> <span class="n">Tensor</span><span class="p">(</span><span class="mi">1</span><span class="p">,</span> <span class="n">mstype</span><span class="o">.</span><span class="n">int32</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">assignadd</span> <span class="o">=</span> <span class="n">P</span><span class="o">.</span><span class="n">AssignAdd</span><span class="p">()</span>

<div class="viewcode-block" id="Conv2dBnFoldQuant.extend_repr"><a class="viewcode-back" href="../../../../api_python/nn/mindspore.nn.Conv2dBnFoldQuant.html#mindspore.nn.Conv2dBnFoldQuant.extend_repr">[docs]</a>    <span class="k">def</span> <span class="nf">extend_repr</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span><!-- extend_repr: formats the layer's conv/quant hyperparameters into the repr string; highlighted spans are Sphinx-viewcode output, keep them byte-identical -->
        <span class="sd">&quot;&quot;&quot;Display instance object as string.&quot;&quot;&quot;</span>
        <span class="n">s</span> <span class="o">=</span> <span class="s1">&#39;in_channels=</span><span class="si">{}</span><span class="s1">, out_channels=</span><span class="si">{}</span><span class="s1">, kernel_size=</span><span class="si">{}</span><span class="s1">, stride=</span><span class="si">{}</span><span class="s1">, &#39;</span> \
            <span class="s1">&#39;pad_mode=</span><span class="si">{}</span><span class="s1">, padding=</span><span class="si">{}</span><span class="s1">, dilation=</span><span class="si">{}</span><span class="s1">, group=</span><span class="si">{}</span><span class="s1">, &#39;</span> \
            <span class="s1">&#39;fake=</span><span class="si">{}</span><span class="s1">, freeze_bn=</span><span class="si">{}</span><span class="s1">, momentum=</span><span class="si">{}</span><span class="s1">, quant_delay=</span><span class="si">{}</span><span class="s1">&#39;</span><span class="o">.</span><span class="n">format</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">in_channels</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">out_channels</span><span class="p">,</span>
                                                                        <span class="bp">self</span><span class="o">.</span><span class="n">kernel_size</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">stride</span><span class="p">,</span>
                                                                        <span class="bp">self</span><span class="o">.</span><span class="n">pad_mode</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">padding</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">dilation</span><span class="p">,</span>
                                                                        <span class="bp">self</span><span class="o">.</span><span class="n">group</span><span class="p">,</span>
                                                                        <span class="bp">self</span><span class="o">.</span><span class="n">fake</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">freeze_bn</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">momentum</span><span class="p">,</span>
                                                                        <span class="bp">self</span><span class="o">.</span><span class="n">fake_quant_weight</span><span class="o">.</span><span class="n">quant_delay</span><span class="p">)</span>
        <span class="k">return</span> <span class="n">s</span></div>

    <span class="k">def</span> <span class="nf">construct</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">x</span><span class="p">):</span><!-- construct: conv with raw weight, BN fold1 to get batch/running stats, fake-quantize the BN-folded weight, conv again, then BN fold2 (GPU ops take an extra self.step argument; Ascend-style branch below) -->
        <span class="n">out_conv</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">conv</span><span class="p">(</span><span class="n">x</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">weight</span><span class="p">)</span>
        <span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">has_bias</span><span class="p">:</span>
            <span class="n">out_conv</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">bias_add</span><span class="p">(</span><span class="n">out_conv</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">bias</span><span class="p">)</span>
        <span class="c1"># BN fold1</span>
        <span class="n">batch_mean</span><span class="p">,</span> <span class="n">batch_std</span><span class="p">,</span> <span class="n">running_mean</span><span class="p">,</span> <span class="n">running_std</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">batchnorm_fold</span><span class="p">(</span><span class="n">out_conv</span><span class="p">,</span>
                                                                               <span class="bp">self</span><span class="o">.</span><span class="n">moving_mean</span><span class="p">,</span>
                                                                               <span class="bp">self</span><span class="o">.</span><span class="n">moving_variance</span><span class="p">,</span>
                                                                               <span class="bp">self</span><span class="o">.</span><span class="n">step</span><span class="p">)</span>
        <span class="c1"># fake weight</span>
        <span class="n">weight</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">correct_mul</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">weight</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">gamma</span><span class="p">,</span> <span class="n">running_std</span><span class="p">)</span>
        <span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">fake</span><span class="p">:</span>
            <span class="n">weight</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">fake_quant_weight</span><span class="p">(</span><span class="n">weight</span><span class="p">)</span>
        <span class="n">out</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">conv</span><span class="p">(</span><span class="n">x</span><span class="p">,</span> <span class="n">weight</span><span class="p">)</span>
        <span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">has_bias</span><span class="p">:</span>
            <span class="n">out</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">bias_add</span><span class="p">(</span><span class="n">out</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">bias</span><span class="p">)</span>
        <span class="c1"># BN fold2</span>
        <span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">is_gpu</span><span class="p">:</span><!-- training branch advances self.step via AssignAdd after each fold2 call -->
            <span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">training</span><span class="p">:</span>
                <span class="n">out</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">batchnorm_fold2_train</span><span class="p">(</span><span class="n">out</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">beta</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">gamma</span><span class="p">,</span>
                                                 <span class="n">batch_std</span><span class="p">,</span> <span class="n">batch_mean</span><span class="p">,</span> <span class="n">running_std</span><span class="p">,</span> <span class="n">running_mean</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">step</span><span class="p">)</span>
                <span class="bp">self</span><span class="o">.</span><span class="n">assignadd</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">step</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">one</span><span class="p">)</span>
            <span class="k">else</span><span class="p">:</span>
                <span class="n">out</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">batchnorm_fold2_infer</span><span class="p">(</span><span class="n">out</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">beta</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">gamma</span><span class="p">,</span>
                                                 <span class="n">batch_std</span><span class="p">,</span> <span class="n">batch_mean</span><span class="p">,</span> <span class="n">running_std</span><span class="p">,</span> <span class="n">running_mean</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">step</span><span class="p">)</span>
        <span class="k">else</span><span class="p">:</span>
            <span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">training</span><span class="p">:</span>
                <span class="n">out</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">batchnorm_fold2_train</span><span class="p">(</span><span class="n">out</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">beta</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">gamma</span><span class="p">,</span> <span class="n">batch_std</span><span class="p">,</span> <span class="n">batch_mean</span><span class="p">,</span> <span class="n">running_std</span><span class="p">)</span>
                <span class="bp">self</span><span class="o">.</span><span class="n">assignadd</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">step</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">one</span><span class="p">)</span>
            <span class="k">else</span><span class="p">:</span>
                <span class="n">out</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">batchnorm_fold2_infer</span><span class="p">(</span><span class="n">out</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">beta</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">gamma</span><span class="p">,</span> <span class="n">running_std</span><span class="p">,</span> <span class="n">running_mean</span><span class="p">,</span> <span class="n">running_std</span><span class="p">)</span><!-- NOTE(review): inference passes running stats where the train call passes batch stats; presumably the fold-BN inference convention, but the repeated running_std in slots 4 and 6 is worth confirming against the BatchNormFold2 op signature -->
        <span class="k">return</span> <span class="n">out</span></div>


<div class="viewcode-block" id="Conv2dBnWithoutFoldQuant"><a class="viewcode-back" href="../../../../api_python/nn/mindspore.nn.Conv2dBnWithoutFoldQuant.html#mindspore.nn.Conv2dBnWithoutFoldQuant">[docs]</a><span class="k">class</span> <span class="nc">Conv2dBnWithoutFoldQuant</span><span class="p">(</span><span class="n">Cell</span><span class="p">):</span>
    <span class="sa">r</span><span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    2D convolution and batchnorm without fold with fake quantized construct.</span>

<span class="sd">    This part is a more detailed overview of Conv2d operation. For more details about Quantization,</span>
<span class="sd">    please refer to the implementation of class of `FakeQuantWithMinMaxObserver`,</span>
<span class="sd">    :class:`mindspore.nn.FakeQuantWithMinMaxObserver`.</span>

<span class="sd">    .. math::</span>
<span class="sd">        y = x \times quant(w) + b</span>

<span class="sd">        y_{bn} = \frac{y - E[y]}{\sqrt{Var[y] + \epsilon}} * \gamma + \beta</span>

<span class="sd">    where :math:`quant` is the continuous execution of quant and dequant, you can refer to the implementation of</span>
<span class="sd">    class of `FakeQuantWithMinMaxObserver`, :class:`mindspore.nn.FakeQuantWithMinMaxObserver`.</span>

<span class="sd">    Args:</span>
<span class="sd">        in_channels (int): The number of input channel :math:`C_{in}`.</span>
<span class="sd">        out_channels (int): The number of output channel :math:`C_{out}`.</span>
<span class="sd">        kernel_size (Union[int, tuple[int]]): Specifies the height and width of the 2D convolution window.</span>
<span class="sd">        stride (Union[int, tuple[int]]): Specifies stride for all spatial dimensions with the same value. Default: 1.</span>
<span class="sd">        pad_mode (str): Specifies padding mode. The optional values are &quot;same&quot;, &quot;valid&quot;, &quot;pad&quot;. Default: &quot;same&quot;.</span>
<span class="sd">        padding (Union[int, tuple[int]]): Implicit paddings on both sides of the `x`. Default: 0.</span>
<span class="sd">        dilation (Union[int, tuple[int]]): Specifies the dilation rate to use for dilated convolution. Default: 1.</span>
<span class="sd">        group (int): Splits filter into groups, `in_channels` and `out_channels` must be</span>
<span class="sd">            divisible by the number of groups. Default: 1.</span>
<span class="sd">        has_bias (bool): Specifies whether the layer uses a bias vector. Default: False.</span>
<span class="sd">        eps (float): Parameters for Batch Normalization. Default: 1e-5.</span>
<span class="sd">        momentum (float): Parameters for Batch Normalization op. Default: 0.997.</span>
<span class="sd">        weight_init (Union[Tensor, str, Initializer, numbers.Number]): Initializer for the convolution kernel.</span>
<span class="sd">            Default: &#39;normal&#39;.</span>
<span class="sd">        bias_init (Union[Tensor, str, Initializer, numbers.Number]): Initializer for the bias vector. Default: &#39;zeros&#39;.</span>
<span class="sd">        quant_config (QuantConfig): Configures the types of quant observer and quant settings of weight and</span>
<span class="sd">            activation. Note that, QuantConfig is a special namedtuple, which is designed for quantization</span>
<span class="sd">            and can be generated by :func:`mindspore.compression.quant.create_quant_config` method.</span>
<span class="sd">            Default: QuantConfig with both items set to default :class:`FakeQuantWithMinMaxObserver`.</span>
<span class="sd">        quant_dtype (QuantDtype): Specifies the FakeQuant datatype. Default: QuantDtype.INT8.</span>

<span class="sd">    Inputs:</span>
<span class="sd">        - **x** (Tensor) - Tensor of shape :math:`(N, C_{in}, H_{in}, W_{in})`.</span>

<span class="sd">    Outputs:</span>
<span class="sd">        Tensor of shape :math:`(N, C_{out}, H_{out}, W_{out})`.</span>

<span class="sd">    Supported Platforms:</span>
<span class="sd">        ``Ascend`` ``GPU``</span>

<span class="sd">    Raises:</span>
<span class="sd">        TypeError: If `in_channels`, `out_channels` or `group` is not an int.</span>
<span class="sd">        TypeError: If `kernel_size`, `stride`, `padding` or `dilation` is neither an int nor a tuple.</span>
<span class="sd">        TypeError: If `has_bias` is not a bool.</span>
<span class="sd">        ValueError: If `in_channels`, `out_channels`, `kernel_size`, `stride` or `dilation` is less than 1.</span>
<span class="sd">        ValueError: If `padding` is less than 0.</span>
<span class="sd">        ValueError: If `pad_mode` is not one of &#39;same&#39;, &#39;valid&#39;, &#39;pad&#39;.</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; import mindspore</span>
<span class="sd">        &gt;&gt;&gt; from mindspore.compression import quant</span>
<span class="sd">        &gt;&gt;&gt; from mindspore import Tensor</span>
<span class="sd">        &gt;&gt;&gt; qconfig = quant.create_quant_config()</span>
<span class="sd">        &gt;&gt;&gt; conv2d_no_bnfold = nn.Conv2dBnWithoutFoldQuant(1, 1, kernel_size=(2, 2), stride=(1, 1), pad_mode=&quot;valid&quot;,</span>
<span class="sd">        ...                                                weight_init=&#39;ones&#39;, quant_config=qconfig)</span>
<span class="sd">        &gt;&gt;&gt; x = Tensor(np.array([[[[1, 0, 3], [1, 4, 7], [2, 5, 2]]]]), mindspore.float32)</span>
<span class="sd">        &gt;&gt;&gt; result = conv2d_no_bnfold(x)</span>
<span class="sd">        &gt;&gt;&gt; print(result)</span>
<span class="sd">        [[[[5.929658  13.835868]</span>
<span class="sd">           [11.859316  17.78116]]]]</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span><!-- __init__: validates conv hyperparameters, then builds the Conv2D op, the weight Parameter, a channel-axis-0 weight fake-quantizer from quant_config, and an unfolded BatchNorm2d -->
                 <span class="n">in_channels</span><span class="p">,</span>
                 <span class="n">out_channels</span><span class="p">,</span>
                 <span class="n">kernel_size</span><span class="p">,</span>
                 <span class="n">stride</span><span class="o">=</span><span class="mi">1</span><span class="p">,</span>
                 <span class="n">pad_mode</span><span class="o">=</span><span class="s1">&#39;same&#39;</span><span class="p">,</span>
                 <span class="n">padding</span><span class="o">=</span><span class="mi">0</span><span class="p">,</span>
                 <span class="n">dilation</span><span class="o">=</span><span class="mi">1</span><span class="p">,</span>
                 <span class="n">group</span><span class="o">=</span><span class="mi">1</span><span class="p">,</span>
                 <span class="n">has_bias</span><span class="o">=</span><span class="kc">False</span><span class="p">,</span>
                 <span class="n">eps</span><span class="o">=</span><span class="mf">1e-5</span><span class="p">,</span>
                 <span class="n">momentum</span><span class="o">=</span><span class="mf">0.997</span><span class="p">,</span>
                 <span class="n">weight_init</span><span class="o">=</span><span class="s1">&#39;normal&#39;</span><span class="p">,</span>
                 <span class="n">bias_init</span><span class="o">=</span><span class="s1">&#39;zeros&#39;</span><span class="p">,</span>
                 <span class="n">quant_config</span><span class="o">=</span><span class="n">quant_config_default</span><span class="p">,</span>
                 <span class="n">quant_dtype</span><span class="o">=</span><span class="n">QuantDtype</span><span class="o">.</span><span class="n">INT8</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;Initialize Conv2dBnWithoutFoldQuant.&quot;&quot;&quot;</span>
        <span class="nb">super</span><span class="p">(</span><span class="n">Conv2dBnWithoutFoldQuant</span><span class="p">,</span> <span class="bp">self</span><span class="p">)</span><span class="o">.</span><span class="fm">__init__</span><span class="p">()</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">in_channels</span> <span class="o">=</span> <span class="n">Validator</span><span class="o">.</span><span class="n">check_positive_int</span><span class="p">(</span><span class="n">in_channels</span><span class="p">,</span> <span class="s2">&quot;in_channels&quot;</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">cls_name</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">out_channels</span> <span class="o">=</span> <span class="n">Validator</span><span class="o">.</span><span class="n">check_positive_int</span><span class="p">(</span><span class="n">out_channels</span><span class="p">,</span> <span class="s2">&quot;out_channels&quot;</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">cls_name</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">has_bias</span> <span class="o">=</span> <span class="n">has_bias</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">kernel_size</span> <span class="o">=</span> <span class="n">twice</span><span class="p">(</span><span class="n">kernel_size</span><span class="p">)</span><!-- twice() normalizes int-or-tuple hyperparameters to 2-tuples -->
        <span class="bp">self</span><span class="o">.</span><span class="n">stride</span> <span class="o">=</span> <span class="n">twice</span><span class="p">(</span><span class="n">stride</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">dilation</span> <span class="o">=</span> <span class="n">twice</span><span class="p">(</span><span class="n">dilation</span><span class="p">)</span>
        <span class="k">for</span> <span class="n">kernel_size_elem</span> <span class="ow">in</span> <span class="bp">self</span><span class="o">.</span><span class="n">kernel_size</span><span class="p">:</span>
            <span class="n">Validator</span><span class="o">.</span><span class="n">check_positive_int</span><span class="p">(</span><span class="n">kernel_size_elem</span><span class="p">,</span> <span class="s1">&#39;kernel_size item&#39;</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">cls_name</span><span class="p">)</span>
        <span class="k">for</span> <span class="n">stride_elem</span> <span class="ow">in</span> <span class="bp">self</span><span class="o">.</span><span class="n">stride</span><span class="p">:</span>
            <span class="n">Validator</span><span class="o">.</span><span class="n">check_positive_int</span><span class="p">(</span><span class="n">stride_elem</span><span class="p">,</span> <span class="s1">&#39;stride item&#39;</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">cls_name</span><span class="p">)</span>
        <span class="k">for</span> <span class="n">dilation_elem</span> <span class="ow">in</span> <span class="bp">self</span><span class="o">.</span><span class="n">dilation</span><span class="p">:</span>
            <span class="n">Validator</span><span class="o">.</span><span class="n">check_positive_int</span><span class="p">(</span><span class="n">dilation_elem</span><span class="p">,</span> <span class="s1">&#39;dilation item&#39;</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">cls_name</span><span class="p">)</span>
        <span class="k">if</span> <span class="n">pad_mode</span> <span class="ow">not</span> <span class="ow">in</span> <span class="p">(</span><span class="s1">&#39;valid&#39;</span><span class="p">,</span> <span class="s1">&#39;same&#39;</span><span class="p">,</span> <span class="s1">&#39;pad&#39;</span><span class="p">):</span>
            <span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span><span class="sa">f</span><span class="s2">&quot;For &#39;</span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">cls_name</span><span class="si">}</span><span class="s2">&#39;, the &#39;pad_mode&#39; should be one of values in &quot;</span>
                             <span class="sa">f</span><span class="s2">&quot;(&#39;valid&#39;, &#39;same&#39;, &#39;pad&#39;), but got </span><span class="si">{</span><span class="n">pad_mode</span><span class="si">}</span><span class="s2">.&quot;</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">pad_mode</span> <span class="o">=</span> <span class="n">pad_mode</span>
        <span class="k">if</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">padding</span><span class="p">,</span> <span class="nb">int</span><span class="p">):</span>
            <span class="n">Validator</span><span class="o">.</span><span class="n">check_non_negative_int</span><span class="p">(</span><span class="n">padding</span><span class="p">,</span> <span class="s1">&#39;padding&#39;</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">cls_name</span><span class="p">)</span>
            <span class="bp">self</span><span class="o">.</span><span class="n">padding</span> <span class="o">=</span> <span class="n">padding</span>
        <span class="k">elif</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">padding</span><span class="p">,</span> <span class="nb">tuple</span><span class="p">):</span>
            <span class="k">for</span> <span class="n">pad</span> <span class="ow">in</span> <span class="n">padding</span><span class="p">:</span>
                <span class="n">Validator</span><span class="o">.</span><span class="n">check_non_negative_int</span><span class="p">(</span><span class="n">pad</span><span class="p">,</span> <span class="s1">&#39;padding item&#39;</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">cls_name</span><span class="p">)</span>
            <span class="bp">self</span><span class="o">.</span><span class="n">padding</span> <span class="o">=</span> <span class="n">padding</span>
        <span class="k">else</span><span class="p">:</span>
            <span class="k">raise</span> <span class="ne">TypeError</span><span class="p">(</span><span class="sa">f</span><span class="s2">&quot;For &#39;</span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">cls_name</span><span class="si">}</span><span class="s2">&#39;, the type of &#39;padding&#39; must be int/tuple(int), &quot;</span>
                            <span class="sa">f</span><span class="s2">&quot;but got </span><span class="si">{</span><span class="nb">type</span><span class="p">(</span><span class="n">padding</span><span class="p">)</span><span class="o">.</span><span class="vm">__name__</span><span class="si">}</span><span class="s2">!&quot;</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">group</span> <span class="o">=</span> <span class="n">Validator</span><span class="o">.</span><span class="n">check_positive_int</span><span class="p">(</span><span class="n">group</span><span class="p">,</span> <span class="s2">&quot;group&quot;</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">cls_name</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">bias_add</span> <span class="o">=</span> <span class="n">P</span><span class="o">.</span><span class="n">BiasAdd</span><span class="p">()</span>
        <span class="k">if</span> <span class="n">Validator</span><span class="o">.</span><span class="n">check_bool</span><span class="p">(</span><span class="n">has_bias</span><span class="p">,</span> <span class="s2">&quot;has_bias&quot;</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">cls_name</span><span class="p">):</span>
            <span class="bp">self</span><span class="o">.</span><span class="n">bias</span> <span class="o">=</span> <span class="n">Parameter</span><span class="p">(</span><span class="n">initializer</span><span class="p">(</span><span class="n">bias_init</span><span class="p">,</span> <span class="p">[</span><span class="n">out_channels</span><span class="p">]),</span> <span class="n">name</span><span class="o">=</span><span class="s1">&#39;bias&#39;</span><span class="p">)</span>
        <span class="k">else</span><span class="p">:</span>
            <span class="bp">self</span><span class="o">.</span><span class="n">bias</span> <span class="o">=</span> <span class="kc">None</span>
        <span class="c1"># initialize convolution op and Parameter</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">conv</span> <span class="o">=</span> <span class="n">P</span><span class="o">.</span><span class="n">Conv2D</span><span class="p">(</span><span class="n">out_channel</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">out_channels</span><span class="p">,</span>
                             <span class="n">kernel_size</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">kernel_size</span><span class="p">,</span>
                             <span class="n">mode</span><span class="o">=</span><span class="mi">1</span><span class="p">,</span>
                             <span class="n">pad_mode</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">pad_mode</span><span class="p">,</span>
                             <span class="n">pad</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">padding</span><span class="p">,</span>
                             <span class="n">stride</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">stride</span><span class="p">,</span>
                             <span class="n">dilation</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">dilation</span><span class="p">,</span>
                             <span class="n">group</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">group</span><span class="p">)</span>
        <span class="n">weight_shape</span> <span class="o">=</span> <span class="p">[</span><span class="n">out_channels</span><span class="p">,</span> <span class="n">in_channels</span> <span class="o">//</span> <span class="n">group</span><span class="p">,</span> <span class="o">*</span><span class="bp">self</span><span class="o">.</span><span class="n">kernel_size</span><span class="p">]</span>
        <span class="n">channel_axis</span> <span class="o">=</span> <span class="mi">0</span><!-- fake-quant observes the weight along axis 0 (output channels) -->
        <span class="bp">self</span><span class="o">.</span><span class="n">weight</span> <span class="o">=</span> <span class="n">Parameter</span><span class="p">(</span><span class="n">initializer</span><span class="p">(</span><span class="n">weight_init</span><span class="p">,</span> <span class="n">weight_shape</span><span class="p">),</span> <span class="n">name</span><span class="o">=</span><span class="s1">&#39;weight&#39;</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">fake_quant_weight</span> <span class="o">=</span> <span class="n">quant_config</span><span class="o">.</span><span class="n">weight</span><span class="p">(</span><span class="n">ema</span><span class="o">=</span><span class="kc">False</span><span class="p">,</span>
                                                     <span class="n">channel_axis</span><span class="o">=</span><span class="n">channel_axis</span><span class="p">,</span>
                                                     <span class="n">num_channels</span><span class="o">=</span><span class="n">out_channels</span><span class="p">,</span>
                                                     <span class="n">quant_dtype</span><span class="o">=</span><span class="n">quant_dtype</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">batchnorm</span> <span class="o">=</span> <span class="n">BatchNorm2d</span><span class="p">(</span><span class="n">out_channels</span><span class="p">,</span> <span class="n">eps</span><span class="o">=</span><span class="n">eps</span><span class="p">,</span> <span class="n">momentum</span><span class="o">=</span><span class="n">momentum</span><span class="p">)</span>

    <span class="k">def</span> <span class="nf">construct</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">x</span><span class="p">):</span>
        <span class="n">weight</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">fake_quant_weight</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">weight</span><span class="p">)</span>
        <span class="n">out</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">conv</span><span class="p">(</span><span class="n">x</span><span class="p">,</span> <span class="n">weight</span><span class="p">)</span>
        <span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">has_bias</span><span class="p">:</span>
            <span class="n">out</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">bias_add</span><span class="p">(</span><span class="n">out</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">bias</span><span class="p">)</span>
        <span class="n">out</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">batchnorm</span><span class="p">(</span><span class="n">out</span><span class="p">)</span>
        <span class="k">return</span> <span class="n">out</span>

<div class="viewcode-block" id="Conv2dBnWithoutFoldQuant.extend_repr"><a class="viewcode-back" href="../../../../api_python/nn/mindspore.nn.Conv2dBnWithoutFoldQuant.html#mindspore.nn.Conv2dBnWithoutFoldQuant.extend_repr">[docs]</a>    <span class="k">def</span> <span class="nf">extend_repr</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;Display instance object as string.&quot;&quot;&quot;</span>
        <span class="n">s</span> <span class="o">=</span> <span class="s1">&#39;in_channels=</span><span class="si">{}</span><span class="s1">, out_channels=</span><span class="si">{}</span><span class="s1">, kernel_size=</span><span class="si">{}</span><span class="s1">, stride=</span><span class="si">{}</span><span class="s1">, &#39;</span> \
            <span class="s1">&#39;pad_mode=</span><span class="si">{}</span><span class="s1">, padding=</span><span class="si">{}</span><span class="s1">, dilation=</span><span class="si">{}</span><span class="s1">, group=</span><span class="si">{}</span><span class="s1">, &#39;</span> \
            <span class="s1">&#39;has_bias=</span><span class="si">{}</span><span class="s1">, quant_delay=</span><span class="si">{}</span><span class="s1">&#39;</span><span class="o">.</span><span class="n">format</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">in_channels</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">out_channels</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">kernel_size</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">stride</span><span class="p">,</span>
                                                 <span class="bp">self</span><span class="o">.</span><span class="n">pad_mode</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">padding</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">dilation</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">group</span><span class="p">,</span>
                                                 <span class="bp">self</span><span class="o">.</span><span class="n">has_bias</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">fake_quant_weight</span><span class="o">.</span><span class="n">quant_delay</span><span class="p">)</span>
        <span class="k">return</span> <span class="n">s</span></div></div>


<div class="viewcode-block" id="Conv2dQuant"><a class="viewcode-back" href="../../../../api_python/nn/mindspore.nn.Conv2dQuant.html#mindspore.nn.Conv2dQuant">[docs]</a><span class="k">class</span> <span class="nc">Conv2dQuant</span><span class="p">(</span><span class="n">Cell</span><span class="p">):</span>
    <span class="sa">r</span><span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    2D convolution with fake quantized operation layer.</span>

<span class="sd">    This part is a more detailed overview of Conv2d operation. For more details about Quantization,</span>
<span class="sd">    please refer to the implementation of class of `FakeQuantWithMinMaxObserver`,</span>
<span class="sd">    :class:`mindspore.nn.FakeQuantWithMinMaxObserver`.</span>

<span class="sd">    Args:</span>
<span class="sd">        in_channels (int): The number of input channel :math:`C_{in}`.</span>
<span class="sd">        out_channels (int): The number of output channel :math:`C_{out}`.</span>
<span class="sd">        kernel_size (Union[int, tuple[int]]): Specifies the height and width of the 2D convolution window.</span>
<span class="sd">        stride (Union[int, tuple[int]]): Specifies stride for all spatial dimensions with the same value. Default: 1.</span>
<span class="sd">        pad_mode (str): Specifies padding mode. The optional values are &quot;same&quot;, &quot;valid&quot;, &quot;pad&quot;. Default: &quot;same&quot;.</span>
<span class="sd">        padding (Union[int, tuple[int]]): Implicit paddings on both sides of the `x`. Default: 0.</span>
<span class="sd">        dilation (Union[int, tuple[int]]): Specifies the dilation rate to use for dilated convolution. Default: 1.</span>
<span class="sd">        group (int): Splits filter into groups, `in_channels` and `out_channels` must be</span>
<span class="sd">            divisible by the number of groups. Default: 1.</span>
<span class="sd">        has_bias (bool): Specifies whether the layer uses a bias vector. Default: False.</span>
<span class="sd">        weight_init (Union[Tensor, str, Initializer, numbers.Number]): Initializer for the convolution kernel.</span>
<span class="sd">            Default: &#39;normal&#39;.</span>
<span class="sd">        bias_init (Union[Tensor, str, Initializer, numbers.Number]): Initializer for the bias vector. Default: &#39;zeros&#39;.</span>
<span class="sd">        quant_config (QuantConfig): Configures the types of quant observer and quant settings of weight and</span>
<span class="sd">            activation. Note that, QuantConfig is a special namedtuple, which is designed for quantization</span>
<span class="sd">            and can be generated by :func:`mindspore.compression.quant.create_quant_config` method.</span>
<span class="sd">            Default: QuantConfig with both items set to default :class:`FakeQuantWithMinMaxObserver`.</span>
<span class="sd">        quant_dtype (QuantDtype): Specifies the FakeQuant datatype. Default: QuantDtype.INT8.</span>

<span class="sd">    Inputs:</span>
<span class="sd">        - **x** (Tensor) - Tensor of shape :math:`(N, C_{in}, H_{in}, W_{in})`.</span>
<span class="sd">          The input dimension is preferably 2D or 4D.</span>

<span class="sd">    Outputs:</span>
<span class="sd">        Tensor of shape :math:`(N, C_{out}, H_{out}, W_{out})`.</span>

<span class="sd">    Raises:</span>
<span class="sd">        TypeError: If `in_channels`, `out_channels` or `group` is not an int.</span>
<span class="sd">        TypeError: If `kernel_size`, `stride`, `padding` or `dilation` is neither an int nor a tuple.</span>
<span class="sd">        TypeError: If `has_bias` is not a bool.</span>
<span class="sd">        ValueError: If `in_channels`, `out_channels`, `kernel_size`, `stride` or `dilation` is less than 1.</span>
<span class="sd">        ValueError: If `padding` is less than 0.</span>
<span class="sd">        ValueError: If `pad_mode` is not one of &#39;same&#39;, &#39;valid&#39;, &#39;pad&#39;.</span>

<span class="sd">    Supported Platforms:</span>
<span class="sd">        ``Ascend`` ``GPU``</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; import mindspore</span>
<span class="sd">        &gt;&gt;&gt; from mindspore.compression import quant</span>
<span class="sd">        &gt;&gt;&gt; from mindspore import Tensor</span>
<span class="sd">        &gt;&gt;&gt; qconfig = quant.create_quant_config()</span>
<span class="sd">        &gt;&gt;&gt; conv2d_quant = nn.Conv2dQuant(1, 1, kernel_size=(2, 2), stride=(1, 1), pad_mode=&quot;valid&quot;,</span>
<span class="sd">        ...                               weight_init=&#39;ones&#39;, quant_config=qconfig)</span>
<span class="sd">        &gt;&gt;&gt; x = Tensor(np.array([[[[1, 0, 3], [1, 4, 7], [2, 5, 2]]]]), mindspore.float32)</span>
<span class="sd">        &gt;&gt;&gt; result = conv2d_quant(x)</span>
<span class="sd">        &gt;&gt;&gt; print(result)</span>
<span class="sd">        [[[[5.9296875  13.8359375]</span>
<span class="sd">           [11.859375  17.78125]]]]</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span>
                 <span class="n">in_channels</span><span class="p">,</span>
                 <span class="n">out_channels</span><span class="p">,</span>
                 <span class="n">kernel_size</span><span class="p">,</span>
                 <span class="n">stride</span><span class="o">=</span><span class="mi">1</span><span class="p">,</span>
                 <span class="n">pad_mode</span><span class="o">=</span><span class="s1">&#39;same&#39;</span><span class="p">,</span>
                 <span class="n">padding</span><span class="o">=</span><span class="mi">0</span><span class="p">,</span>
                 <span class="n">dilation</span><span class="o">=</span><span class="mi">1</span><span class="p">,</span>
                 <span class="n">group</span><span class="o">=</span><span class="mi">1</span><span class="p">,</span>
                 <span class="n">has_bias</span><span class="o">=</span><span class="kc">False</span><span class="p">,</span>
                 <span class="n">weight_init</span><span class="o">=</span><span class="s1">&#39;normal&#39;</span><span class="p">,</span>
                 <span class="n">bias_init</span><span class="o">=</span><span class="s1">&#39;zeros&#39;</span><span class="p">,</span>
                 <span class="n">quant_config</span><span class="o">=</span><span class="n">quant_config_default</span><span class="p">,</span>
                 <span class="n">quant_dtype</span><span class="o">=</span><span class="n">QuantDtype</span><span class="o">.</span><span class="n">INT8</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;Initialize Conv2dQuant.&quot;&quot;&quot;</span>
        <span class="nb">super</span><span class="p">(</span><span class="n">Conv2dQuant</span><span class="p">,</span> <span class="bp">self</span><span class="p">)</span><span class="o">.</span><span class="fm">__init__</span><span class="p">()</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">in_channels</span> <span class="o">=</span> <span class="n">Validator</span><span class="o">.</span><span class="n">check_positive_int</span><span class="p">(</span><span class="n">in_channels</span><span class="p">,</span> <span class="s2">&quot;in_channels&quot;</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">cls_name</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">out_channels</span> <span class="o">=</span> <span class="n">Validator</span><span class="o">.</span><span class="n">check_positive_int</span><span class="p">(</span><span class="n">out_channels</span><span class="p">,</span> <span class="s2">&quot;out_channels&quot;</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">cls_name</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">has_bias</span> <span class="o">=</span> <span class="n">has_bias</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">kernel_size</span> <span class="o">=</span> <span class="n">twice</span><span class="p">(</span><span class="n">kernel_size</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">stride</span> <span class="o">=</span> <span class="n">twice</span><span class="p">(</span><span class="n">stride</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">dilation</span> <span class="o">=</span> <span class="n">twice</span><span class="p">(</span><span class="n">dilation</span><span class="p">)</span>
        <span class="k">for</span> <span class="n">kernel_size_elem</span> <span class="ow">in</span> <span class="bp">self</span><span class="o">.</span><span class="n">kernel_size</span><span class="p">:</span>
            <span class="n">Validator</span><span class="o">.</span><span class="n">check_positive_int</span><span class="p">(</span><span class="n">kernel_size_elem</span><span class="p">,</span> <span class="s1">&#39;kernel_size item&#39;</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">cls_name</span><span class="p">)</span>
        <span class="k">for</span> <span class="n">stride_elem</span> <span class="ow">in</span> <span class="bp">self</span><span class="o">.</span><span class="n">stride</span><span class="p">:</span>
            <span class="n">Validator</span><span class="o">.</span><span class="n">check_positive_int</span><span class="p">(</span><span class="n">stride_elem</span><span class="p">,</span> <span class="s1">&#39;stride item&#39;</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">cls_name</span><span class="p">)</span>
        <span class="k">for</span> <span class="n">dilation_elem</span> <span class="ow">in</span> <span class="bp">self</span><span class="o">.</span><span class="n">dilation</span><span class="p">:</span>
            <span class="n">Validator</span><span class="o">.</span><span class="n">check_positive_int</span><span class="p">(</span><span class="n">dilation_elem</span><span class="p">,</span> <span class="s1">&#39;dilation item&#39;</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">cls_name</span><span class="p">)</span>
        <span class="k">if</span> <span class="n">pad_mode</span> <span class="ow">not</span> <span class="ow">in</span> <span class="p">(</span><span class="s1">&#39;valid&#39;</span><span class="p">,</span> <span class="s1">&#39;same&#39;</span><span class="p">,</span> <span class="s1">&#39;pad&#39;</span><span class="p">):</span>
            <span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span><span class="sa">f</span><span class="s2">&quot;For &#39;</span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">cls_name</span><span class="si">}</span><span class="s2">&#39;, the &#39;pad_mode&#39; should be one of values &quot;</span>
                             <span class="sa">f</span><span class="s2">&quot;in (&#39;valid&#39;, &#39;same&#39;, &#39;pad&#39;), but got </span><span class="si">{</span><span class="n">pad_mode</span><span class="si">}</span><span class="s2">.&quot;</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">pad_mode</span> <span class="o">=</span> <span class="n">pad_mode</span>
        <span class="k">if</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">padding</span><span class="p">,</span> <span class="nb">int</span><span class="p">):</span>
            <span class="n">Validator</span><span class="o">.</span><span class="n">check_non_negative_int</span><span class="p">(</span><span class="n">padding</span><span class="p">,</span> <span class="s1">&#39;padding&#39;</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">cls_name</span><span class="p">)</span>
            <span class="bp">self</span><span class="o">.</span><span class="n">padding</span> <span class="o">=</span> <span class="n">padding</span>
        <span class="k">elif</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">padding</span><span class="p">,</span> <span class="nb">tuple</span><span class="p">):</span>
            <span class="k">for</span> <span class="n">pad</span> <span class="ow">in</span> <span class="n">padding</span><span class="p">:</span>
                <span class="n">Validator</span><span class="o">.</span><span class="n">check_non_negative_int</span><span class="p">(</span><span class="n">pad</span><span class="p">,</span> <span class="s1">&#39;padding item&#39;</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">cls_name</span><span class="p">)</span>
            <span class="bp">self</span><span class="o">.</span><span class="n">padding</span> <span class="o">=</span> <span class="n">padding</span>
        <span class="k">else</span><span class="p">:</span>
            <span class="k">raise</span> <span class="ne">TypeError</span><span class="p">(</span><span class="sa">f</span><span class="s2">&quot;For &#39;</span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">cls_name</span><span class="si">}</span><span class="s2">&#39;, the type of &#39;padding&#39; must be int/tuple(int), &quot;</span>
                            <span class="sa">f</span><span class="s2">&quot;but got </span><span class="si">{</span><span class="nb">type</span><span class="p">(</span><span class="n">padding</span><span class="p">)</span><span class="o">.</span><span class="vm">__name__</span><span class="si">}</span><span class="s2">!&quot;</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">group</span> <span class="o">=</span> <span class="n">Validator</span><span class="o">.</span><span class="n">check_positive_int</span><span class="p">(</span><span class="n">group</span><span class="p">,</span> <span class="s2">&quot;group&quot;</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">cls_name</span><span class="p">)</span>

        <span class="n">weight_shape</span> <span class="o">=</span> <span class="p">[</span><span class="n">out_channels</span><span class="p">,</span> <span class="n">in_channels</span> <span class="o">//</span> <span class="n">group</span><span class="p">,</span> <span class="o">*</span><span class="bp">self</span><span class="o">.</span><span class="n">kernel_size</span><span class="p">]</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">weight</span> <span class="o">=</span> <span class="n">Parameter</span><span class="p">(</span><span class="n">initializer</span><span class="p">(</span><span class="n">weight_init</span><span class="p">,</span> <span class="n">weight_shape</span><span class="p">),</span> <span class="n">name</span><span class="o">=</span><span class="s1">&#39;weight&#39;</span><span class="p">)</span>

        <span class="bp">self</span><span class="o">.</span><span class="n">bias_add</span> <span class="o">=</span> <span class="n">P</span><span class="o">.</span><span class="n">BiasAdd</span><span class="p">()</span>
        <span class="k">if</span> <span class="n">Validator</span><span class="o">.</span><span class="n">check_bool</span><span class="p">(</span><span class="n">has_bias</span><span class="p">,</span> <span class="s2">&quot;has_bias&quot;</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">cls_name</span><span class="p">):</span>
            <span class="bp">self</span><span class="o">.</span><span class="n">bias</span> <span class="o">=</span> <span class="n">Parameter</span><span class="p">(</span><span class="n">initializer</span><span class="p">(</span><span class="n">bias_init</span><span class="p">,</span> <span class="p">[</span><span class="n">out_channels</span><span class="p">]),</span> <span class="n">name</span><span class="o">=</span><span class="s1">&#39;bias&#39;</span><span class="p">)</span>
        <span class="k">else</span><span class="p">:</span>
            <span class="bp">self</span><span class="o">.</span><span class="n">bias</span> <span class="o">=</span> <span class="kc">None</span>

        <span class="bp">self</span><span class="o">.</span><span class="n">conv</span> <span class="o">=</span> <span class="n">P</span><span class="o">.</span><span class="n">Conv2D</span><span class="p">(</span><span class="n">out_channel</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">out_channels</span><span class="p">,</span>
                             <span class="n">kernel_size</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">kernel_size</span><span class="p">,</span>
                             <span class="n">mode</span><span class="o">=</span><span class="mi">1</span><span class="p">,</span>
                             <span class="n">pad_mode</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">pad_mode</span><span class="p">,</span>
                             <span class="n">pad</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">padding</span><span class="p">,</span>
                             <span class="n">stride</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">stride</span><span class="p">,</span>
                             <span class="n">dilation</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">dilation</span><span class="p">,</span>
                             <span class="n">group</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">group</span><span class="p">)</span>
        <span class="n">channel_axis</span> <span class="o">=</span> <span class="mi">0</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">fake_quant_weight</span> <span class="o">=</span> <span class="n">quant_config</span><span class="o">.</span><span class="n">weight</span><span class="p">(</span><span class="n">ema</span><span class="o">=</span><span class="kc">False</span><span class="p">,</span>
                                                     <span class="n">channel_axis</span><span class="o">=</span><span class="n">channel_axis</span><span class="p">,</span>
                                                     <span class="n">num_channels</span><span class="o">=</span><span class="n">out_channels</span><span class="p">,</span>
                                                     <span class="n">quant_dtype</span><span class="o">=</span><span class="n">quant_dtype</span><span class="p">)</span>

    <span class="k">def</span> <span class="nf">construct</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">x</span><span class="p">):</span>
        <span class="n">weight</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">fake_quant_weight</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">weight</span><span class="p">)</span>
        <span class="n">out</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">conv</span><span class="p">(</span><span class="n">x</span><span class="p">,</span> <span class="n">weight</span><span class="p">)</span>
        <span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">has_bias</span><span class="p">:</span>
            <span class="k">return</span> <span class="bp">self</span><span class="o">.</span><span class="n">bias_add</span><span class="p">(</span><span class="n">out</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">bias</span><span class="p">)</span>
        <span class="k">return</span> <span class="n">out</span>

<div class="viewcode-block" id="Conv2dQuant.extend_repr"><a class="viewcode-back" href="../../../../api_python/nn/mindspore.nn.Conv2dQuant.html#mindspore.nn.Conv2dQuant.extend_repr">[docs]</a>    <span class="k">def</span> <span class="nf">extend_repr</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;Display instance object as string.&quot;&quot;&quot;</span>
        <span class="n">s</span> <span class="o">=</span> <span class="s1">&#39;in_channels=</span><span class="si">{}</span><span class="s1">, out_channels=</span><span class="si">{}</span><span class="s1">, kernel_size=</span><span class="si">{}</span><span class="s1">, stride=</span><span class="si">{}</span><span class="s1">, &#39;</span> \
            <span class="s1">&#39;pad_mode=</span><span class="si">{}</span><span class="s1">, padding=</span><span class="si">{}</span><span class="s1">, dilation=</span><span class="si">{}</span><span class="s1">, group=</span><span class="si">{}</span><span class="s1">, &#39;</span> \
            <span class="s1">&#39;has_bias=</span><span class="si">{}</span><span class="s1">, quant_delay=</span><span class="si">{}</span><span class="s1">&#39;</span><span class="o">.</span><span class="n">format</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">in_channels</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">out_channels</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">kernel_size</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">stride</span><span class="p">,</span>
                                                 <span class="bp">self</span><span class="o">.</span><span class="n">pad_mode</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">padding</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">dilation</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">group</span><span class="p">,</span>
                                                 <span class="bp">self</span><span class="o">.</span><span class="n">has_bias</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">fake_quant_weight</span><span class="o">.</span><span class="n">quant_delay</span><span class="p">)</span>
        <span class="k">return</span> <span class="n">s</span></div></div>


<div class="viewcode-block" id="DenseQuant"><a class="viewcode-back" href="../../../../api_python/nn/mindspore.nn.DenseQuant.html#mindspore.nn.DenseQuant">[docs]</a><span class="k">class</span> <span class="nc">DenseQuant</span><span class="p">(</span><span class="n">Cell</span><span class="p">):</span>
    <span class="sa">r</span><span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    The fully connected layer with fake quantized operation.</span>

<span class="sd">    This part is a more detailed overview of Dense operation. For more details about Quantization,</span>
<span class="sd">    please refer to the implementation of class of `FakeQuantWithMinMaxObserver`,</span>
<span class="sd">    :class:`mindspore.nn.FakeQuantWithMinMaxObserver`.</span>

<span class="sd">    Args:</span>
<span class="sd">        in_channels (int): The dimension of the input space.</span>
<span class="sd">        out_channels (int): The dimension of the output space.</span>
<span class="sd">        weight_init (Union[Tensor, str, Initializer, numbers.Number]): The trainable weight_init parameter. The dtype</span>
<span class="sd">            is same as `x`. The values of str refer to the function `initializer`. Default: &#39;normal&#39;.</span>
<span class="sd">        bias_init (Union[Tensor, str, Initializer, numbers.Number]): The trainable bias_init parameter. The dtype is</span>
<span class="sd">            same as `x`. The values of str refer to the function `initializer`. Default: &#39;zeros&#39;.</span>
<span class="sd">        has_bias (bool): Specifies whether the layer uses a bias vector. Default: True.</span>
<span class="sd">        activation (Union[str, Cell, Primitive]): The activation function applied to the output of the layer,</span>
<span class="sd">            e.g. &#39;relu&#39;. Default: None.</span>
<span class="sd">        quant_config (QuantConfig): Configures the types of quant observer and quant settings of weight and</span>
<span class="sd">            activation. Note that, QuantConfig is a special namedtuple, which is designed for quantization</span>
<span class="sd">            and can be generated by :func:`mindspore.compression.quant.create_quant_config` method.</span>
<span class="sd">            Default: QuantConfig with both items set to default :class:`FakeQuantWithMinMaxObserver`.</span>
<span class="sd">        quant_dtype (QuantDtype): Specifies the FakeQuant datatype. Default: QuantDtype.INT8.</span>

<span class="sd">    Inputs:</span>
<span class="sd">        - **x** (Tensor) - Tensor of shape :math:`(N, C_{in}, H_{in}, W_{in})`.</span>
<span class="sd">          The input dimension is preferably 2D or 4D.</span>

<span class="sd">    Outputs:</span>
<span class="sd">        Tensor of shape :math:`(N, C_{out}, H_{out}, W_{out})`.</span>

<span class="sd">    Raises:</span>
<span class="sd">        TypeError: If `in_channels` or `out_channels` is not an int.</span>
<span class="sd">        TypeError: If `has_bias` is not a bool.</span>
<span class="sd">        TypeError: If `activation` is not one of str, Cell or Primitive.</span>
<span class="sd">        ValueError: If `in_channels` or `out_channels` is less than 1.</span>
<span class="sd">        ValueError: If the dims of `weight_init` is not equal to 2 or the first element of `weight_init` is not equal</span>
<span class="sd">            to `out_channels` or the second element of `weight_init` is not equal to `in_channels`.</span>
<span class="sd">        ValueError: If the dims of `bias_init` is not equal to 1 or the element of `bias_init` is not equal</span>
<span class="sd">            to `out_channels`.</span>

<span class="sd">    Supported Platforms:</span>
<span class="sd">        ``Ascend`` ``GPU``</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; import mindspore</span>
<span class="sd">        &gt;&gt;&gt; from mindspore.compression import quant</span>
<span class="sd">        &gt;&gt;&gt; from mindspore import Tensor</span>
<span class="sd">        &gt;&gt;&gt; qconfig = quant.create_quant_config()</span>
<span class="sd">        &gt;&gt;&gt; dense_quant = nn.DenseQuant(2, 1, weight_init=&#39;ones&#39;, quant_config=qconfig)</span>
<span class="sd">        &gt;&gt;&gt; x = Tensor(np.array([[1, 5], [3, 4]]), mindspore.float32)</span>
<span class="sd">        &gt;&gt;&gt; result = dense_quant(x)</span>
<span class="sd">        &gt;&gt;&gt; print(result)</span>
<span class="sd">        [[5.929413]</span>
<span class="sd">         [6.9176483]]</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span>
                 <span class="n">in_channels</span><span class="p">,</span>
                 <span class="n">out_channels</span><span class="p">,</span>
                 <span class="n">weight_init</span><span class="o">=</span><span class="s1">&#39;normal&#39;</span><span class="p">,</span>
                 <span class="n">bias_init</span><span class="o">=</span><span class="s1">&#39;zeros&#39;</span><span class="p">,</span>
                 <span class="n">has_bias</span><span class="o">=</span><span class="kc">True</span><span class="p">,</span>
                 <span class="n">activation</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span>
                 <span class="n">quant_config</span><span class="o">=</span><span class="n">quant_config_default</span><span class="p">,</span>
                 <span class="n">quant_dtype</span><span class="o">=</span><span class="n">QuantDtype</span><span class="o">.</span><span class="n">INT8</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;Initialize DenseQuant.&quot;&quot;&quot;</span>
        <span class="nb">super</span><span class="p">(</span><span class="n">DenseQuant</span><span class="p">,</span> <span class="bp">self</span><span class="p">)</span><span class="o">.</span><span class="fm">__init__</span><span class="p">()</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">in_channels</span> <span class="o">=</span> <span class="n">Validator</span><span class="o">.</span><span class="n">check_positive_int</span><span class="p">(</span><span class="n">in_channels</span><span class="p">,</span> <span class="s2">&quot;in_channels&quot;</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">cls_name</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">out_channels</span> <span class="o">=</span> <span class="n">Validator</span><span class="o">.</span><span class="n">check_positive_int</span><span class="p">(</span><span class="n">out_channels</span><span class="p">,</span> <span class="s2">&quot;out_channels&quot;</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">cls_name</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">has_bias</span> <span class="o">=</span> <span class="n">Validator</span><span class="o">.</span><span class="n">check_bool</span><span class="p">(</span><span class="n">has_bias</span><span class="p">,</span> <span class="s2">&quot;has_bias&quot;</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">cls_name</span><span class="p">)</span>

        <span class="k">if</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">weight_init</span><span class="p">,</span> <span class="n">Tensor</span><span class="p">):</span>
            <span class="k">if</span> <span class="n">weight_init</span><span class="o">.</span><span class="n">ndim</span> <span class="o">!=</span> <span class="mi">2</span> <span class="ow">or</span> <span class="n">weight_init</span><span class="o">.</span><span class="n">shape</span><span class="p">[</span><span class="mi">0</span><span class="p">]</span> <span class="o">!=</span> <span class="n">out_channels</span> <span class="ow">or</span> \
                    <span class="n">weight_init</span><span class="o">.</span><span class="n">shape</span><span class="p">[</span><span class="mi">1</span><span class="p">]</span> <span class="o">!=</span> <span class="n">in_channels</span><span class="p">:</span>
                <span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span><span class="sa">f</span><span class="s2">&quot;For &#39;</span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">cls_name</span><span class="si">}</span><span class="s2">&#39;, weight init shape error. The ndim of &#39;weight_init&#39; should &quot;</span>
                                 <span class="sa">f</span><span class="s2">&quot;be equal to 2, and the first dim should be equal to &#39;out_channels&#39;, and the &quot;</span>
                                 <span class="sa">f</span><span class="s2">&quot;second dim should be equal to &#39;in_channels&#39;. But got &#39;weight_init&#39;: </span><span class="si">{</span><span class="n">weight_init</span><span class="si">}</span><span class="s2">, &quot;</span>
                                 <span class="sa">f</span><span class="s2">&quot;&#39;out_channels&#39;: </span><span class="si">{</span><span class="n">out_channels</span><span class="si">}</span><span class="s2">, &#39;in_channels&#39;: </span><span class="si">{</span><span class="n">in_channels</span><span class="si">}</span><span class="s2">.&quot;</span><span class="p">)</span>

        <span class="bp">self</span><span class="o">.</span><span class="n">weight</span> <span class="o">=</span> <span class="n">Parameter</span><span class="p">(</span><span class="n">initializer</span><span class="p">(</span>
            <span class="n">weight_init</span><span class="p">,</span> <span class="p">[</span><span class="n">out_channels</span><span class="p">,</span> <span class="n">in_channels</span><span class="p">]),</span> <span class="n">name</span><span class="o">=</span><span class="s2">&quot;weight&quot;</span><span class="p">)</span>

        <span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">has_bias</span><span class="p">:</span>
            <span class="k">if</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">bias_init</span><span class="p">,</span> <span class="n">Tensor</span><span class="p">):</span>
                <span class="k">if</span> <span class="n">bias_init</span><span class="o">.</span><span class="n">ndim</span> <span class="o">!=</span> <span class="mi">1</span> <span class="ow">or</span> <span class="n">bias_init</span><span class="o">.</span><span class="n">shape</span><span class="p">[</span><span class="mi">0</span><span class="p">]</span> <span class="o">!=</span> <span class="n">out_channels</span><span class="p">:</span>
                    <span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span><span class="sa">f</span><span class="s2">&quot;For &#39;</span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">cls_name</span><span class="si">}</span><span class="s2">&#39;, bias init shape error. The ndim of &#39;bias_init&#39; should &quot;</span>
                                     <span class="sa">f</span><span class="s2">&quot;be equal to 1, and the first dim should be equal to &#39;out_channels&#39;. But got &quot;</span>
                                     <span class="sa">f</span><span class="s2">&quot;&#39;bias_init&#39;: </span><span class="si">{</span><span class="n">bias_init</span><span class="si">}</span><span class="s2">, &#39;out_channels&#39;: </span><span class="si">{</span><span class="n">out_channels</span><span class="si">}</span><span class="s2">.&quot;</span><span class="p">)</span>

            <span class="bp">self</span><span class="o">.</span><span class="n">bias</span> <span class="o">=</span> <span class="n">Parameter</span><span class="p">(</span><span class="n">initializer</span><span class="p">(</span>
                <span class="n">bias_init</span><span class="p">,</span> <span class="p">[</span><span class="n">out_channels</span><span class="p">]),</span> <span class="n">name</span><span class="o">=</span><span class="s2">&quot;bias&quot;</span><span class="p">)</span>

        <span class="bp">self</span><span class="o">.</span><span class="n">matmul</span> <span class="o">=</span> <span class="n">P</span><span class="o">.</span><span class="n">MatMul</span><span class="p">(</span><span class="n">transpose_b</span><span class="o">=</span><span class="kc">True</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">bias_add</span> <span class="o">=</span> <span class="n">P</span><span class="o">.</span><span class="n">BiasAdd</span><span class="p">()</span>

        <span class="bp">self</span><span class="o">.</span><span class="n">activation</span> <span class="o">=</span> <span class="n">get_activation</span><span class="p">(</span><span class="n">activation</span><span class="p">)</span> <span class="k">if</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">activation</span><span class="p">,</span> <span class="nb">str</span><span class="p">)</span> <span class="k">else</span> <span class="n">activation</span>
        <span class="k">if</span> <span class="n">activation</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span> <span class="ow">and</span> <span class="ow">not</span> <span class="nb">isinstance</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">activation</span><span class="p">,</span> <span class="p">(</span><span class="n">Cell</span><span class="p">,</span> <span class="n">Primitive</span><span class="p">)):</span>
            <span class="k">raise</span> <span class="ne">TypeError</span><span class="p">(</span><span class="sa">f</span><span class="s2">&quot;For &#39;</span><span class="si">{</span><span class="bp">self</span><span class="o">.</span><span class="n">cls_name</span><span class="si">}</span><span class="s2">&#39;, the &#39;activation&#39; must be str or Cell or Primitive, &quot;</span>
                            <span class="sa">f</span><span class="s2">&quot;but got </span><span class="si">{</span><span class="n">activation</span><span class="si">}</span><span class="s2">.&quot;</span><span class="p">)</span>

        <span class="bp">self</span><span class="o">.</span><span class="n">activation_flag</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">activation</span> <span class="ow">is</span> <span class="ow">not</span> <span class="kc">None</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">fake_quant_weight</span> <span class="o">=</span> <span class="n">quant_config</span><span class="o">.</span><span class="n">weight</span><span class="p">(</span><span class="n">ema</span><span class="o">=</span><span class="kc">False</span><span class="p">,</span>
                                                     <span class="n">channel_axis</span><span class="o">=</span><span class="mi">0</span><span class="p">,</span>
                                                     <span class="n">num_channels</span><span class="o">=</span><span class="n">out_channels</span><span class="p">,</span>
                                                     <span class="n">quant_dtype</span><span class="o">=</span><span class="n">quant_dtype</span><span class="p">)</span>

<div class="viewcode-block" id="DenseQuant.construct"><a class="viewcode-back" href="../../../../api_python/nn/mindspore.nn.DenseQuant.html#mindspore.nn.DenseQuant.construct">[docs]</a>    <span class="k">def</span> <span class="nf">construct</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">x</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;Use operators to construct the Dense layer.</span>

<span class="sd">        Args:</span>
<span class="sd">            x (Tensor): Input tensor.</span>
<span class="sd">        &quot;&quot;&quot;</span>
        <span class="n">output</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">fake_quant_weight</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">weight</span><span class="p">)</span>
        <span class="n">output</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">matmul</span><span class="p">(</span><span class="n">x</span><span class="p">,</span> <span class="n">output</span><span class="p">)</span>
        <span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">has_bias</span><span class="p">:</span>
            <span class="n">output</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">bias_add</span><span class="p">(</span><span class="n">output</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">bias</span><span class="p">)</span>
        <span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">activation_flag</span><span class="p">:</span>
            <span class="k">return</span> <span class="bp">self</span><span class="o">.</span><span class="n">activation</span><span class="p">(</span><span class="n">output</span><span class="p">)</span>
        <span class="k">return</span> <span class="n">output</span></div>

<div class="viewcode-block" id="DenseQuant.extend_repr"><a class="viewcode-back" href="../../../../api_python/nn/mindspore.nn.DenseQuant.html#mindspore.nn.DenseQuant.extend_repr">[docs]</a>    <span class="k">def</span> <span class="nf">extend_repr</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;A pretty print for Dense layer.&quot;&quot;&quot;</span>
        <span class="n">s</span> <span class="o">=</span> <span class="s1">&#39;in_channels=</span><span class="si">{}</span><span class="s1">, out_channels=</span><span class="si">{}</span><span class="s1">, weight=</span><span class="si">{}</span><span class="s1">, has_bias=</span><span class="si">{}</span><span class="s1">&#39;</span><span class="o">.</span><span class="n">format</span><span class="p">(</span>
            <span class="bp">self</span><span class="o">.</span><span class="n">in_channels</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">out_channels</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">weight</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">has_bias</span><span class="p">)</span>
        <span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">has_bias</span><span class="p">:</span>
            <span class="n">s</span> <span class="o">+=</span> <span class="s1">&#39;, bias=</span><span class="si">{}</span><span class="s1">&#39;</span><span class="o">.</span><span class="n">format</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">bias</span><span class="p">)</span>
        <span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">activation_flag</span><span class="p">:</span>
            <span class="n">s</span> <span class="o">+=</span> <span class="s1">&#39;, activation=</span><span class="si">{}</span><span class="s1">&#39;</span><span class="o">.</span><span class="n">format</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">activation</span><span class="p">)</span>
        <span class="k">return</span> <span class="n">s</span></div></div>


<span class="k">class</span> <span class="nc">_QuantActivation</span><span class="p">(</span><span class="n">Cell</span><span class="p">):</span>
    <span class="sa">r</span><span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    Base class for quantization aware training activation function. Adds fake quantized operation</span>
<span class="sd">    after activation operation.</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="k">def</span> <span class="nf">get_origin</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="k">raise</span> <span class="ne">NotImplementedError</span>


<div class="viewcode-block" id="ActQuant"><a class="viewcode-back" href="../../../../api_python/nn/mindspore.nn.ActQuant.html#mindspore.nn.ActQuant">[docs]</a><span class="k">class</span> <span class="nc">ActQuant</span><span class="p">(</span><span class="n">_QuantActivation</span><span class="p">):</span>
    <span class="sa">r</span><span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    Quantization aware training activation function.</span>

<span class="sd">    Add the fake quantized operation to the end of activation operation, by which the output of activation</span>
<span class="sd">    operation will be truncated. For more details about Quantization, please refer to the implementation</span>
<span class="sd">    of the subclass of `FakeQuantWithMinMaxObserver`, :class:`mindspore.nn.FakeQuantWithMinMaxObserver`.</span>

<span class="sd">    Args:</span>
<span class="sd">        activation (Cell): Activation cell.</span>
<span class="sd">        ema (bool): Whether the exponential moving average algorithm is used to update min and max. Default: False.</span>
<span class="sd">        ema_decay (float): Exponential Moving Average algorithm parameter. Default: 0.999.</span>
<span class="sd">        fake_before (bool): Whether to add the fake quantized operation before the activation. Default: False.</span>
<span class="sd">        quant_config (QuantConfig): Configures the types of quant observer and quant settings of weight and</span>
<span class="sd">            activation. Note that, QuantConfig is a special namedtuple, which is designed for quantization</span>
<span class="sd">            and can be generated by :func:`mindspore.compression.quant.create_quant_config` method.</span>
<span class="sd">            Default: QuantConfig with both items set to default :class:`FakeQuantWithMinMaxObserver`.</span>
<span class="sd">        quant_dtype (QuantDtype): Specifies the FakeQuant datatype. Default: QuantDtype.INT8.</span>

<span class="sd">    Inputs:</span>
<span class="sd">        - **x** (Tensor) - The input of ActQuant. The input dimension is preferably 2D or 4D.</span>

<span class="sd">    Outputs:</span>
<span class="sd">        Tensor, with the same type and shape as the `x`.</span>

<span class="sd">    Raises:</span>
<span class="sd">        TypeError: If `activation` is not an instance of Cell.</span>
<span class="sd">        TypeError: If `fake_before` is not a bool.</span>

<span class="sd">    Supported Platforms:</span>
<span class="sd">        ``Ascend`` ``GPU``</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; import mindspore</span>
<span class="sd">        &gt;&gt;&gt; from mindspore.compression import quant</span>
<span class="sd">        &gt;&gt;&gt; from mindspore import Tensor</span>
<span class="sd">        &gt;&gt;&gt; qconfig = quant.create_quant_config()</span>
<span class="sd">        &gt;&gt;&gt; act_quant = nn.ActQuant(nn.ReLU(), quant_config=qconfig)</span>
<span class="sd">        &gt;&gt;&gt; x = Tensor(np.array([[1, 2, -1], [-2, 0, -1]]), mindspore.float32)</span>
<span class="sd">        &gt;&gt;&gt; result = act_quant(x)</span>
<span class="sd">        &gt;&gt;&gt; print(result)</span>
<span class="sd">        [[0.9882355 1.9764705 0.       ]</span>
<span class="sd">         [0.        0.        0.       ]]</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span>
                 <span class="n">activation</span><span class="p">,</span>
                 <span class="n">ema</span><span class="o">=</span><span class="kc">False</span><span class="p">,</span>
                 <span class="n">ema_decay</span><span class="o">=</span><span class="mf">0.999</span><span class="p">,</span>
                 <span class="n">fake_before</span><span class="o">=</span><span class="kc">False</span><span class="p">,</span>
                 <span class="n">quant_config</span><span class="o">=</span><span class="n">quant_config_default</span><span class="p">,</span>
                 <span class="n">quant_dtype</span><span class="o">=</span><span class="n">QuantDtype</span><span class="o">.</span><span class="n">INT8</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;Initialize ActQuant.&quot;&quot;&quot;</span>
        <span class="nb">super</span><span class="p">(</span><span class="n">ActQuant</span><span class="p">,</span> <span class="bp">self</span><span class="p">)</span><span class="o">.</span><span class="fm">__init__</span><span class="p">()</span>
        <span class="n">act_class</span> <span class="o">=</span> <span class="n">activation</span><span class="o">.</span><span class="vm">__class__</span>
        <span class="n">act_list</span> <span class="o">=</span> <span class="p">[</span><span class="n">nn</span><span class="o">.</span><span class="n">ReLU</span><span class="p">,</span> <span class="n">nn</span><span class="o">.</span><span class="n">ReLU6</span><span class="p">]</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">act</span> <span class="o">=</span> <span class="n">Validator</span><span class="o">.</span><span class="n">check_isinstance</span><span class="p">(</span><span class="s2">&quot;activation&quot;</span><span class="p">,</span> <span class="n">activation</span><span class="p">,</span> <span class="n">Cell</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">fake_before</span> <span class="o">=</span> <span class="n">Validator</span><span class="o">.</span><span class="n">check_bool</span><span class="p">(</span><span class="n">fake_before</span><span class="p">,</span> <span class="s2">&quot;fake_before&quot;</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">cls_name</span><span class="p">)</span>
        <span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">fake_before</span><span class="p">:</span>
            <span class="bp">self</span><span class="o">.</span><span class="n">fake_quant_act_before</span> <span class="o">=</span> <span class="n">quant_config</span><span class="o">.</span><span class="n">activation</span><span class="p">(</span><span class="n">min_init</span><span class="o">=-</span><span class="mi">6</span><span class="p">,</span>
                                                                 <span class="n">max_init</span><span class="o">=</span><span class="mi">6</span><span class="p">,</span>
                                                                 <span class="n">ema</span><span class="o">=</span><span class="n">ema</span><span class="p">,</span>
                                                                 <span class="n">ema_decay</span><span class="o">=</span><span class="n">ema_decay</span><span class="p">,</span>
                                                                 <span class="n">quant_dtype</span><span class="o">=</span><span class="n">quant_dtype</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">neg_trunc</span> <span class="o">=</span> <span class="kc">False</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">narrow_range</span> <span class="o">=</span> <span class="kc">False</span>
        <span class="n">preset_dict</span> <span class="o">=</span> <span class="n">quant_config</span><span class="o">.</span><span class="n">activation</span><span class="o">.</span><span class="n">p</span><span class="o">.</span><span class="n">keywords</span>
        <span class="k">if</span> <span class="s1">&#39;mode&#39;</span> <span class="ow">in</span> <span class="n">preset_dict</span> <span class="ow">and</span> <span class="n">preset_dict</span><span class="p">[</span><span class="s1">&#39;mode&#39;</span><span class="p">]</span> <span class="o">==</span> <span class="s2">&quot;LEARNED_SCALE&quot;</span> <span class="ow">and</span> <span class="n">act_class</span> <span class="ow">in</span> <span class="n">act_list</span><span class="p">:</span>
            <span class="bp">self</span><span class="o">.</span><span class="n">neg_trunc</span> <span class="o">=</span> <span class="kc">True</span>
        <span class="k">elif</span> <span class="s1">&#39;narrow_range&#39;</span> <span class="ow">in</span> <span class="n">preset_dict</span><span class="p">:</span>
            <span class="bp">self</span><span class="o">.</span><span class="n">narrow_range</span> <span class="o">=</span> <span class="n">preset_dict</span><span class="p">[</span><span class="s1">&#39;narrow_range&#39;</span><span class="p">]</span>

        <span class="bp">self</span><span class="o">.</span><span class="n">fake_quant_act</span> <span class="o">=</span> <span class="n">quant_config</span><span class="o">.</span><span class="n">activation</span><span class="p">(</span><span class="n">min_init</span><span class="o">=-</span><span class="mi">6</span><span class="p">,</span>
                                                      <span class="n">max_init</span><span class="o">=</span><span class="mi">6</span><span class="p">,</span>
                                                      <span class="n">ema</span><span class="o">=</span><span class="n">ema</span><span class="p">,</span>
                                                      <span class="n">ema_decay</span><span class="o">=</span><span class="n">ema_decay</span><span class="p">,</span>
                                                      <span class="n">quant_dtype</span><span class="o">=</span><span class="n">quant_dtype</span><span class="p">,</span>
                                                      <span class="n">neg_trunc</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">neg_trunc</span><span class="p">,</span>
                                                      <span class="n">narrow_range</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">narrow_range</span><span class="p">)</span>

    <span class="k">def</span> <span class="nf">construct</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">x</span><span class="p">):</span>
        <span class="k">if</span> <span class="bp">self</span><span class="o">.</span><span class="n">fake_before</span><span class="p">:</span>
            <span class="n">x</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">fake_quant_act_before</span><span class="p">(</span><span class="n">x</span><span class="p">)</span>
        <span class="n">x</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">act</span><span class="p">(</span><span class="n">x</span><span class="p">)</span>
        <span class="n">x</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">fake_quant_act</span><span class="p">(</span><span class="n">x</span><span class="p">)</span>
        <span class="k">return</span> <span class="n">x</span>

    <span class="k">def</span> <span class="nf">get_origin</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
        <span class="k">return</span> <span class="bp">self</span><span class="o">.</span><span class="n">act</span></div>


<div class="viewcode-block" id="TensorAddQuant"><a class="viewcode-back" href="../../../../api_python/nn/mindspore.nn.TensorAddQuant.html#mindspore.nn.TensorAddQuant">[docs]</a><span class="k">class</span> <span class="nc">TensorAddQuant</span><span class="p">(</span><span class="n">Cell</span><span class="p">):</span>
    <span class="sa">r</span><span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    Adds fake quantized operation after TensorAdd operation.</span>

<span class="sd">    This part is a more detailed overview of TensorAdd operation. For more details about Quantization,</span>
<span class="sd">    please refer to the implementation of the class `FakeQuantWithMinMaxObserver`,</span>
<span class="sd">    :class:`mindspore.nn.FakeQuantWithMinMaxObserver`.</span>

<span class="sd">    Args:</span>
<span class="sd">        ema_decay (float): Exponential Moving Average algorithm parameter. Default: 0.999.</span>
<span class="sd">        quant_config (QuantConfig): Configures the types of quant observer and quant settings of weight and</span>
<span class="sd">            activation. Note that, QuantConfig is a special namedtuple, which is designed for quantization</span>
<span class="sd">            and can be generated by :func:`mindspore.compression.quant.create_quant_config` method.</span>
<span class="sd">            Default: QuantConfig with both items set to default :class:`FakeQuantWithMinMaxObserver`.</span>
<span class="sd">        quant_dtype (QuantDtype): Specifies the FakeQuant datatype. Default: QuantDtype.INT8.</span>

<span class="sd">    Inputs:</span>
<span class="sd">        - **x1** (Tensor) - The first tensor of TensorAddQuant. The input dimension is preferably 2D or 4D.</span>
<span class="sd">        - **x2** (Tensor) - The second tensor of TensorAddQuant. Has the same shape as `x1`.</span>

<span class="sd">    Outputs:</span>
<span class="sd">        Tensor, with the same type and shape as the `x1`.</span>

<span class="sd">    Raises:</span>
<span class="sd">        TypeError: If `ema_decay` is not a float.</span>
<span class="sd">        ValueError: If the shape of `x2` is different from that of `x1`.</span>

<span class="sd">    Supported Platforms:</span>
<span class="sd">        ``Ascend`` ``GPU``</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; import mindspore</span>
<span class="sd">        &gt;&gt;&gt; from mindspore.compression import quant</span>
<span class="sd">        &gt;&gt;&gt; from mindspore import Tensor</span>
<span class="sd">        &gt;&gt;&gt; qconfig = quant.create_quant_config()</span>
<span class="sd">        &gt;&gt;&gt; add_quant = nn.TensorAddQuant(quant_config=qconfig)</span>
<span class="sd">        &gt;&gt;&gt; x1 = Tensor(np.array([[1, 2, 1], [-2, 0, -1]]), mindspore.float32)</span>
<span class="sd">        &gt;&gt;&gt; x2 = Tensor(np.ones((2, 3)), mindspore.float32)</span>
<span class="sd">        &gt;&gt;&gt; output = add_quant(x1, x2)</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        [[ 1.9764705  3.011765   1.9764705]</span>
<span class="sd">         [-0.9882355  0.9882355  0.       ]]</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span>
                 <span class="n">ema_decay</span><span class="o">=</span><span class="mf">0.999</span><span class="p">,</span>
                 <span class="n">quant_config</span><span class="o">=</span><span class="n">quant_config_default</span><span class="p">,</span>
                 <span class="n">quant_dtype</span><span class="o">=</span><span class="n">QuantDtype</span><span class="o">.</span><span class="n">INT8</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;Initialize TensorAddQuant.&quot;&quot;&quot;</span>
        <span class="nb">super</span><span class="p">(</span><span class="n">TensorAddQuant</span><span class="p">,</span> <span class="bp">self</span><span class="p">)</span><span class="o">.</span><span class="fm">__init__</span><span class="p">()</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">fake_quant_act</span> <span class="o">=</span> <span class="n">quant_config</span><span class="o">.</span><span class="n">activation</span><span class="p">(</span><span class="n">min_init</span><span class="o">=-</span><span class="mi">6</span><span class="p">,</span>
                                                      <span class="n">max_init</span><span class="o">=</span><span class="mi">6</span><span class="p">,</span>
                                                      <span class="n">ema</span><span class="o">=</span><span class="kc">True</span><span class="p">,</span>
                                                      <span class="n">ema_decay</span><span class="o">=</span><span class="n">ema_decay</span><span class="p">,</span>
                                                      <span class="n">quant_dtype</span><span class="o">=</span><span class="n">quant_dtype</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">add</span> <span class="o">=</span> <span class="n">P</span><span class="o">.</span><span class="n">Add</span><span class="p">()</span>

    <span class="k">def</span> <span class="nf">construct</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">x1</span><span class="p">,</span> <span class="n">x2</span><span class="p">):</span>
        <span class="n">x</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">add</span><span class="p">(</span><span class="n">x1</span><span class="p">,</span> <span class="n">x2</span><span class="p">)</span>
        <span class="n">x</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">fake_quant_act</span><span class="p">(</span><span class="n">x</span><span class="p">)</span>
        <span class="k">return</span> <span class="n">x</span></div>


<div class="viewcode-block" id="MulQuant"><a class="viewcode-back" href="../../../../api_python/nn/mindspore.nn.MulQuant.html#mindspore.nn.MulQuant">[docs]</a><span class="k">class</span> <span class="nc">MulQuant</span><span class="p">(</span><span class="n">Cell</span><span class="p">):</span>
    <span class="sa">r</span><span class="sd">&quot;&quot;&quot;</span>
<span class="sd">    Adds fake quantized operation after `Mul` operation.</span>

<span class="sd">    This part is a more detailed overview of the `Mul` operation. For more details about Quantization,</span>
<span class="sd">    please refer to the implementation of class of `FakeQuantWithMinMaxObserver`,</span>
<span class="sd">    :class:`mindspore.nn.FakeQuantWithMinMaxObserver`.</span>

<span class="sd">    Args:</span>
<span class="sd">        ema_decay (float): Exponential Moving Average algorithm parameter. Default: 0.999.</span>
<span class="sd">        quant_config (QuantConfig): Configures the types of quant observer and quant settings of weight and</span>
<span class="sd">            activation. Note that, QuantConfig is a special namedtuple, which is designed for quantization</span>
<span class="sd">            and can be generated by :func:`mindspore.compression.quant.create_quant_config` method.</span>
<span class="sd">            Default: QuantConfig with both items set to default :class:`FakeQuantWithMinMaxObserver`.</span>
<span class="sd">        quant_dtype (QuantDtype): Specifies the FakeQuant datatype. Default: QuantDtype.INT8.</span>

<span class="sd">    Inputs:</span>
<span class="sd">        - **x1** (Tensor) - The first tensor of MulQuant. The input dimension is preferably 2D or 4D.</span>
<span class="sd">        - **x2** (Tensor) - The second tensor of MulQuant. Has the same shape as `x1`.</span>

<span class="sd">    Outputs:</span>
<span class="sd">        Tensor, with the same type and shape as `x1`.</span>

<span class="sd">    Raises:</span>
<span class="sd">        TypeError: If `ema_decay` is not a float.</span>
<span class="sd">        ValueError: If the shape of `x2` is different from that of `x1`.</span>

<span class="sd">    Supported Platforms:</span>
<span class="sd">        ``Ascend`` ``GPU``</span>

<span class="sd">    Examples:</span>
<span class="sd">        &gt;&gt;&gt; import mindspore</span>
<span class="sd">        &gt;&gt;&gt; from mindspore.compression import quant</span>
<span class="sd">        &gt;&gt;&gt; from mindspore import Tensor</span>
<span class="sd">        &gt;&gt;&gt; qconfig = quant.create_quant_config()</span>
<span class="sd">        &gt;&gt;&gt; mul_quant = nn.MulQuant(quant_config=qconfig)</span>
<span class="sd">        &gt;&gt;&gt; x1 = Tensor(np.array([[1, 2, 1], [-2, 0, -1]]), mindspore.float32)</span>
<span class="sd">        &gt;&gt;&gt; x2 = Tensor(np.ones((2, 3)) * 2, mindspore.float32)</span>
<span class="sd">        &gt;&gt;&gt; output = mul_quant(x1, x2)</span>
<span class="sd">        &gt;&gt;&gt; print(output)</span>
<span class="sd">        [[ 1.9764705  4.0000005  1.9764705]</span>
<span class="sd">         [-4.         0.        -1.9764705]]</span>
<span class="sd">    &quot;&quot;&quot;</span>

    <span class="k">def</span> <span class="fm">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span>
                 <span class="n">ema_decay</span><span class="o">=</span><span class="mf">0.999</span><span class="p">,</span>
                 <span class="n">quant_config</span><span class="o">=</span><span class="n">quant_config_default</span><span class="p">,</span>
                 <span class="n">quant_dtype</span><span class="o">=</span><span class="n">QuantDtype</span><span class="o">.</span><span class="n">INT8</span><span class="p">):</span>
        <span class="sd">&quot;&quot;&quot;Initialize MulQuant.&quot;&quot;&quot;</span>
        <span class="nb">super</span><span class="p">(</span><span class="n">MulQuant</span><span class="p">,</span> <span class="bp">self</span><span class="p">)</span><span class="o">.</span><span class="fm">__init__</span><span class="p">()</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">fake_quant_act</span> <span class="o">=</span> <span class="n">quant_config</span><span class="o">.</span><span class="n">activation</span><span class="p">(</span><span class="n">min_init</span><span class="o">=-</span><span class="mi">6</span><span class="p">,</span>
                                                      <span class="n">max_init</span><span class="o">=</span><span class="mi">6</span><span class="p">,</span>
                                                      <span class="n">ema</span><span class="o">=</span><span class="kc">True</span><span class="p">,</span>
                                                      <span class="n">ema_decay</span><span class="o">=</span><span class="n">ema_decay</span><span class="p">,</span>
                                                      <span class="n">quant_dtype</span><span class="o">=</span><span class="n">quant_dtype</span><span class="p">)</span>
        <span class="bp">self</span><span class="o">.</span><span class="n">mul</span> <span class="o">=</span> <span class="n">P</span><span class="o">.</span><span class="n">Mul</span><span class="p">()</span>

    <span class="k">def</span> <span class="nf">construct</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">x1</span><span class="p">,</span> <span class="n">x2</span><span class="p">):</span>
        <span class="n">x</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">mul</span><span class="p">(</span><span class="n">x1</span><span class="p">,</span> <span class="n">x2</span><span class="p">)</span>
        <span class="n">x</span> <span class="o">=</span> <span class="bp">self</span><span class="o">.</span><span class="n">fake_quant_act</span><span class="p">(</span><span class="n">x</span><span class="p">)</span>
        <span class="k">return</span> <span class="n">x</span></div>
</pre></div>

           </div>
           
          </div>
          <footer>

  <hr/>

  <div role="contentinfo">
    <p>
        &#169; Copyright 2021, MindSpore.

    </p>
  </div>
    
    
    
    Built with <a href="https://www.sphinx-doc.org/">Sphinx</a> using a
    
    <a href="https://github.com/readthedocs/sphinx_rtd_theme">theme</a>
    
    provided by <a href="https://readthedocs.org">Read the Docs</a>. 

</footer>
        </div>
      </div>

    </section>

  </div>
  

  <script type="text/javascript">
      jQuery(function () {
          SphinxRtdTheme.Navigation.enable(true);
      });
  </script>

  
  
    
   

</body>
</html>